<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JSG</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Serious Games</journal-id>
      <journal-title>JMIR Serious Games</journal-title>
      <issn pub-type="epub">2291-9279</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i3e32297</article-id>
      <article-id pub-id-type="pmid">35900825</article-id>
      <article-id pub-id-type="doi">10.2196/32297</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Speech Processing as a Far-Transfer Gauge of Serious Games for Cognitive Training in Aging: Randomized Controlled Trial of Web-Based Effectivate Training</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Zary</surname>
            <given-names>Nabil</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Richert-Kaźmierska</surname>
            <given-names>Anita</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Aksoy</surname>
            <given-names>Mehmet</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Nitsan</surname>
            <given-names>Gal</given-names>
          </name>
          <degrees>MA</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8567-2823</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Baharav</surname>
            <given-names>Shai</given-names>
          </name>
          <degrees>BA</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2527-5794</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Tal-Shir</surname>
            <given-names>Dalith</given-names>
          </name>
          <degrees>BA</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8276-0991</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Shakuf</surname>
            <given-names>Vered</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7055-7080</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Ben-David</surname>
            <given-names>Boaz M</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>Baruch Ivcher School of Psychology</institution>
            <institution>Reichman University (IDC)</institution>
            <addr-line>P.O. Box 167</addr-line>
            <addr-line>Herzliya, 4610101</addr-line>
            <country>Israel</country>
            <phone>972 584004055</phone>
            <email>boaz.ben.david@idc.ac.il</email>
          </address>
          <xref rid="aff4" ref-type="aff">4</xref>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0392-962X</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Communication Sciences and Disorders</institution>
        <institution>University of Haifa</institution>
        <addr-line>Haifa</addr-line>
        <country>Israel</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Baruch Ivcher School of Psychology</institution>
        <institution>Reichman University (IDC)</institution>
        <addr-line>Herzliya</addr-line>
        <country>Israel</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Communications Disorders</institution>
        <institution>Achva Academic College</institution>
        <addr-line>Arugot</addr-line>
        <country>Israel</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Toronto Rehabilitation Institute</institution>
        <institution>University Health Networks</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Speech-Language Pathology</institution>
        <institution>University of Toronto</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Boaz M Ben-David <email>boaz.ben.david@idc.ac.il</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Jul-Sep</season>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>28</day>
        <month>7</month>
        <year>2022</year>
      </pub-date>
      <volume>10</volume>
      <issue>3</issue>
      <elocation-id>e32297</elocation-id>
      <history>
        <date date-type="received">
          <day>24</day>
          <month>7</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>3</day>
          <month>12</month>
          <year>2021</year>
        </date>
        <date date-type="rev-recd">
          <day>21</day>
          <month>4</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>28</day>
          <month>4</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Gal Nitsan, Shai Baharav, Dalith Tal-Shir, Vered Shakuf, Boaz M Ben-David. Originally published in JMIR Serious Games (https://games.jmir.org), 28.07.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Serious Games, is properly cited. The complete bibliographic information, a link to the original publication on https://games.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://games.jmir.org/2022/3/e32297" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>The number of serious games for cognitive training in aging (SGCTAs) is proliferating in the market and attempting to combat one of the most feared aspects of aging—cognitive decline. However, the efficacy of many SGCTAs is still questionable. Even the measures used to validate SGCTAs are up for debate, with most studies using cognitive measures that gauge improvement in trained tasks, also known as <italic>near transfer</italic>. This study takes a different approach, testing the efficacy of the SGCTA—<italic>Effectivate</italic>—in generating tangible <italic>far-transfer</italic> improvements in a nontrained task—the Eye tracking of Word Identification in Noise Under Memory Increased Load (E-WINDMIL)—which tests speech processing in adverse conditions.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to validate the use of a real-time measure of speech processing as a gauge of the far-transfer efficacy of an SGCTA designed to train executive functions.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>In a randomized controlled trial that included 40 participants, we tested 20 (50%) older adults before and after self-administering the SGCTA <italic>Effectivate</italic> training and compared their performance with that of the control group of 20 (50%) older adults. The E-WINDMIL eye-tracking task was administered to all participants by blinded experimenters in 2 sessions separated by 2 to 8 weeks.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Specifically, we tested the change between sessions in the efficiency of segregating the spoken target word from its sound-sharing alternative, as the word unfolds in time. We found that training with the SGCTA <italic>Effectivate</italic> improved both early and late speech processing in adverse conditions, with higher discrimination scores in the training group than in the control group (early processing: <italic>F</italic><sub>1,38</sub>=7.371; <italic>P</italic>=.01; <italic>η</italic><sub>p</sub><sup>2</sup>=0.162 and late processing: <italic>F</italic><sub>1,38</sub>=9.003; <italic>P</italic>=.005; <italic>η</italic><sub>p</sub><sup>2</sup>=0.192).</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This study found the E-WINDMIL measure of speech processing to be a valid gauge for the far-transfer effects of executive function training. As the SGCTA <italic>Effectivate</italic> does not train any auditory task or language processing, our results provide preliminary support for the ability of <italic>Effectivate</italic> to create a generalized cognitive improvement. Given the crucial role of speech processing in healthy and successful aging, we encourage researchers and developers to use speech processing measures, the E-WINDMIL in particular, to gauge the efficacy of SGCTAs. We advocate for increased industry-wide adoption of far-transfer metrics to gauge SGCTAs.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>aging</kwd>
        <kwd>cognitive aging</kwd>
        <kwd>cognitive games</kwd>
        <kwd>serious games</kwd>
        <kwd>speech processing</kwd>
        <kwd>spoken language processing</kwd>
        <kwd>eye tracking</kwd>
        <kwd>visual world paradigm</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>The age distribution of the world’s population is projected to dramatically shift over the next few decades as improved health care continues to extend life expectancy [<xref ref-type="bibr" rid="ref1">1</xref>]. By 2050, more than one-fourth (27%) of the European population is expected to be aged &#62;65 years. Although medicine can prolong relative physical health [<xref ref-type="bibr" rid="ref2">2</xref>], offsetting age-related changes in cognitive health is growing in importance [<xref ref-type="bibr" rid="ref3">3</xref>]. Although recent literature suggests that cognitive measures may inflate age-related decreases in performance [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref6">6</xref>] and disregard an increase in crystallized intelligence (eg, general knowledge and vocabulary [<xref ref-type="bibr" rid="ref7">7</xref>]), a decrease in cognitive performance is one of the most feared aspects of aging [<xref ref-type="bibr" rid="ref8">8</xref>]. Consequently, there is growing pressure to prolong active and healthy aging—<italic>successful aging</italic> [<xref ref-type="bibr" rid="ref9">9</xref>]—by targeting age-related changes in cognitive abilities such as memory and executive functions (EFs; eg, inhibition and working memory [WM]) [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>].</p>
        <p>Numerous serious games for cognitive training in aging (SGCTAs) are being developed to mediate age-related cognitive changes. However, there is much debate in the literature regarding their efficacy. In fact, in 2014, a total of 2 teams of researchers published contradictory open letters. The first letter by a group of 70 scientists refuted the efficacy of such training [<xref ref-type="bibr" rid="ref12">12</xref>]. The second letter by another group of 133 scientists claimed the opposite, supporting the benefit of cognitive training [<xref ref-type="bibr" rid="ref13">13</xref>]. These letters were followed by an extensive review [<xref ref-type="bibr" rid="ref14">14</xref>] suggesting that SGCTAs, in general, can improve performance on the trained game and associated activities—<italic>near transfer</italic>. However, the review cautions that there is insufficient evidence to suggest that these changes can <italic>generalize</italic> to activities that are not directly associated with the game—<italic>far transfer</italic>. As cognitive training games and interventions are a “means to enhance performance on other tasks” [<xref ref-type="bibr" rid="ref14">14</xref>], it seems critical to measure their effects using far-transfer measures that gauge daily activities. Such far-transfer measures would gauge cognitive abilities through performance on a different task that is mediated by the trained cognitive functions.</p>
        <p>Effective communication and speech perception play an extensive role in many daily activities and have evident effects on general health and well-being [<xref ref-type="bibr" rid="ref15">15</xref>]. Difficulty in understanding speech in adverse conditions (eg, noisy background or while conducting another task) forms one of the most prevalent complaints among older adults [<xref ref-type="bibr" rid="ref16">16</xref>]. These difficulties decrease the participation of older adults in social and professional interactions, thus limiting their independence and increasing feelings of loneliness. Growing evidence suggests that a decrease in speech processing, in turn, has a negative effect on mental health, general well-being, and even longevity [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref21">21</xref>]. The social restrictions imposed by the COVID-19 pandemic further limit interactions and other opportunities for cognitive exercise (eg, work and volunteering). Indeed, current restrictions have been found to increase loneliness and depression in older age [<xref ref-type="bibr" rid="ref22">22</xref>], even after vaccinations were made available [<xref ref-type="bibr" rid="ref23">23</xref>] and severe social restrictions were lifted [<xref ref-type="bibr" rid="ref24">24</xref>]. These, together with limited access to health care services as a result of the pandemic [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>], illustrate the necessity to create effective SGCTAs that can directly affect spoken communication in adverse conditions, even while social distancing. We suggest that testing speech processing as a far-transfer task could demonstrate the impact of training with SGCTAs on the daily lives of older adults.</p>
        <p>In this exploratory study, we used an eye-tracking paradigm to assess whether training EFs with the SGCTA <italic>Effectivate</italic> generalizes to improved speech processing in adverse listening conditions for older adults. This will serve to validate a real-time measure of speech perception—Eye tracking of Word Identification in Noise Under Memory Increased Load (E-WINDMIL)—as a gauge of far-transfer efficacy of SGCTAs designed to train EFs and provide a case study for developers and academics on the use of far-transfer metrics.</p>
      </sec>
      <sec>
        <title>EFs and Speech Processing</title>
        <p>Many SGCTA developers are targeting EFs because of their prominent role in healthy cognitive aging (refer to the seminal work by Salthouse [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]). EFs, which include WM and inhibition, enable active maintenance and manipulation of bottom-up information with top-down information in memory, especially during the performance of a concurrent task [<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref31">31</xref>]. The literature suggests that individuals with better EFs are able to hold more incoming information and incorporate and manipulate it more easily, even under adverse conditions such as distractions (ie, external noise) and memory preload (ie, remembering the context of a conversation [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]). Therefore, it is not surprising that EFs play a significant role in speech processing [<xref ref-type="bibr" rid="ref34">34</xref>].</p>
        <p>Consider a scenario in which an older adult is driving his grandson in a car and radio music is playing. The grandson says, “grandpa, have you seen the DOLL?” The older listener must perform the following tasks:</p>
        <list list-type="order">
          <list-item>
            <p>Segregate the spoken message from the background radio noise (task-irrelevant) stream as it unfolds in time</p>
          </list-item>
          <list-item>
            <p>Inhibit the activation of competing (similar-sounding) words in the mental lexicon (eg, <italic>/DO/</italic> sounds in words such as <italic>DOG</italic>) while increasing the activation of the word DOLL, as the sound <italic>L</italic> unfolds in time</p>
          </list-item>
          <list-item>
            <p>Allocate enough resources for the activities mentioned previously from a limited cognitive resource pool that is already depleted by the concurrent task of driving</p>
          </list-item>
        </list>
        <p>As mentioned previously, EFs, especially WM, are essential to perform this complex task and have been shown to be affected by aging in the following ways: (1) stream segregation slows with aging, (2) decrease in the efficiency of inhibition impairs the ability to reject incorrect lexical candidates, (3) decrease in cognitive resources can impair speech perception, and (4) age-related hearing loss distorts the perception of bottom-up signals.</p>
        <p>First, stream segregation slows with aging [<xref ref-type="bibr" rid="ref35">35</xref>]. Reduced WM capacity has been linked to limitations in inhibition [<xref ref-type="bibr" rid="ref36">36</xref>]. This affects the ability to separate relevant speech from irrelevant background noise. For example, in a study by Janse [<xref ref-type="bibr" rid="ref37">37</xref>], when speech was presented in background noise, poor inhibitory abilities led to greater interference by the competing noise, which impaired speech perception in older adults.</p>
        <p>Second, an age-related decrease in the efficiency of inhibition [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref38">38</xref>] impairs the ability to reject incorrect lexical candidates as the context unfolds in time [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>].</p>
        <p>Third, age-related decreases in cognitive resources, specifically in EFs and WM [<xref ref-type="bibr" rid="ref38">38</xref>], can impair speech perception, as suggested by the <italic>Framework for Understanding Effortful Listening</italic> [<xref ref-type="bibr" rid="ref41">41</xref>]. The <italic>Framework for Understanding Effortful Listening</italic> is an adaptation of the capacity model of attention by Nobel laureate Daniel Kahneman, which conceptualizes the relationship between mental resource capacity and cognitive demands. According to this model, mental resources have limited capacity. The presence of background noise or another resource-consuming task (eg, driving) can impede and slow down speech processing for people with lower resource capacities.</p>
        <p>Finally, age-related hearing loss distorts the perception of bottom-up signals, providing impoverished input to the central nervous system. To mitigate these effects, older adults rely heavily on the linguistic context in word recognition, often to an even greater degree than younger adults [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. Efficient context processing depends on WM capacity and information processing speed [<xref ref-type="bibr" rid="ref44">44</xref>]. As mentioned previously, these capabilities decline with age [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref45">45</xref>] and can affect older adults’ ability to use context during word recognition. Depleted WM capacity can also affect the ability to temporarily remember words from a given linguistic context for later use [<xref ref-type="bibr" rid="ref46">46</xref>].</p>
        <p>In summary, cognitive performance is intertwined with speech perception, especially in older age. Age-related difficulties in speech perception are not only affected by reduced cognitive abilities but can also accelerate the rate of cognitive decline. A total of 2 Lancet reports [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref47">47</xref>] on dementia prevention highlighted improving auditory and speech accessibility as the number one modifiable risk factor in middle to late life. In fact, the relative weight of speech accessibility in preventing dementia is estimated to be higher than in tackling smoking, diabetes, hypertension, and obesity altogether. As Lin [<xref ref-type="bibr" rid="ref48">48</xref>] suggests in his <italic>Aging and Cognitive Health Evaluation in Elders</italic> <italic>model</italic> [<xref ref-type="bibr" rid="ref49">49</xref>], degraded speech processing affects cognitive resilience in aging by decreasing physical activities, social interactions, communication, and related brain functions. Hence, it is plausible to assume that training EFs should enable participants to juggle informational weight more gracefully and process speech faster, in turn, improving their quality of life and well-being.</p>
      </sec>
      <sec>
        <title>Speech as a Far-Transfer Measure of Cognitive Training Using Eye Tracking</title>
        <p>To test the effect of cognitive training on speech processing, this study used eye tracking. We used a noninvasive infrared light source and high-precision camera that collects reflections from the eye and records the exact location of the eye gaze on the display at a rate of 500 samples per second. As the word unfolds in time, eye gaze data are time locked with what is being heard by the listener. By recording the participant’s eye movements in relation to the visual display and auditory stimuli, eye tracking provides a highly sensitive and continuous measure of spoken word processing. Unlike overt non–real-time responses (participant verbally or physically responding <italic>after</italic> the word has been heard), the covert rapidity of an eye movement allows one to determine the point in time at which the listener is able to isolate the target word from its competitors through the difference in fixations on the target and competitor over time. Although non–real-time responses, such as pointing at the screen, may be affected by age-related motor slowing, covert eye movement speed and accuracy are relatively unaffected [<xref ref-type="bibr" rid="ref50">50</xref>].</p>
        <p>To specifically gauge the cognitive mechanisms involved in speech processing under adverse conditions, our laboratory adapted the <italic>Visual World</italic> eye-tracking paradigm [<xref ref-type="bibr" rid="ref51">51</xref>] to include a concurrent task (increasing memory load) and noise (increasing distractions), creating the E-WINDMIL [<xref ref-type="bibr" rid="ref44">44</xref>]. In E-WINDMIL, listeners hear Hebrew sentences such as “point at the box” while viewing a visual display on a computer screen that contains 4 objects. In this example (<xref rid="figure1" ref-type="fig">Figure 1</xref>), the display shows a picture of the named object heard by the participant, <italic>box /ar.gaz/</italic>, along with three other objects: a phonological competitor (eg, an onset competitor that shares the first syllable with the target, <italic>rabbit /ar.nav/</italic>) and 2 additional objects that are neither semantically nor phonologically related to the heard target object or its name. Participants are asked to touch the picture of the object as quickly and accurately as possible while their eye gaze is recorded. Rather than analyzing the slower overt touch response, only the eye gaze is taken into account in later analysis.</p>
        <p>As real-life speech processing is often accompanied by other tasks, before the onset of the spoken instructions, participants are also asked to retain in memory either 1 or 4 digits (low or high memory load, respectively) for later recall. A discrimination score, which is the difference between the proportion of eye gaze fixations to the target (image representing the heard word) and the phonological competitor, was used to assess the 2 groups. The higher the difference, the more efficient the listener is in discriminating the spoken target from its signal-sharing competitor. Using the same eye-tracking paradigm, Nitsan et al [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>] showed that listeners with larger WM capacity were able to identify the target word (and reject the signal-sharing competitor) earlier than a matched group with lower capacity. These findings suggest that improving one’s cognitive capacity might improve speech processing in adverse conditions.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>An example of the experimental display. The target word in this example, /ar.nav/ (rabbit), is represented in the bottom-left corner. The phonological competitor, /ar.gaz/ (box), is represented in the bottom-right corner. The words /si.ra/ and /max.ʃev/ (boat and computer, respectively) are unrelated distractors.</p>
          </caption>
          <graphic xlink:href="games_v10i3e32297_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>This Study</title>
        <p>A total of 2 groups of older adults were tested <italic>twice</italic> on the E-WINDMIL speech processing task. One group received no cognitive training, whereas the other group followed the <italic>Effectivate</italic> SGCTA training protocol for 6 weeks. We aimed to test whether a short training period using <italic>Effectivate</italic> would engender a significant far-transfer change in speech processing ability. As the tested SGCTA does not involve any type of auditory training, improved performance on the E-WINDMIL speech processing task would provide strong support for the far-transfer effect and demonstrate the use of far-transfer measures in gauging training success for the validation of SGCTAs.</p>
        <p>We hypothesized that if the tested SGCTA, <italic>Effectivate</italic>, improves generalized EFs, speech processing in adverse conditions, as measured by performance on the E-WINDMIL, would improve for the training group. Specifically, the difference in discrimination scores between the training group and control group would not be significant in the first test session, although an advantage for the training group would be found in the second test session (after training). This would suggest that training had a significant impact on real-time speech processing, above and beyond practice with the E-WINDMIL task.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Participants</title>
        <p>A total of 54 older adults were recruited via phone calls from the Reichman University’s older adult research volunteer group and randomly assigned to either the cognitive training or control group. Although the groups cannot be said to reflect the diversity of the global older adult population, they are representative of the population residing in central Israel, where the study was conducted. Of these 54 individuals, 8 (15%) did not return for the second eye-tracking session, and 6 (11%) were excluded because of failure in eye movement recording or loss of eye-tracking signal. Recruitment was continuous for the duration of 6 months. Owing to the COVID-19 pandemic, participant recruitment and data collection were limited and terminated earlier than expected. The training group comprised 50% (20/40) of older adults (mean age 65.65, SD 4.8 years; 14/20, 70% were women). The control group comprised 50% (20/40) of older adults (mean age 69.05, SD 3.8 years; 13/20, 65% were women) from the study by Baharav et al [<xref ref-type="bibr" rid="ref54">54</xref>]. All participants met the research inclusion criteria (refer to <xref ref-type="boxed-text" rid="box1">Textbox 1</xref> for details). As shown in <xref ref-type="table" rid="table1">Table 1</xref>, the 2 groups had similar gender distribution (<italic>P</italic>=.74). Hearing acuity (pure tone average), years of education, and forward digit span scores did not differ significantly between the 2 groups (<italic>P</italic>=.51, <italic>P</italic>=.74, and <italic>P</italic>=.76, respectively). However, participants in the training group were slightly younger (<italic>t</italic><sub>38</sub>=2.48; <italic>P</italic>=.02). All the participants provided written informed consent.</p>
        <boxed-text id="box1" position="float">
          <title>Inclusion criteria for participant recruitment.</title>
          <p>
            <bold>Language background</bold>
          </p>
          <p>High proficiency Hebrew speakers (no early bilinguals were included), assessed by a self-report and a score within the normal range in the Wechsler Adult Intelligence Scale–3 Hebrew vocabulary subtest</p>
          <p>
            <bold>Hearing</bold>
          </p>
          <p>Symmetrical air conduction hearing thresholds, expressed as pure tone averages of ≤25 dB hearing level in each ear (0.5 kHz, 1 kHz, and 2 kHz), and no reported history of auditory pathology</p>
          <p>
            <bold>Vision</bold>
          </p>
          <p>Normal or corrected to normal visual acuity and color vision, assessed by the Landolt C charts and the Ishihara charts</p>
          <p>
            <bold>Cognition: working memory</bold>
          </p>
          <p>Clinically normal scores for their age range on the Montreal Cognitive Assessment cognitive screening test and on the forward and backward digit span subtests (Hebrew version of Wechsler Adult Intelligence Scale–3 [<xref ref-type="bibr" rid="ref46">46</xref>])</p>
        </boxed-text>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Demographic characteristics (N=40).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="380"/>
            <col width="170"/>
            <col width="170"/>
            <col width="90"/>
            <col width="120"/>
            <col width="70"/>
            <thead>
              <tr valign="top">
                <td>Characteristics</td>
                <td>Training group (n=20)</td>
                <td>Control group (n=20)</td>
                <td colspan="3">Group comparison</td>
              </tr>
              <tr valign="bottom">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td><italic>t</italic> test<sup>a</sup> (<italic>df</italic>)</td>
                <td>Chi-square (<italic>df</italic>)</td>
                <td><italic>P</italic> value</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Age (years), mean (SD)</td>
                <td>65.65 (4.848)</td>
                <td>69.041 (3.605)</td>
                <td>2.478 (38)</td>
                <td>N/A<sup>b</sup></td>
                <td>.02</td>
              </tr>
              <tr valign="top">
                <td>Gender (women), n (%)</td>
                <td>14 (70)</td>
                <td>13 (65)</td>
                <td>N/A</td>
                <td>0.4 (1)</td>
                <td>.74</td>
              </tr>
              <tr valign="top">
                <td>Hearing (across 0.5 kHz, 1 kHz, and 2 kHz), mean (SD)</td>
                <td>16.79 (4.939)</td>
                <td>17.85 (4.913)</td>
                <td>0.672 (37)</td>
                <td>N/A</td>
                <td>.51</td>
              </tr>
              <tr valign="top">
                <td>Education (years), mean (SD)</td>
                <td>16.42 (2.244)</td>
                <td>16.18 (2.69)</td>
                <td>0.339 (34)</td>
                <td>N/A</td>
                <td>.74</td>
              </tr>
              <tr valign="top">
                <td>Digit span, mean (SD)</td>
                <td>9.9 (1.714)</td>
                <td>9.75 (1.333)</td>
                <td>0.309 (38)</td>
                <td>N/A</td>
                <td>.76</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>The <italic>t</italic> test was 2-tailed.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>N/A: not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>Ethics approval for this study was obtained from the Reichman University (Interdisciplinary Center Herzliya) institutional review board (P_1920119). This study was conducted in line with the CONSORT-EHEALTH (Consolidated Standards of Reporting Trials of Electronic and Mobile Health Applications and Online Telehealth) checklist (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p>
      </sec>
      <sec>
        <title>Stimuli</title>
        <sec>
          <title>Auditory Stimuli</title>
          <p>Auditory stimuli were taken from the study by Nitsan et al [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>] and contained both the object names describing the visual stimuli and the sentence, “point at the ___ [target word]” in Hebrew using a plural non–gender-specific form. All object names were disyllabic. The average target word duration, including the Hebrew definite article <italic>ha-</italic> (the), was 1078 (SD 91) milliseconds. The root mean square intensity was equated across all recorded sentences. Files were mixed with a continuous speech spectrum noise at a fixed 0 dB signal-to-noise ratio based on values for the discrimination timeline in the study by Ben-David et al [<xref ref-type="bibr" rid="ref55">55</xref>]. Stimuli were presented binaurally at 50 dB above the individual pure tone average via a MAICO MA-51 (MAICO) audiometer using TDH 39 supra-aural headphones (Telephonics).</p>
        </sec>
        <sec>
          <title>Visual Display</title>
          <p>In each trial, the participants were presented with a 3×3 grid with 4 images of objects positioned at the grid corners (<xref rid="figure1" ref-type="fig">Figure 1</xref>). The stimuli (images) were previously used in the studies by Hadar et al [<xref ref-type="bibr" rid="ref56">56</xref>] and Nitsan et al [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>] and were confirmed to be clearly identifiable and highly familiar. In all the trials, 1 of the 4 image names corresponded with the spoken target word. In critical trials, a second image name was a phonological competitor: sharing the initial syllable (onset overlap) or the final syllable (offset overlap) with the spoken target word. The remaining 2 objects presented on the screen were phonologically and semantically unrelated to both the target word and phonological competitor. In addition to critical trials, filler trials were used to diminish participant expectation of onset phonetic resemblance between the depicted object names. In filler trials, all 3 distractors were phonologically and semantically unrelated to the target word.</p>
          <p>The original image database was divided into two to create 2 image sets, which were counterbalanced between participants for testing sessions 1 and 2. Within each testing session, objects were presented twice: once as a critical trial and once as a filler trial in which one of the 2 phonetically <italic>unrelated</italic> items was used as the target word. To prevent implicit spatial learning within a single testing session, object positions on the screen were rotated at each presentation.</p>
        </sec>
      </sec>
      <sec>
        <title>Procedure</title>
        <sec>
          <title>Overview</title>
          <p>The study comprised 2 experimental sessions, both conducted individually in a dedicated experimental laboratory complex at the Reichman University. In the first session, participants signed an informed consent form, and the inclusion criteria measures were collected. The E-WINDMIL paradigm was administered (as presented in the following section) to determine the participants’ baseline performance. To maintain experimenter blindness to the conditions, 2 different research assistants conducted the experiment. One research assistant conducted the E-WINDMIL and auditory testing, and the other research assistant assigned participants to each group and presented the participants in the training group with a web address providing access to the <italic>Effectivate</italic> SGCTA and instructed them to train at least 3 times a week for a duration of 5 weeks, after which they returned for the second experimental session. They were called once a week to verify <italic>Effectivate</italic> training. In the control group, participants were asked to maintain their daily routine and return within 2 to 4 weeks. In the second session, the same E-WINDMIL task was administered and the participants were debriefed. All participants were aware of the academic affiliation of the researchers. Participants in the experimental condition were not blinded to the name of the SGCTA company; however, the product was still in the beta stages and, as such, was not publicly available or marketed at the time.</p>
        </sec>
        <sec>
          <title>E-WINDMIL Paradigm</title>
          <p>The experiment was administered individually in a dedicated sound-attenuated booth (IAC Acoustics). Participants were seated 60 cm away from the computer screen, with their heads placed on the designated eye-tracker chin rest to minimize head movement. Each participant’s dominant eye was calibrated to ensure that their real-time eye gaze position was recorded throughout the course of the trial. A table-mounted SR Eyelink 1000 eye tracker (SR Research Ltd) in the <italic>tower mount</italic> configuration was used. Eye gaze position was recorded using the Eyelink software at a rate of 500 Hz.</p>
          <p>Trials began with a visual cue of a black <italic>play</italic> triangle centered on the screen, immediately followed by the auditory presentation of either 1-digit preload (low-load condition) or 4-digit preload (high-load condition) through headphones. Participants were told to memorize these digits (in the order presented) for later recall. Subsequently, a 3×3 grid with the 4 images appeared (<xref rid="figure1" ref-type="fig">Figure 1</xref>). Participants were given 2 seconds to view the object positions, after which a fixation cross appeared in the center of the screen. Once the participants pressed the fixation cross to initiate the trial, the instruction sentence, “point at the ___ [target word],” was presented via the headphones. Selection of a named object was indicated by touching the object’s picture on the touch screen. Following the participant’s selection of a stimulus, a visual feedback signal appeared in the square of the selected image: red highlight for an incorrect answer or green highlight for a correct answer. Finally, the visual display was cleared, and a visual cue of a black circle appeared on the screen, signaling participants to recall aloud the digit preload from the beginning of the trial. Then, the experimenter coded the response (either correct or incorrect) in real time. Participants were instructed that the speed and accuracy of both the object selection and digit recall were equally important.</p>
          <p>In a given testing session, participants completed 68 trials, split into 2 trial blocks for each digit preload condition (low load: 1 digit; high load: 4 digits). Each condition contained 34 trials, of which 2 (6%) were practice trials, and 32 (94%) were experimental trials. The 32 trials in each condition were split such that 16 (50%) were <italic>filler</italic> trials, indicating that the target object’s name did not share any phonology with the surrounding objects, and 16 (50%) were <italic>critical</italic> trials, indicating that the target object’s name shared phonology with a surrounding object name. Of the critical trials, 50% (8/16) were phonological onset competitors (eg, <italic>/ar.nav/-/ar.gaz/</italic>), and 50% (8/16) were phonological offset competitors (eg, <italic>/xalon/-/balon/</italic>).</p>
          <p>Although participants in the experimental group were aware of the intervention, the <italic>Visual World</italic> covert eye-tracking design was found to control for participants deliberately trying to outperform in an overt choice of the target (eg, with a button press). In other words, participants cannot control eye gaze fixations toward the alternatives versus fixations toward the target once saccades have been initiated. Indeed, in the visual world paradigm, eye movements were affected by implicit task goals and relatively immune to intentions and social desirability [<xref ref-type="bibr" rid="ref57">57</xref>-<xref ref-type="bibr" rid="ref59">59</xref>].</p>
        </sec>
        <sec>
          <title>SGCTA Effectivate</title>
          <p>Following baseline testing, participants in the training group completed at-home web-based training, using a PC or tablet. A minimum of 15 training sessions were completed with approximately 8 minutes of active training per session (range 3-15 minutes). Each training session comprised 2 to 10 exercises, which were selected from a bank of 10 tasks. The difficulty level was individually adjusted for each participant and calibrated separately for each task using various measures (eg, exposure time, reaction time window, and number of objects). Each training task targeted at least one of the following cognitive functions: processing speed, WM, executive control, attentional control, sustained attention, spatial attention, binding, semantic memory, and training of several mnemonic methods. <xref rid="figure2" ref-type="fig">Figure 2</xref> presents an example of such an exercise.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>An example of a slide from the Effectivate serious game for cognitive training in aging—the exercise, The Last One Counts, is based on the ‘n-back’ task. In this exercise, the users were presented with a sequence of shapes and asked to decide whether each shape is identical to the one previously presented. Task difficulty changed gradually by updating different parameters, such as exposure latencies. In advanced levels, users were asked to decide whether the current shape is the same as, different from, or partially similar to the previously presented one. This additional level of complexity requires users to segregate the item’s different features (ie, color and shape) to selectively focus on some and inhibit others.</p>
            </caption>
            <graphic xlink:href="games_v10i3e32297_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Response Accuracy</title>
        <p><xref ref-type="table" rid="table2">Table 2</xref> presents the accuracy percentage for each experimental condition—the percentage of trials in which participants both correctly selected the corresponding object on the visual display (indicating correct spoken word recognition) and correctly recalled the preload digits (indicating correct digit recall). A Mann-Whitney independent-sample nonparametric test confirmed that WM load, test session, and participant group did not have significant effects on accuracy, with <italic>P</italic>&#62;.17 for all 4 tests.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Mean percentage (and SDs) of trials in which the target word was correctly selected and digits were correctly recalled<sup>a</sup>.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="470"/>
            <col width="0"/>
            <col width="250"/>
            <col width="0"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Participant group and WM<sup>b</sup> load</td>
                <td colspan="2">First session (%), mean (SD)</td>
                <td>Second session (%), mean (SD)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="6">
                  <bold>Training</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Low</td>
                <td colspan="2">99.4 (2.8)</td>
                <td colspan="2">98.1 (4.6)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>High</td>
                <td colspan="2">91.2 (14.1)</td>
                <td colspan="2">88.1 (15.4)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Control</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Low</td>
                <td colspan="2">98.9 (3.7)</td>
                <td colspan="2">99.4 (4.6)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>High</td>
                <td colspan="2">87.5 (13.4)</td>
                <td colspan="2">92.6 (10.7)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>Low working memory and high working memory indicate the two preload conditions, 1 digit and 4 digits, respectively.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>WM: working memory.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Eye Gaze Analysis</title>
        <p>We analyzed target discrimination scores (following the methodology of previous studies [<xref ref-type="bibr" rid="ref60">60</xref>-<xref ref-type="bibr" rid="ref63">63</xref>]) reflecting the listeners’ ability to discriminate the target word from its phonological competitor. The proportion of fixations on the competitor was subtracted from the proportion of fixations on the target within 250-millisecond time bins, starting from 250 milliseconds after the word onset to 1500 milliseconds. In this measure, the higher the value, the better listeners can discriminate the target from its competitor; values approaching 0 reflect an inability to discriminate between the target and competitor objects. Mixed-design repeated-measures ANOVAs were conducted for each 250-millisecond time bin, with three within-participants factors—WM load (high and low), test session (first and second), and condition (onset vs offset sound sharing)—and one between-participant factor—participant group (training and control). In each analysis, planned comparisons compared the effect of the participant group on discrimination scores in the first and second test sessions to verify whether differences between groups were related to the intervention (ie, significant effect only in the second session). Significant interactions of the test session with the participant group were noted in two of the five tested time bins: early processing 250 to 500 milliseconds and late processing 1250 to 1500 milliseconds, as discussed in the following section (<xref rid="figure3" ref-type="fig">Figures 3</xref> and <xref rid="figure4" ref-type="fig">4</xref>). The remaining three time bins (500-750 milliseconds, 750-1000 milliseconds, and 1000-1250 milliseconds) did not show any significant interaction; thus, they will not be discussed further.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>First test session. Mean target discrimination scores (with SE bars) for the training and control groups. Target discrimination scores are the proportion of fixations on the competitor subtracted from the proportion of fixations on the target within 250-millisecond time bins, starting from 250 milliseconds after the word onset to 1500 milliseconds.</p>
          </caption>
          <graphic xlink:href="games_v10i3e32297_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Second test session. Mean target discrimination scores (with SE bars) for the training and control groups. Target discrimination scores are the proportion of fixations on the competitor subtracted from the proportion of fixations on the target within 250-millisecond time bins, starting from 250 milliseconds after the word onset to 1500 milliseconds. *Significant effect.</p>
          </caption>
          <graphic xlink:href="games_v10i3e32297_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Early Processing: 250- to 500-Millisecond Time Bin</title>
        <p>The interaction between the test session and the participant group was found to be approaching significance (<italic>F</italic><sub>1,38</sub>=3.881; <italic>P</italic>=.06; <italic>η</italic><sub>p</sub><sup>2</sup>=0.093). Planned comparison indicated no significant difference between the 2 groups in the first session (<italic>F</italic><sub>1,38</sub>=0.056; <italic>P</italic>=.81), whereas the second session produced higher discrimination scores in the training group (<italic>F</italic><sub>1,38</sub>=7.371; <italic>P</italic>=.01; <italic>η</italic><sub>p</sub><sup>2</sup>=0.162). This suggests that improved performance can be related to the intervention itself. The effect of the participant group was marginally significant (<italic>F</italic><sub>1,38</sub>=3.048; <italic>P</italic>=.07; <italic>η</italic><sub>p</sub><sup>2</sup>=0.085), with slightly higher discrimination scores in the training group (as in the previous analysis, this difference can be related to the second session) and no significant main effect for test session (<italic>F</italic><sub>1,38</sub>=0.224; <italic>P</italic>=.64). No significant triple interactions were found for the participant group, test session, or any other tested variables (WM load or condition).</p>
      </sec>
      <sec>
        <title>Late Processing: 1250- to 1500-Millisecond Time Bin</title>
        <p>A significant interaction between the test session and the participant group was found (<italic>F</italic><sub>1,38</sub>=4.220; <italic>P</italic>&#60;.05; <italic>η</italic><sub>p</sub><sup>2</sup>=0.100). Planned comparisons indicated that although the 2 groups did not significantly differ in the first session (<italic>F</italic><sub>1,38</sub>=1.689; <italic>P</italic>=.20), the second session showed higher discrimination scores in the training group than in the control group (<italic>F</italic><sub>1,38</sub>=9.003; <italic>P</italic>=.005; <italic>η</italic><sub>p</sub><sup>2</sup>=0.192), suggesting that improved performance could be related to the intervention itself. A significant main effect of the participant group was noted (<italic>F</italic><sub>1,38</sub>=6.722; <italic>P</italic>=.01; <italic>η</italic><sub>p</sub><sup>2</sup>=0.150), with higher discrimination scores in the training group (emanating from higher scores in the second session) and no significant main effect for the test session (<italic>F</italic><sub>1,38</sub>=0.108; <italic>P</italic>=.74). No significant triple interactions were found for the participant group, test session, or any other tested variables (WM load or condition).</p>
        <p>In summary, in early and late processing (250-500 milliseconds and 1250-1500 milliseconds after word onset, respectively), performance did not differ between the 2 groups in the first test session. However, in the second test session after training with the <italic>Effectivate</italic> SGCTA, the training group surpassed the control group. These effects can be taken to suggest that the SGCTA training improved performance, over and above any effect of test-retest repetition.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>In this exploratory study, we aimed to validate an eye-tracking paradigm, the E-WINDMIL, which tests real-time speech processing in adverse conditions as a gauge for the far-transfer efficacy of SGCTAs. Specifically, we tested whether training EFs in the visual modality with the SGCTA <italic>Effectivate</italic> generalizes to improved speech processing in adverse listening conditions (auditory modality) for older adults.</p>
        <p>The training group, with 50% (20/40) of the older adults, was tested before and after 5 weeks of training on the SGCTA <italic>Effectivate</italic>. The control group, with another 50% (20/40) of the older adults, did not undergo any specific cognitive training. Before training, no significant differences in E-WINDMIL performance were noted between the control group and the training group. However, after training with <italic>Effectivate,</italic> the training group outperformed the control group in early word processing (indicated by eye movements, 250-500 milliseconds after word onset) and late word processing (1250-1500 milliseconds after word onset). The early processing advantage may suggest improved stream segregation between the spoken target word and noise [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>], when WM was otherwise occupied. The late processing advantage alludes to improved decision-making processes (ie, using accumulated evidence) once the word had been completely heard [<xref ref-type="bibr" rid="ref55">55</xref>]. Our results provide early support for the efficacy of the E-WINDMIL speech processing paradigm as a far-transfer measure of cognitive training with the SGCTA <italic>Effectivate</italic>. This is of special interest as the tested SGCTA did not train any auditory task or spoken language processing.</p>
      </sec>
      <sec>
        <title>Speech Processing as a Far-Transfer Gauge for Cognitive Training</title>
        <p>Challenges in determining the effectiveness of any cognitive intervention stem from the ongoing debate: Do we use near-transfer or far-transfer metrics? [<xref ref-type="bibr" rid="ref14">14</xref>] In other words, is it sufficient to indicate improved performance on the trained task or should research indicate improved performance on a <italic>daily</italic> task, far from training, to suggest the <italic>generalizability</italic> of training? This exploratory study demonstrates the efficacy of using a far-transfer measure that involves speech processing in adverse conditions to discern the impact EF training has on daily life activities.</p>
        <p>Speech processing in adverse conditions presents an excellent gauge of the generalizability of cognitive training. As speech processing is resource demanding, the fewer resources listeners have, the more they will be affected by adverse conditions such as background noise. Speech processing involves holding ongoing speech strings in memory and integrating words and phrases to create coherent meaning; thus, it is considered to be dependent on WM and other attentional resources [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref56">56</xref>]. The results of this study suggest the prowess of training to create a generalized cognitive effect, as a few weeks of training on the <italic>Effectivate</italic> SGCTA was sufficient to improve speech processing in adverse conditions (above and beyond test-retest learning effects).</p>
        <p>This improvement can be interpreted in light of the crucial role of EFs, especially WM, in speech processing in adverse conditions. According to the Ease of Language Understanding model [<xref ref-type="bibr" rid="ref44">44</xref>], explicit WM resources are drawn from a central pool to compensate for the loss of automatic matching between the input and lexical representations when the sound input is degraded by adverse listening conditions. Other studies have demonstrated a direct link between WM capacity and the ability to inhibit irrelevant information. This ability is necessary to separate the speech signal from background noise and reject competing words in the mental lexicon. Thus, our results suggest that training EFs using SGCTAs might have a generalized effect on real-life daily tasks. Returning to our example in the introduction, with improved WM capacity, the older adult will be better able to understand his grandson saying, “Grandpa, have you seen the DOLL?” rather than <italic>DOG</italic> (sound-sharing alternative) while driving a car (WM load) with the radio playing (adverse listening conditions) in the following ways:</p>
        <list list-type="order">
          <list-item>
            <p>Improve speech segregation—separating the spoken message from the background task-irrelevant noise (eg, radio and engine noise)</p>
          </list-item>
          <list-item>
            <p>Effectively inhibit the activation of competing similar-sounding words in the mental lexicon (eg, <italic>DOG</italic>)</p>
          </list-item>
          <list-item>
            <p>Allocate enough resources to use context and information in long-term and short-term memory from a cognitive resource pool, which is already depleted by the concurrent task of driving</p>
          </list-item>
        </list>
        <p>Given the pivotal role of speech processing in successful aging [<xref ref-type="bibr" rid="ref64">64</xref>], this change may have a lasting positive impact on the quality of life in older age.</p>
      </sec>
      <sec>
        <title>E-WINDMIL as a Far-Transfer Gauge for Cognitive Training</title>
        <p>The advantage of using the adapted visual world paradigm E-WINDMIL lies in its increased ecological validity—measuring a daily task (speech processing) that is important to the preservation of well-being and performance in older age [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref19">19</xref>]. Eye tracking used by E-WINDMIL is better suited to test older adults’ speech processing than more traditional speech tests involving overt responses, such as verbal or keypress response. It is not influenced by an age-related slowing of motor speed, which often affects non–real-time speech tests [<xref ref-type="bibr" rid="ref65">65</xref>]. Unlike many other speech processing tasks that assess the processing of a single word in <italic>ideal listening</italic> conditions, the E-WINDMIL asks listeners to retain digits for later recall (a task designed to weigh on WM resources) while presenting speech in noise. In this way, the E-WINDMIL paradigm acknowledges that speech in real-life scenarios is often experienced along with noise while the listener is engaged in other cognitively demanding tasks (eg, following the context of the sentence as it unfolds and driving). Moreover, eye tracking has been shown to be a sensitive measure for speech processing in various studies, suggesting that speech processing is costly in terms of WM processing and perhaps even mediated by it [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref67">67</xref>].</p>
      </sec>
      <sec>
        <title>Training-Related Advantage in the 1250- to 1500-Millisecond Time Bin</title>
        <p>Previous adaptations of E-WINDMIL found eye tracking to be very sensitive to differences in cognitive reserve. Hadar et al [<xref ref-type="bibr" rid="ref56">56</xref>] found that minimizing available cognitive resources can slow down processing in this task. Nitsan et al [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>] found that individuals with higher cognitive reserve outperform individuals with lower reserve while using E-WINDMIL. This advantage, attributed by the authors to the use of cognitive resources for speech processing, was indicated in the later time bins—similar to the current findings. Other studies also found that differences in cognition were indicated in later word processing with older listeners in particular [<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref68">68</xref>]. A recent study by Harel-Arbeli et al [<xref ref-type="bibr" rid="ref46">46</xref>] attributed the advantages seen in later time bins to decision-making processes. In their study, using a similar eye-tracking paradigm, the spoken target word was preceded by a spoken predictive context presented in a quiet environment. An advantage of young adults over older adults, based on the age-related difference in cognitive resources, was present mainly in the late time bin when the full word had been spoken. Taken together, it appears that improved processing in the late time bin may reflect improved cognitive resources (eg, WM and inhibition).</p>
      </sec>
      <sec>
        <title>Training-Related Advantage in the 250- to 500-Millisecond Time Bin</title>
        <p>The current data also indicated training-related advantages in processing during early time bins, when only the first phoneme of the word is being processed. This suggests that cognitive training improved target word stream formation and auditory stream segregation between the target word and noise [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref69">69</xref>]. Indeed, this early process of stream segregation has been linked not only to sensory processes but also to the deployment of cognitive resources. Cognition is necessary for the inhibition of the noise stream and selective focus on the target word stream, leading to stream segregation [<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref71">71</xref>]. Stream segregation is essential for speech processing and represents one of the major hurdles for older adults in social interactions [<xref ref-type="bibr" rid="ref72">72</xref>]. Indeed, age-related auditory sensory degradation can specifically impair processes related to stream segregation in aging [<xref ref-type="bibr" rid="ref73">73</xref>]. This early time bin training advantage may also be related to the early benefits noted in the literature as a result of removing background noise [<xref ref-type="bibr" rid="ref60">60</xref>] and increasing the lexical frequency of the spoken word [<xref ref-type="bibr" rid="ref74">74</xref>] using similar eye-tracking paradigms. In summary, the performance advantage in the early time bin associated with SGCTA training may reflect an increase in cognitive reserve.</p>
      </sec>
      <sec>
        <title>Caveats and Future Studies</title>
        <p>This study should be taken as a first step in supporting the effectiveness of the tested SGCTA, and it does not serve as a recommendation or suggestion to use SGCTAs in general or specifically the <italic>Effectivate</italic> SGCTA. This study was ongoing at the beginning of the COVID-19 pandemic and was halted because of national quarantine. Therefore, we were unable to amass a larger group of participants. Moreover, we were unable to recruit an active control group to undergo an alternative form of cognitive training. Future studies should attempt to replicate the results with an active control to ensure that the observed effects were not related to possible social desirability or lack of participant blinding but to the specific cognitive training, <italic>Effectivate</italic>. However, we note that the experimenters administering the study were blinded to the condition, and the experimental tool was relatively immune to social desirability. Such replications should also more carefully match participants across all groups. Indeed, on average, participants in the control group were older by a few years than those in the training group. We also note that participants in this study did not form a representative sample of the older adult population, specifically given the cognitive and linguistic inclusion criteria. Although future studies should aim to include more diverse samples, these criteria are common in research with this population [<xref ref-type="bibr" rid="ref75">75</xref>-<xref ref-type="bibr" rid="ref77">77</xref>].</p>
        <p>We demonstrated that the <italic>Effectivate</italic> SGCTA is sufficiently powerful to induce changes, even in cognitively healthy older adults, and that the E-WINDMIL test is sufficiently sensitive to detect such changes. Our preliminary results are the first step, suggesting the ability of the SGCTA <italic>Effectivate</italic> to engender far cognitive transfer. Future studies should also try to relate our results to other more traditional cognitive measures and questionnaires tapping users’ subjective evaluation of their quality of life.</p>
      </sec>
      <sec>
        <title>Summary and Implications</title>
        <p>This exploratory study presents an early foray into the potential of speech processing in adverse conditions as a far-transfer gauge of SGCTAs. This is in line with previous studies that used gamification in cognitive decline research [<xref ref-type="bibr" rid="ref78">78</xref>-<xref ref-type="bibr" rid="ref81">81</xref>]. Results present a preliminary indicator of the SGCTA <italic>Effectivate’s</italic> potential to engender such far transfer from visual cognitive training to auditory speech processing after only a few weeks of training. Following training, older adults were better able to differentiate between the spoken target word and its sound-sharing competitor under adverse conditions (noise and digit memorization). We suggest that this change in performance represents a real-world improvement in a daily task that is directly related to successful aging. Thus, it shows the potential of the training to have a significant impact on the user’s daily life. We advocate that cognitive training should showcase evidence-based improvement in daily far-transfer tasks that can change the user’s quality of life, as opposed to merely showing changes in traditional pen-and-paper cognitive measures. As serious games are a means of improving performance in other tasks, games developed to the highest standards should seek out far-transfer validation methods. We hope that the increased demand for far-transfer metrics will bolster research efforts within the academic community to develop new far-transfer gauges of cognitive ability and call on serious game developers to adopt far-transfer metrics, such as E-WINDMIL, into their gauges for validity and success.</p>
        <p>This study investigated aging through the lens of speech processing, a novel vantage point, which can illuminate interconnected attentional mechanisms known to be affected by aging. Most importantly, speech processing is an essential daily task performed across social interactions, leisure, and employment [<xref ref-type="bibr" rid="ref72">72</xref>]. Impaired speech processing may have severe implications for older adults across all aspects of life. Therefore, we encourage adding tests of speech processing, especially in adverse conditions, to the arsenal of tools used to test the efficacy of EF training in aging. Furthermore, we suggest paying attention to speech processing in aging when considering accessibility and inclusion in serious game design.</p>
        <p>In addition to being a novel and important test metric for aging, real-time speech processing metrics may also prove beneficial to testing other populations such as children with the neurodevelopmental disorder, attention-deficit/hyperactivity disorder (ADHD). As the most prevalent neurodevelopmental disorder in children, ADHD is associated with lifelong impairment, with symptoms reflecting a deficit in EFs such as inhibitory control, attentional regulation, and WM [<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref83">83</xref>]. Given ADHD’s high prevalence and detrimental effect on the quality of life and well-being, many serious games are being developed to train EFs in ADHD. As is the case with SGCTAs, there is much debate in the literature regarding their efficacy [<xref ref-type="bibr" rid="ref84">84</xref>]. Expanding on our findings, we suggest further exploration using E-WINDMIL to test the far-transfer efficacy of serious computerized games designed for children and adults with ADHD along with other promising populations that could benefit. We hope that the creation and use of universally accepted far-transfer metrics will determine gold standard serious games that will help us prolong cognitive functions and improve well-being with age and throughout life.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>CONSORT-eHEALTH checklist (V 1.6.1).</p>
        <media xlink:href="games_v10i3e32297_app1.pdf" xlink:title="PDF File (Adobe PDF File), 2969 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">ADHD</term>
          <def>
            <p>attention-deficit/hyperactivity disorder</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CONSORT-EHEALTH</term>
          <def>
            <p>Consolidated Standards of Reporting Trials of Electronic and Mobile Health Applications and Online Telehealth</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">E-WINDMIL</term>
          <def>
            <p>Eye tracking of Word Identification in Noise Under Memory Increased Load</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">EF</term>
          <def>
            <p>executive function</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">SGCTA</term>
          <def>
            <p>serious game for cognitive training in aging</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">WM</term>
          <def>
            <p>working memory</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This study was partially supported by a grant from <italic>Effectivate</italic> (Acerar Ltd). <italic>Effectivate</italic> was not involved in the study design; collection, analysis, and interpretation of data; writing of this paper; or decision to submit it for publication. The authors had full liberty in all of these aspects. The authors wish to acknowledge the assistance of the following students and staff in gathering the data: Maya Mentzel, Dana Yehezkel, and Tali Livne.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
      <fn fn-type="other">
        <p>
          <bold>Editorial Notice</bold>
        </p>
        <p>This randomized study was not registered as the local Institutional Review Board did not believe it was needed. The editor granted an exception from ICMJE rules mandating prospective registration of randomized trials, because the risk of bias appears low, the study was considered exploratory, and the authors' Institutional Review Board provided a letter of approval which recommended that registration not be mandated for this study. However, it should be noted that such exceptions are uncommon, and readers are advised to carefully assess the validity of any potential explicit or implicit claims related to primary outcomes or effectiveness.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Ageing and health</article-title>
          <source>World Health Organization</source>
          <access-date>2019-07-19</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.who.int/news-room/fact-sheets/detail/ageing-and-health">https://www.who.int/news-room/fact-sheets/detail/ageing-and-health</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eggleston</surname>
              <given-names>KN</given-names>
            </name>
            <name name-style="western">
              <surname>Fuchs</surname>
              <given-names>VR</given-names>
            </name>
          </person-group>
          <article-title>The new demographic transition: most gains in life expectancy now realized late in life</article-title>
          <source>J Econ Perspect</source>
          <year>2012</year>
          <month>08</month>
          <day>01</day>
          <volume>26</volume>
          <issue>3</issue>
          <fpage>137</fpage>
          <lpage>56</lpage>
          <pub-id pub-id-type="doi">10.1257/jep.26.3.137</pub-id>
          <pub-id pub-id-type="medline">25076810</pub-id>
          <pub-id pub-id-type="pmcid">PMC4112481</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="book">
          <source>The Handbook of Aging and Cognition</source>
          <year>2007</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Psychology Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>A sensory origin for color-word Stroop effects in aging: a meta-analysis</article-title>
          <source>Neuropsychol Dev Cogn B Aging Neuropsychol Cogn</source>
          <year>2009</year>
          <month>09</month>
          <day>02</day>
          <volume>16</volume>
          <issue>5</issue>
          <fpage>505</fpage>
          <lpage>34</lpage>
          <pub-id pub-id-type="doi">10.1080/13825580902855862</pub-id>
          <pub-id pub-id-type="medline">19479479</pub-id>
          <pub-id pub-id-type="pii">911771785</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>A sensory origin for color-word stroop effects in aging: simulating age-related changes in color-vision mimics age-related changes in Stroop</article-title>
          <source>Neuropsychol Dev Cogn B Aging Neuropsychol Cogn</source>
          <year>2010</year>
          <month>11</month>
          <day>08</day>
          <volume>17</volume>
          <issue>6</issue>
          <fpage>730</fpage>
          <lpage>46</lpage>
          <pub-id pub-id-type="doi">10.1080/13825585.2010.510553</pub-id>
          <pub-id pub-id-type="medline">21058053</pub-id>
          <pub-id pub-id-type="pii">929083261</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="book">
          <article-title>Ageism and neuropsychological tests</article-title>
          <source>Contemporary Perspectives on Ageism</source>
          <year>2018</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Erel</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Goy</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>"Older is always better": age-related differences in vocabulary scores across 16 years</article-title>
          <source>Psychol Aging</source>
          <year>2015</year>
          <month>12</month>
          <volume>30</volume>
          <issue>4</issue>
          <fpage>856</fpage>
          <lpage>62</lpage>
          <pub-id pub-id-type="doi">10.1037/pag0000051</pub-id>
          <pub-id pub-id-type="medline">26652725</pub-id>
          <pub-id pub-id-type="pii">2015-55483-006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morley</surname>
              <given-names>JE</given-names>
            </name>
          </person-group>
          <article-title>The top 10 hot topics in aging</article-title>
          <source>J Gerontol A Biol Sci Med Sci</source>
          <year>2004</year>
          <month>01</month>
          <day>01</day>
          <volume>59</volume>
          <issue>1</issue>
          <fpage>24</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1093/gerona/59.1.m24</pub-id>
          <pub-id pub-id-type="medline">14718483</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rowe</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Kahn</surname>
              <given-names>RL</given-names>
            </name>
          </person-group>
          <article-title>Successful aging 2.0: conceptual expansions for the 21st Century</article-title>
          <source>J Gerontol B Psychol Sci Soc Sci</source>
          <year>2015</year>
          <month>07</month>
          <day>15</day>
          <volume>70</volume>
          <issue>4</issue>
          <fpage>593</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1093/geronb/gbv025</pub-id>
          <pub-id pub-id-type="medline">25878054</pub-id>
          <pub-id pub-id-type="pii">gbv025</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="book">
          <source>Ear and Hearing Care Planning and Monitoring of National Strategies</source>
          <year>2015</year>
          <publisher-loc>Geneva</publisher-loc>
          <publisher-name>World Health Organization</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="web">
          <article-title>Active ageing: a policy framework</article-title>
          <source>World Health Organization</source>
          <access-date>2015-05-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://apps.who.int/iris/handle/10665/67215">https://apps.who.int/iris/handle/10665/67215</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="web">
          <article-title>Stanford Center on longevity letter</article-title>
          <source>Cognitive Training Data</source>
          <access-date>2022-07-10</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.cognitivetrainingdata.org/the-controversy-does-brain-training-work/stanford-letter/">https://www.cognitivetrainingdata.org/the-controversy-does-brain-training-work/stanford-letter/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="web">
          <article-title>Cognitive training data response letter</article-title>
          <source>Cognitive Training Data</source>
          <access-date>2022-07-10</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.cognitivetrainingdata.org/the-controversy-does-brain-training-work/response-letter/">https://www.cognitivetrainingdata.org/the-controversy-does-brain-training-work/response-letter/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Simons</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Boot</surname>
              <given-names>WR</given-names>
            </name>
            <name name-style="western">
              <surname>Charness</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gathercole</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Chabris</surname>
              <given-names>CF</given-names>
            </name>
            <name name-style="western">
              <surname>Hambrick</surname>
              <given-names>DZ</given-names>
            </name>
            <name name-style="western">
              <surname>Stine-Morrow</surname>
              <given-names>EA</given-names>
            </name>
          </person-group>
          <article-title>Do "Brain-Training" programs work?</article-title>
          <source>Psychol Sci Public Interest</source>
          <year>2016</year>
          <month>10</month>
          <volume>17</volume>
          <issue>3</issue>
          <fpage>103</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1177/1529100616661983</pub-id>
          <pub-id pub-id-type="medline">27697851</pub-id>
          <pub-id pub-id-type="pii">17/3/103</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Livingston</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Sommerlad</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Orgeta</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Costafreda</surname>
              <given-names>SG</given-names>
            </name>
            <name name-style="western">
              <surname>Huntley</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ames</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ballard</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Banerjee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Burns</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen-Mansfield</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cooper</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Fox</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gitlin</surname>
              <given-names>LN</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kales</surname>
              <given-names>HC</given-names>
            </name>
            <name name-style="western">
              <surname>Larson</surname>
              <given-names>EB</given-names>
            </name>
            <name name-style="western">
              <surname>Ritchie</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Rockwood</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sampson</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Samus</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>LS</given-names>
            </name>
            <name name-style="western">
              <surname>Selbæk</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Teri</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mukadam</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Dementia prevention, intervention, and care</article-title>
          <source>Lancet</source>
          <year>2017</year>
          <month>12</month>
          <volume>390</volume>
          <issue>10113</issue>
          <fpage>2673</fpage>
          <lpage>734</lpage>
          <pub-id pub-id-type="doi">10.1016/s0140-6736(17)31363-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="book">
          <article-title>Effects of senescent changes in audition and cognition on spoken language comprehension</article-title>
          <source>The Aging Auditory System</source>
          <year>2010</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>McMahon</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Pichora-Fuller</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Russ</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Olusanya</surname>
              <given-names>BO</given-names>
            </name>
            <name name-style="western">
              <surname>Chadha</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tremblay</surname>
              <given-names>KL</given-names>
            </name>
          </person-group>
          <article-title>Aging and hearing health: the life-course approach</article-title>
          <source>Gerontologist</source>
          <year>2016</year>
          <month>04</month>
          <volume>56 Suppl 2</volume>
          <fpage>S256</fpage>
          <lpage>67</lpage>
          <pub-id pub-id-type="doi">10.1093/geront/gnw033</pub-id>
          <pub-id pub-id-type="medline">26994265</pub-id>
          <pub-id pub-id-type="pii">gnw033</pub-id>
          <pub-id pub-id-type="pmcid">PMC6283365</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karpa</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Gopinath</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Beath</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Rochtchina</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cumming</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Mitchell</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Associations between hearing impairment and mortality risk in older persons: the Blue Mountains Hearing Study</article-title>
          <source>Ann Epidemiol</source>
          <year>2010</year>
          <month>06</month>
          <volume>20</volume>
          <issue>6</issue>
          <fpage>452</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1016/j.annepidem.2010.03.011</pub-id>
          <pub-id pub-id-type="medline">20470972</pub-id>
          <pub-id pub-id-type="pii">S1047-2797(10)00057-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pichora-Fuller</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mick</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Reed</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Hearing, cognition, and healthy aging: social and public health implications of the links between age-related declines in hearing and cognition</article-title>
          <source>Semin Hear</source>
          <year>2015</year>
          <month>08</month>
          <day>9</day>
          <volume>36</volume>
          <issue>3</issue>
          <fpage>122</fpage>
          <lpage>39</lpage>
          <pub-id pub-id-type="doi">10.1055/s-0035-1555116</pub-id>
          <pub-id pub-id-type="medline">27516713</pub-id>
          <pub-id pub-id-type="pii">00675</pub-id>
          <pub-id pub-id-type="pmcid">PMC4906310</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rönnberg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Holmer</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Rudner</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Cognitive hearing science and ease of language understanding</article-title>
          <source>Int J Audiol</source>
          <year>2019</year>
          <month>05</month>
          <day>03</day>
          <volume>58</volume>
          <issue>5</issue>
          <fpage>247</fpage>
          <lpage>61</lpage>
          <pub-id pub-id-type="doi">10.1080/14992027.2018.1551631</pub-id>
          <pub-id pub-id-type="medline">30714435</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="web">
          <article-title>Cognition and hearing – you can’t test one with the other!</article-title>
          <source>Audiology Feature</source>
          <access-date>2021-03-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.entandaudiologynews.com/media/23455/entma21-ben-david.pdf">https://www.entandaudiologynews.com/media/23455/entma21-ben-david.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van Tilburg</surname>
              <given-names>TG</given-names>
            </name>
            <name name-style="western">
              <surname>Steinmetz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stolte</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>van der Roest</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>de Vries</surname>
              <given-names>DH</given-names>
            </name>
          </person-group>
          <article-title>Loneliness and mental health during the COVID-19 pandemic: a study among Dutch older adults</article-title>
          <source>J Gerontol B Psychol Sci Soc Sci</source>
          <year>2021</year>
          <month>08</month>
          <day>13</day>
          <volume>76</volume>
          <issue>7</issue>
          <fpage>e249</fpage>
          <lpage>55</lpage>
          <pub-id pub-id-type="doi">10.1093/geronb/gbaa111</pub-id>
          <pub-id pub-id-type="medline">32756931</pub-id>
          <pub-id pub-id-type="pii">5881273</pub-id>
          <pub-id pub-id-type="pmcid">PMC7454922</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Palgi</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bergman</surname>
              <given-names>YS</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Bodner</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>No psychological vaccination: vaccine hesitancy is associated with negative psychiatric outcomes among Israelis who received COVID-19 vaccination</article-title>
          <source>J Affect Disord</source>
          <year>2021</year>
          <month>05</month>
          <day>15</day>
          <volume>287</volume>
          <fpage>352</fpage>
          <lpage>3</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jad.2021.03.064</pub-id>
          <pub-id pub-id-type="medline">33819733</pub-id>
          <pub-id pub-id-type="pii">S0165-0327(21)00290-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC7997161</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Keisari</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Palgi</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ring</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Folkman</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>"Post-lockdown depression": adaptation difficulties, depressive symptoms, and the role of positive solitude when returning to routine after the lifting of nation-wide COVID-19 social restrictions</article-title>
          <source>Front Psychiatry</source>
          <year>2022</year>
          <month>3</month>
          <day>11</day>
          <volume>13</volume>
          <fpage>838903</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpsyt.2022.838903"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpsyt.2022.838903</pub-id>
          <pub-id pub-id-type="medline">35360132</pub-id>
          <pub-id pub-id-type="pmcid">PMC8963186</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>D'Cruz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Banerjee</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>'An invisible human rights crisis': the marginalization of older adults during the COVID-19 pandemic - An advocacy review</article-title>
          <source>Psychiatry Res</source>
          <year>2020</year>
          <month>10</month>
          <volume>292</volume>
          <fpage>113369</fpage>
          <pub-id pub-id-type="doi">10.1016/j.psychres.2020.113369</pub-id>
          <pub-id pub-id-type="medline">32795754</pub-id>
          <pub-id pub-id-type="pii">S0165-1781(20)32481-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC7397988</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Palgi</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shrira</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ring</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bodner</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Avidor</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bergman</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen-Fridel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Keisari</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hoffman</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>The loneliness pandemic: loneliness and other concomitants of depression, anxiety and their comorbidity during the COVID-19 outbreak</article-title>
          <source>J Affect Disord</source>
          <year>2020</year>
          <month>10</month>
          <day>01</day>
          <volume>275</volume>
          <fpage>109</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jad.2020.06.036</pub-id>
          <pub-id pub-id-type="medline">32658811</pub-id>
          <pub-id pub-id-type="pii">S0165-0327(20)32394-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC7330569</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salthouse</surname>
              <given-names>TA</given-names>
            </name>
          </person-group>
          <article-title>The processing-speed theory of adult age differences in cognition</article-title>
          <source>Psychol Rev</source>
          <year>1996</year>
          <month>07</month>
          <volume>103</volume>
          <issue>3</issue>
          <fpage>403</fpage>
          <lpage>28</lpage>
          <pub-id pub-id-type="doi">10.1037/0033-295x.103.3.403</pub-id>
          <pub-id pub-id-type="medline">8759042</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salthouse</surname>
              <given-names>TA</given-names>
            </name>
          </person-group>
          <article-title>The aging of working memory</article-title>
          <source>Neuropsychology</source>
          <year>1994</year>
          <volume>8</volume>
          <issue>4</issue>
          <fpage>535</fpage>
          <lpage>43</lpage>
          <pub-id pub-id-type="doi">10.1037/0894-4105.8.4.535</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="book">
          <article-title>Working memory</article-title>
          <source>Psychology of Learning and Motivation</source>
          <year>1974</year>
          <publisher-loc>Amsterdam, Netherlands</publisher-loc>
          <publisher-name>Elsevier Science</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McCabe</surname>
              <given-names>DP</given-names>
            </name>
            <name name-style="western">
              <surname>Roediger</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>McDaniel</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Balota</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Hambrick</surname>
              <given-names>DZ</given-names>
            </name>
          </person-group>
          <article-title>The relationship between working memory capacity and executive functioning: evidence for a common executive attention construct</article-title>
          <source>Neuropsychology</source>
          <year>2010</year>
          <month>03</month>
          <volume>24</volume>
          <issue>2</issue>
          <fpage>222</fpage>
          <lpage>43</lpage>
          <pub-id pub-id-type="doi">10.1037/a0017619</pub-id>
          <pub-id pub-id-type="medline">20230116</pub-id>
          <pub-id pub-id-type="pii">2010-04449-010</pub-id>
          <pub-id pub-id-type="pmcid">PMC2852635</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Postle</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Working memory as an emergent property of the mind and brain</article-title>
          <source>Neuroscience</source>
          <year>2006</year>
          <month>04</month>
          <day>28</day>
          <volume>139</volume>
          <issue>1</issue>
          <fpage>23</fpage>
          <lpage>38</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neuroscience.2005.06.005</pub-id>
          <pub-id pub-id-type="medline">16324795</pub-id>
          <pub-id pub-id-type="pii">S0306-4522(05)00620-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC1428794</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gordon-Salant</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yeni-Komshian</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Fitzgibbons</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Barrett</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Age-related differences in identification and discrimination of temporal cues in speech segments</article-title>
          <source>J Acoust Soc Am</source>
          <year>2006</year>
          <month>04</month>
          <volume>119</volume>
          <issue>4</issue>
          <fpage>2455</fpage>
          <lpage>66</lpage>
          <pub-id pub-id-type="doi">10.1121/1.2171527</pub-id>
          <pub-id pub-id-type="medline">16642858</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="book">
          <article-title>The contribution of auditory and cognitive factors to intelligibility of words and sentences in noise</article-title>
          <source>Physiology, Psychoacoustics and Cognition in Normal and Impaired Hearing</source>
          <year>2016</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Benichov</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cox</surname>
              <given-names>LC</given-names>
            </name>
            <name name-style="western">
              <surname>Tun</surname>
              <given-names>PA</given-names>
            </name>
            <name name-style="western">
              <surname>Wingfield</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Word recognition within a linguistic context: effects of age, hearing acuity, verbal ability, and cognitive function</article-title>
          <source>Ear Hear</source>
          <year>2012</year>
          <volume>33</volume>
          <issue>2</issue>
          <fpage>250</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1097/AUD.0b013e31822f680f</pub-id>
          <pub-id pub-id-type="medline">21918453</pub-id>
          <pub-id pub-id-type="pmcid">PMC3253325</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ezzatian</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Pichora-Fuller</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>Delayed stream segregation in older adults: more than just informational masking</article-title>
          <source>Ear Hear</source>
          <year>2015</year>
          <volume>36</volume>
          <issue>4</issue>
          <fpage>482</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1097/AUD.0000000000000139</pub-id>
          <pub-id pub-id-type="medline">25587669</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vogel</surname>
              <given-names>EK</given-names>
            </name>
            <name name-style="western">
              <surname>McCollough</surname>
              <given-names>AW</given-names>
            </name>
            <name name-style="western">
              <surname>Machizawa</surname>
              <given-names>MG</given-names>
            </name>
          </person-group>
          <article-title>Neural measures reveal individual differences in controlling access to working memory</article-title>
          <source>Nature</source>
          <year>2005</year>
          <month>11</month>
          <day>24</day>
          <volume>438</volume>
          <issue>7067</issue>
          <fpage>500</fpage>
          <lpage>3</lpage>
          <pub-id pub-id-type="doi">10.1038/nature04171</pub-id>
          <pub-id pub-id-type="medline">16306992</pub-id>
          <pub-id pub-id-type="pii">nature04171</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Janse</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>A non-auditory measure of interference predicts distraction by competing speech in older adults</article-title>
          <source>Neuropsychol Dev Cogn B Aging Neuropsychol Cogn</source>
          <year>2012</year>
          <month>11</month>
          <volume>19</volume>
          <issue>6</issue>
          <fpage>741</fpage>
          <lpage>58</lpage>
          <pub-id pub-id-type="doi">10.1080/13825585.2011.652590</pub-id>
          <pub-id pub-id-type="medline">22293017</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salthouse</surname>
              <given-names>TA</given-names>
            </name>
          </person-group>
          <article-title>Resource-reduction interpretations of cognitive aging</article-title>
          <source>Development Rev</source>
          <year>1988</year>
          <month>9</month>
          <volume>8</volume>
          <issue>3</issue>
          <fpage>238</fpage>
          <lpage>72</lpage>
          <pub-id pub-id-type="doi">10.1016/0273-2297(88)90006-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lash</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wingfield</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A Bruner-Potter effect in audition? Spoken word recognition in adult aging</article-title>
          <source>Psychol Aging</source>
          <year>2014</year>
          <month>12</month>
          <volume>29</volume>
          <issue>4</issue>
          <fpage>907</fpage>
          <lpage>12</lpage>
          <pub-id pub-id-type="doi">10.1037/a0037829</pub-id>
          <pub-id pub-id-type="medline">25244463</pub-id>
          <pub-id pub-id-type="pii">2014-39085-001</pub-id>
          <pub-id pub-id-type="pmcid">PMC4268394</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lash</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>CS</given-names>
            </name>
            <name name-style="western">
              <surname>Zoller</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wingfield</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Expectation and entropy in spoken word recognition: effects of age and hearing acuity</article-title>
          <source>Exper Aging Res</source>
          <year>2013</year>
          <month>05</month>
          <volume>39</volume>
          <issue>3</issue>
          <fpage>235</fpage>
          <lpage>53</lpage>
          <pub-id pub-id-type="doi">10.1080/0361073x.2013.779175</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pichora-Fuller</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Kramer</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Eckert</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hornsby</surname>
              <given-names>BW</given-names>
            </name>
            <name name-style="western">
              <surname>Humes</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Lemke</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Lunner</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Matthen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mackersie</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Naylor</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Phillips</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Richter</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rudner</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sommers</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Tremblay</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Wingfield</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Hearing impairment and cognitive energy: the framework for understanding effortful listening (FUEL)</article-title>
          <source>Ear Hear</source>
          <year>2016</year>
          <volume>37 Suppl 1</volume>
          <fpage>5S</fpage>
          <lpage>27S</lpage>
          <pub-id pub-id-type="doi">10.1097/AUD.0000000000000312</pub-id>
          <pub-id pub-id-type="medline">27355771</pub-id>
          <pub-id pub-id-type="pii">00003446-201607001-00002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Faulkner</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Word recognition: age differences in contextual facilitation effects</article-title>
          <source>Br J Psychol</source>
          <year>1983</year>
          <month>05</month>
          <volume>74</volume>
          <issue>Pt 2</issue>
          <fpage>239</fpage>
          <lpage>51</lpage>
          <pub-id pub-id-type="doi">10.1111/j.2044-8295.1983.tb01860.x</pub-id>
          <pub-id pub-id-type="medline">6883014</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dubno</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Ahlstrom</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Horwitz</surname>
              <given-names>AR</given-names>
            </name>
          </person-group>
          <article-title>Use of context by young and aged adults with normal hearing</article-title>
          <source>J Acoust Soc Am</source>
          <year>2000</year>
          <month>01</month>
          <volume>107</volume>
          <issue>1</issue>
          <fpage>538</fpage>
          <lpage>46</lpage>
          <pub-id pub-id-type="doi">10.1121/1.428322</pub-id>
          <pub-id pub-id-type="medline">10641662</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rönnberg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lunner</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zekveld</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sörqvist</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Danielsson</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lyxell</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dahlström</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Signoret</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Stenfelt</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pichora-Fuller</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Rudner</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The Ease of Language Understanding (ELU) model: theoretical, empirical, and clinical advances</article-title>
          <source>Front Syst Neurosci</source>
          <year>2013</year>
          <volume>7</volume>
          <fpage>31</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fnsys.2013.00031"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnsys.2013.00031</pub-id>
          <pub-id pub-id-type="medline">23874273</pub-id>
          <pub-id pub-id-type="pmcid">PMC3710434</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="book">
          <article-title>Working memory, comprehension, and aging: a review and a new view</article-title>
          <source>The Psychology of Learning and Motivation</source>
          <year>2003</year>
          <publisher-loc>Amsterdam, Netherlands</publisher-loc>
          <publisher-name>Elsevier Science</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harel-Arbeli</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wingfield</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Palgi</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>Age-related differences in the online processing of spoken semantic context and the effect of semantic competition: evidence from eye gaze</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2021</year>
          <month>02</month>
          <day>17</day>
          <volume>64</volume>
          <issue>2</issue>
          <fpage>315</fpage>
          <lpage>27</lpage>
          <pub-id pub-id-type="doi">10.1044/2020_jslhr-20-00142</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Livingston</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Huntley</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sommerlad</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ames</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ballard</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Banerjee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brayne</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Burns</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen-Mansfield</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cooper</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Costafreda</surname>
              <given-names>SG</given-names>
            </name>
            <name name-style="western">
              <surname>Dias</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fox</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gitlin</surname>
              <given-names>LN</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kales</surname>
              <given-names>HC</given-names>
            </name>
            <name name-style="western">
              <surname>Kivimäki</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Larson</surname>
              <given-names>EB</given-names>
            </name>
            <name name-style="western">
              <surname>Ogunniyi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Orgeta</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ritchie</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Rockwood</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sampson</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Samus</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>LS</given-names>
            </name>
            <name name-style="western">
              <surname>Selbæk</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Teri</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mukadam</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Dementia prevention, intervention, and care: 2020 report of the Lancet Commission</article-title>
          <source>Lancet</source>
          <year>2020</year>
          <month>08</month>
          <volume>396</volume>
          <issue>10248</issue>
          <fpage>413</fpage>
          <lpage>46</lpage>
          <pub-id pub-id-type="doi">10.1016/s0140-6736(20)30367-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>FR</given-names>
            </name>
            <name name-style="western">
              <surname>Albert</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Hearing loss and dementia - who is listening?</article-title>
          <source>Aging Ment Health</source>
          <year>2014</year>
          <volume>18</volume>
          <issue>6</issue>
          <fpage>671</fpage>
          <lpage>3</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/24875093"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/13607863.2014.915924</pub-id>
          <pub-id pub-id-type="medline">24875093</pub-id>
          <pub-id pub-id-type="pmcid">PMC4075051</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Deal</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Goman</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Albert</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Arnold</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>Burgard</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chisolm</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Couper</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Glynn</surname>
              <given-names>NW</given-names>
            </name>
            <name name-style="western">
              <surname>Gmelin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hayden</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Mosley</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pankow</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Reed</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Sanchez</surname>
              <given-names>VA</given-names>
            </name>
            <name name-style="western">
              <surname>Richey Sharrett</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>SD</given-names>
            </name>
            <name name-style="western">
              <surname>Coresh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>FR</given-names>
            </name>
          </person-group>
          <article-title>Hearing treatment for reducing cognitive decline: design and methods of the Aging and Cognitive Health Evaluation in Elders randomized controlled trial</article-title>
          <source>Alzheimers Dement (N Y)</source>
          <year>2018</year>
          <month>10</month>
          <day>05</day>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>499</fpage>
          <lpage>507</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2352-8737(18)30050-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.trci.2018.08.007</pub-id>
          <pub-id pub-id-type="medline">30364572</pub-id>
          <pub-id pub-id-type="pii">S2352-8737(18)30050-7</pub-id>
          <pub-id pub-id-type="pmcid">PMC6197326</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pratt</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dodd</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Welsh</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Growing older does not always mean moving slower: examining aging and the saccadic motor system</article-title>
          <source>J Mot Behav</source>
          <year>2006</year>
          <month>09</month>
          <volume>38</volume>
          <issue>5</issue>
          <fpage>373</fpage>
          <lpage>82</lpage>
          <pub-id pub-id-type="doi">10.3200/jmbr.38.5.373-382</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanenhaus</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Spivey-Knowlton</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Eberhard</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Sedivy</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Integration of visual and linguistic information in spoken language comprehension</article-title>
          <source>Science</source>
          <year>1995</year>
          <month>06</month>
          <day>16</day>
          <volume>268</volume>
          <issue>5217</issue>
          <fpage>1632</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1126/science.7777863</pub-id>
          <pub-id pub-id-type="medline">7777863</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nitsan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wingfield</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lavie</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>Differences in working memory capacity affect online spoken word recognition: evidence from eye movements</article-title>
          <source>Trends Hear</source>
          <year>2019</year>
          <month>04</month>
          <day>23</day>
          <volume>23</volume>
          <fpage>2331216519839624</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/2331216519839624?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/2331216519839624</pub-id>
          <pub-id pub-id-type="medline">31010398</pub-id>
          <pub-id pub-id-type="pmcid">PMC6480998</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nitsan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Banai</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>One size does not fit all: examining the effects of working memory capacity on spoken word recognition in older adults using eye tracking</article-title>
          <source>Front Psychol</source>
          <year>2022</year>
          <month>04</month>
          <day>11</day>
          <volume>13</volume>
          <fpage>841466</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpsyg.2022.841466"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpsyg.2022.841466</pub-id>
          <pub-id pub-id-type="medline">35478743</pub-id>
          <pub-id pub-id-type="pmcid">PMC9037998</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baharav</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nitsan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>Commentary: working memory load affects processing time in spoken word recognition: test-retest reliability of the E-WINDMIL eyetracking paradigm</article-title>
          <source>Front Neurosci</source>
          <year>2021</year>
          <volume>15</volume>
          <fpage>663930</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.frontiersin.org/articles/10.3389/fnins.2021.663930/full"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnins.2021.663930</pub-id>
          <pub-id pub-id-type="medline">34177448</pub-id>
          <pub-id pub-id-type="pmcid">PMC8224167</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Tse</surname>
              <given-names>VY</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>Does it take older adults longer than younger adults to perceptually segregate a speech target from a background masker?</article-title>
          <source>Hear Res</source>
          <year>2012</year>
          <month>08</month>
          <volume>290</volume>
          <issue>1-2</issue>
          <fpage>55</fpage>
          <lpage>63</lpage>
          <pub-id pub-id-type="doi">10.1016/j.heares.2012.04.022</pub-id>
          <pub-id pub-id-type="medline">22609772</pub-id>
          <pub-id pub-id-type="pii">S0378-5955(12)00109-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hadar</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Skrzypek</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Wingfield</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>Working memory load affects processing time in spoken word recognition: evidence from eye-movements</article-title>
          <source>Front Neurosci</source>
          <year>2016</year>
          <month>05</month>
          <day>19</day>
          <volume>10</volume>
          <fpage>221</fpage>
          <pub-id pub-id-type="doi">10.3389/fnins.2016.00221</pub-id>
          <pub-id pub-id-type="medline">27242424</pub-id>
          <pub-id pub-id-type="pmcid">PMC4871876</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salverda</surname>
              <given-names>AP</given-names>
            </name>
            <name name-style="western">
              <surname>Tanenhaus</surname>
              <given-names>MK</given-names>
            </name>
          </person-group>
          <article-title>The visual world paradigm</article-title>
          <source>Research Methods in Psycholinguistics and the Neurobiology of Language: A Practical Guide</source>
          <year>2017</year>
          <publisher-loc>Hoboken, New Jersey, United States</publisher-loc>
          <publisher-name>Wiley</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Knuth</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lahey</surname>
              <given-names>JN</given-names>
            </name>
            <name name-style="western">
              <surname>Palma</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Does eye-tracking have an effect on economic behavior?</article-title>
          <source>PLoS One</source>
          <year>2021</year>
          <month>08</month>
          <day>05</day>
          <volume>16</volume>
          <issue>8</issue>
          <fpage>e0254867</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0254867"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0254867</pub-id>
          <pub-id pub-id-type="medline">34351921</pub-id>
          <pub-id pub-id-type="pii">PONE-D-21-14391</pub-id>
          <pub-id pub-id-type="pmcid">PMC8341649</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rashbass</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>The relationship between saccadic and smooth tracking eye movements</article-title>
          <source>J Physiol</source>
          <year>1961</year>
          <month>12</month>
          <volume>159</volume>
          <fpage>326</fpage>
          <lpage>38</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://onlinelibrary.wiley.com/resolve/openurl?genre=article&#38;sid=nlm:pubmed&#38;issn=0022-3751&#38;date=1961&#38;volume=159&#38;spage=326"/>
          </comment>
          <pub-id pub-id-type="doi">10.1113/jphysiol.1961.sp006811</pub-id>
          <pub-id pub-id-type="medline">14490422</pub-id>
          <pub-id pub-id-type="pmcid">PMC1359508</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Chambers</surname>
              <given-names>CG</given-names>
            </name>
            <name name-style="western">
              <surname>Daneman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pichora-Fuller</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Reingold</surname>
              <given-names>EM</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>Effects of aging and noise on real-time spoken word recognition: evidence from eye movements</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2011</year>
          <month>02</month>
          <volume>54</volume>
          <issue>1</issue>
          <fpage>243</fpage>
          <lpage>62</lpage>
          <pub-id pub-id-type="doi">10.1044/1092-4388(2010/09-0233)</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arnold</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Fagnano</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tanenhaus</surname>
              <given-names>MK</given-names>
            </name>
          </person-group>
          <article-title>Disfluencies signal Theee, Um, New Information</article-title>
          <source>J Psycholinguist Res</source>
          <year>2003</year>
          <volume>32</volume>
          <fpage>25</fpage>
          <lpage>36</lpage>
          <pub-id pub-id-type="doi">10.1023/A:1021980931292</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brown-Schmidt</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The role of executive function in perspective taking during online language comprehension</article-title>
          <source>Psychon Bull Rev</source>
          <year>2009</year>
          <month>10</month>
          <volume>16</volume>
          <issue>5</issue>
          <fpage>893</fpage>
          <lpage>900</lpage>
          <pub-id pub-id-type="doi">10.3758/pbr.16.5.893</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kaiser</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Trueswell</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Interpreting pronouns and demonstratives in Finnish: evidence for a form-specific approach to reference resolution</article-title>
          <source>Lang Cogn Process</source>
          <year>2008</year>
          <month>08</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>709</fpage>
          <lpage>48</lpage>
          <pub-id pub-id-type="doi">10.1080/01690960701771220</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>FR</given-names>
            </name>
          </person-group>
          <article-title>Hearing loss and cognition among older adults in the United States</article-title>
          <source>J Gerontol A Biol Sci Med Sci</source>
          <year>2011</year>
          <month>10</month>
          <day>18</day>
          <volume>66</volume>
          <issue>10</issue>
          <fpage>1131</fpage>
          <lpage>6</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/21768501"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/gerona/glr115</pub-id>
          <pub-id pub-id-type="medline">21768501</pub-id>
          <pub-id pub-id-type="pii">glr115</pub-id>
          <pub-id pub-id-type="pmcid">PMC3172566</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayasse</surname>
              <given-names>ND</given-names>
            </name>
            <name name-style="western">
              <surname>Lash</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wingfield</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Effort not speed characterizes comprehension of spoken sentences by older adults with mild hearing impairment</article-title>
          <source>Front Aging Neurosci</source>
          <year>2016</year>
          <month>01</month>
          <day>10</day>
          <volume>8</volume>
          <fpage>329</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fnagi.2016.00329"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnagi.2016.00329</pub-id>
          <pub-id pub-id-type="medline">28119598</pub-id>
          <pub-id pub-id-type="pmcid">PMC5222878</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huettig</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Janse</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Individual differences in working memory and processing speed predict anticipatory spoken language processing in the visual world</article-title>
          <source>Lang Cogn Neurosci</source>
          <year>2015</year>
          <month>09</month>
          <day>21</day>
          <volume>31</volume>
          <issue>1</issue>
          <fpage>80</fpage>
          <lpage>93</lpage>
          <pub-id pub-id-type="doi">10.1080/23273798.2015.1047459</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rönnberg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rudner</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Foo</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lunner</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Cognition counts: a working memory system for ease of language understanding (ELU)</article-title>
          <source>Int J Audiol</source>
          <year>2008</year>
          <month>11</month>
          <day>07</day>
          <volume>47</volume>
          <issue>sup2</issue>
          <fpage>S99</fpage>
          <lpage>105</lpage>
          <pub-id pub-id-type="doi">10.1080/14992020802301167</pub-id>
          <pub-id pub-id-type="medline">19012117</pub-id>
          <pub-id pub-id-type="pii">905583321</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Helfer</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Staub</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Competing speech perception in older and younger adults: behavioral and eye-movement evidence</article-title>
          <source>Ear Hear</source>
          <year>2014</year>
          <volume>35</volume>
          <issue>2</issue>
          <fpage>161</fpage>
          <lpage>70</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/24351611"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/AUD.0b013e3182a830cf</pub-id>
          <pub-id pub-id-type="medline">24351611</pub-id>
          <pub-id pub-id-type="pmcid">PMC3944060</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Avivi-Reich</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>Does the degree of linguistic experience (native versus nonnative) modulate the degree to which listeners can benefit from a delay between the onset of the maskers and the onset of the target speech?</article-title>
          <source>Hear Res</source>
          <year>2016</year>
          <month>11</month>
          <volume>341</volume>
          <fpage>9</fpage>
          <lpage>18</lpage>
          <pub-id pub-id-type="doi">10.1016/j.heares.2016.07.016</pub-id>
          <pub-id pub-id-type="medline">27496539</pub-id>
          <pub-id pub-id-type="pii">S0378-5955(15)30027-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Snyder</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Alain</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Toward a neurophysiological theory of auditory stream segregation</article-title>
          <source>Psychol Bull</source>
          <year>2007</year>
          <month>09</month>
          <volume>133</volume>
          <issue>5</issue>
          <fpage>780</fpage>
          <lpage>99</lpage>
          <pub-id pub-id-type="doi">10.1037/0033-2909.133.5.780</pub-id>
          <pub-id pub-id-type="medline">17723030</pub-id>
          <pub-id pub-id-type="pii">2007-12463-004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Snyder</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Alain</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Picton</surname>
              <given-names>TW</given-names>
            </name>
          </person-group>
          <article-title>Effects of attention on neuroelectric correlates of auditory stream segregation</article-title>
          <source>J Cogn Neurosci</source>
          <year>2006</year>
          <month>01</month>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>13</lpage>
          <pub-id pub-id-type="doi">10.1162/089892906775250021</pub-id>
          <pub-id pub-id-type="medline">16417678</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heinrich</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gagne</surname>
              <given-names>J-P</given-names>
            </name>
            <name name-style="western">
              <surname>Viljanen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>Effective communication as a fundamental aspect of active aging and well-being: paying attention to the challenges older adults face in noisy environments</article-title>
          <source>Social Inquiry into Well-being</source>
          <year>2016</year>
          <volume>2</volume>
          <issue>1</issue>
          <pub-id pub-id-type="doi">10.13165/SIIW-16-2-1-05</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="book">
          <article-title>The physiology of cochlear presbycusis</article-title>
          <source>The Aging Auditory System. Springer Handbook of Auditory Research</source>
          <year>2010</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Revill</surname>
              <given-names>KP</given-names>
            </name>
            <name name-style="western">
              <surname>Spieler</surname>
              <given-names>DH</given-names>
            </name>
          </person-group>
          <article-title>The effect of lexical frequency on spoken word recognition in young and older listeners</article-title>
          <source>Psychol Aging</source>
          <year>2012</year>
          <month>03</month>
          <volume>27</volume>
          <issue>1</issue>
          <fpage>80</fpage>
          <lpage>7</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/21707175"/>
          </comment>
          <pub-id pub-id-type="doi">10.1037/a0024113</pub-id>
          <pub-id pub-id-type="medline">21707175</pub-id>
          <pub-id pub-id-type="pii">2011-13123-001</pub-id>
          <pub-id pub-id-type="pmcid">PMC3289730</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nagar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mikulincer</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nitsan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>Safe and sound: the effects of experimentally priming the sense of attachment security on pure-tone audiometric thresholds among young and older adults</article-title>
          <source>Psychol Sci</source>
          <year>2022</year>
          <month>03</month>
          <day>17</day>
          <volume>33</volume>
          <issue>3</issue>
          <fpage>424</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1177/09567976211042008</pub-id>
          <pub-id pub-id-type="medline">35175871</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Keisari</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Feniger-Schaal</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Palgi</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Golland</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Gesser-Edelsburg</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Synchrony in old age: playing the mirror game improves cognitive performance</article-title>
          <source>Clin Gerontol</source>
          <year>2022</year>
          <month>08</month>
          <day>06</day>
          <volume>45</volume>
          <issue>2</issue>
          <fpage>312</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.1080/07317115.2020.1799131</pub-id>
          <pub-id pub-id-type="medline">32762289</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ben-David</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Icht</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The effect of practice and visual feedback on oral-diadochokinetic rates for younger and older adults</article-title>
          <source>Lang Speech</source>
          <year>2018</year>
          <month>03</month>
          <day>14</day>
          <volume>61</volume>
          <issue>1</issue>
          <fpage>113</fpage>
          <lpage>34</lpage>
          <pub-id pub-id-type="doi">10.1177/0023830917708808</pub-id>
          <pub-id pub-id-type="medline">28610466</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frost</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Porat</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Malhotra</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Picinali</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>A novel auditory-cognitive training app for delaying or preventing the onset of dementia: participatory design with stakeholders</article-title>
          <source>JMIR Hum Factors</source>
          <year>2020</year>
          <month>09</month>
          <day>30</day>
          <volume>7</volume>
          <issue>3</issue>
          <fpage>e19880</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://humanfactors.jmir.org/2020/3/e19880/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/19880</pub-id>
          <pub-id pub-id-type="medline">32996884</pub-id>
          <pub-id pub-id-type="pii">v7i3e19880</pub-id>
          <pub-id pub-id-type="pmcid">PMC7557448</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Masurovsky</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Controlling for placebo effects in computerized cognitive training studies with healthy older adults from 2016-2018: systematic review</article-title>
          <source>JMIR Serious Games</source>
          <year>2020</year>
          <month>06</month>
          <day>26</day>
          <volume>8</volume>
          <issue>2</issue>
          <fpage>e14030</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://games.jmir.org/2020/2/e14030/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/14030</pub-id>
          <pub-id pub-id-type="medline">32589159</pub-id>
          <pub-id pub-id-type="pii">v8i2e14030</pub-id>
          <pub-id pub-id-type="pmcid">PMC7381254</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Israsena</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Jirayucharoensak</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hemrungrojn</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pan-Ngum</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Brain exercising games with consumer-grade single-channel electroencephalogram neurofeedback: pre-post intervention study</article-title>
          <source>JMIR Serious Games</source>
          <year>2021</year>
          <month>06</month>
          <day>15</day>
          <volume>9</volume>
          <issue>2</issue>
          <fpage>e26872</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://games.jmir.org/2021/2/e26872/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/26872</pub-id>
          <pub-id pub-id-type="medline">34128816</pub-id>
          <pub-id pub-id-type="pii">v9i2e26872</pub-id>
          <pub-id pub-id-type="pmcid">PMC8277357</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Krebs</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Falkner</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Niklaus</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Persello</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Klöppel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nef</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Urwyler</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Application of eye tracking in puzzle games for adjunct cognitive markers: pilot observational study in older adults</article-title>
          <source>JMIR Serious Games</source>
          <year>2021</year>
          <month>03</month>
          <day>22</day>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>e24151</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://games.jmir.org/2021/1/e24151/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/24151</pub-id>
          <pub-id pub-id-type="medline">33749607</pub-id>
          <pub-id pub-id-type="pii">v9i1e24151</pub-id>
          <pub-id pub-id-type="pmcid">PMC8078028</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Castellanos</surname>
              <given-names>FX</given-names>
            </name>
            <name name-style="western">
              <surname>Sonuga-Barke</surname>
              <given-names>EJ</given-names>
            </name>
            <name name-style="western">
              <surname>Milham</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Tannock</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Characterizing cognition in ADHD: beyond executive dysfunction</article-title>
          <source>Trends Cogn Sci</source>
          <year>2006</year>
          <month>03</month>
          <volume>10</volume>
          <issue>3</issue>
          <fpage>117</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.1016/j.tics.2006.01.011</pub-id>
          <pub-id pub-id-type="medline">16460990</pub-id>
          <pub-id pub-id-type="pii">S1364-6613(06)00028-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barkley</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>Behavioral inhibition, sustained attention, and executive functions: constructing a unifying theory of ADHD</article-title>
          <source>Psychol Bull</source>
          <year>1997</year>
          <month>01</month>
          <volume>121</volume>
          <issue>1</issue>
          <fpage>65</fpage>
          <lpage>94</lpage>
          <pub-id pub-id-type="doi">10.1037/0033-2909.121.1.65</pub-id>
          <pub-id pub-id-type="medline">9000892</pub-id>
          <pub-id pub-id-type="pii">1997-02112-004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dovis</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Van der Oord</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wiers</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Prins</surname>
              <given-names>PJ</given-names>
            </name>
          </person-group>
          <article-title>Improving executive functioning in children with ADHD: training multiple executive functions within the context of a computer game. A randomized double-blind placebo controlled trial</article-title>
          <source>PLoS One</source>
          <year>2015</year>
          <month>04</month>
          <day>06</day>
          <volume>10</volume>
          <issue>4</issue>
          <fpage>e0121651</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0121651"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0121651</pub-id>
          <pub-id pub-id-type="medline">25844638</pub-id>
          <pub-id pub-id-type="pii">PONE-D-14-16366</pub-id>
          <pub-id pub-id-type="pmcid">PMC4386826</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
