<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JSG</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Serious Games</journal-id>
      <journal-title>JMIR Serious Games</journal-title>
      <issn pub-type="epub">2291-9279</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i1e21620</article-id>
      <article-id pub-id-type="pmid">33427677</article-id>
      <article-id pub-id-type="doi">10.2196/21620</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Pupillary Responses for Cognitive Load Measurement to Classify Difficulty Levels in an Educational Video Game: Empirical Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Zary</surname>
            <given-names>Nabil</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Rankin</surname>
            <given-names>Debbie</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Li</surname>
            <given-names>Jinfeng</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Aghaei</surname>
            <given-names>Zahra</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Khaleghi</surname>
            <given-names>Ali</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Paderewski</surname>
            <given-names>Patricia</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Mitre-Hernandez</surname>
            <given-names>Hugo</given-names>
          </name>
          <degrees>PhD, MSc, BSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2840-3998</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Covarrubias Carrillo</surname>
            <given-names>Roberto</given-names>
          </name>
          <degrees>BSc, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1875-5287</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Lara-Alvarez</surname>
            <given-names>Carlos</given-names>
          </name>
          <degrees>PhD, MSc, BSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Center for Research in Mathematics</institution>
            <addr-line>Calle Lasec y Andador Galileo Galilei</addr-line>
            <addr-line>Quantum, Ciudad del Conocimiento</addr-line>
            <addr-line>Zacatecas, 98160</addr-line>
            <country>Mexico</country>
            <phone>52 4929980 ext 1105</phone>
            <email>c.alberto.lara@gmail.com</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7856-7398</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Center for Research in Mathematics</institution>
        <addr-line>Zacatecas</addr-line>
        <country>Mexico</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Center for Research and Advanced Studies of the National Polytechnic Institute, Tamaulipas</institution>
        <addr-line>Ciudad Victoria</addr-line>
        <country>Mexico</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Carlos Lara-Alvarez <email>c.alberto.lara@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Jan-Mar</season>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>11</day>
        <month>1</month>
        <year>2021</year>
      </pub-date>
      <volume>9</volume>
      <issue>1</issue>
      <elocation-id>e21620</elocation-id>
      <history>
        <date date-type="received">
          <day>19</day>
          <month>6</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>5</day>
          <month>8</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>25</day>
          <month>9</month>
          <year>2020</year>
        </date>
        <date date-type="accepted">
          <day>5</day>
          <month>11</month>
          <year>2020</year>
        </date>
      </history>
      <copyright-statement>©Hugo Mitre-Hernandez, Roberto Covarrubias Carrillo, Carlos Lara-Alvarez. Originally published in JMIR Serious Games (http://games.jmir.org), 11.01.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Serious Games, is properly cited. The complete bibliographic information, a link to the original publication on http://games.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="http://games.jmir.org/2021/1/e21620/" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>A learning task recurrently perceived as easy (or hard) may cause poor learning results. Gamer data such as errors, attempts, or time to finish a challenge are widely used to estimate the perceived difficulty level. In other contexts, pupillometry is widely used to measure cognitive load (mental effort); hence, this may describe the perceived task difficulty.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aims to assess the use of task-evoked pupillary responses to measure cognitive load for describing the difficulty levels in a video game. In addition, it proposes an image filter to better estimate baseline pupil size and to reduce the screen luminescence effect.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We conducted an experiment that compares the baseline estimated from our filter against that estimated from common approaches. Then, a classifier with different pupil features was used to classify the difficulty of a data set containing information from students playing a video game for practicing math fractions.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We observed that the proposed filter better estimates a baseline. Mauchly’s test of sphericity indicated that the assumption of sphericity had been violated (χ<sup>2</sup><sub>14</sub>=0.05; <italic>P</italic>=.001); therefore, a Greenhouse-Geisser correction was used (ε=0.47). There was a significant difference in mean pupil diameter change (MPDC) estimated from different baseline images with the scramble filter (<italic>F</italic><sub>5,78</sub>=30.965; <italic>P</italic>&#60;.001). Moreover, according to the Wilcoxon signed rank test, pupillary response features that better describe the difficulty level were MPDC (<italic>z</italic>=−2.15; <italic>P</italic>=.03) and peak dilation (<italic>z</italic>=−3.58; <italic>P</italic>&#60;.001). A random forest classifier for easy and hard levels of difficulty showed an accuracy of 75% when the gamer data were used, but the accuracy increased to 87.5% when pupillary measurements were included.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The screen luminescence effect on pupil size is reduced with a scrambled filter on the background video game image. Finally, pupillary response data can improve classifier accuracy for the perceived difficulty of levels in educational video games.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>video games</kwd>
        <kwd>pupil</kwd>
        <kwd>metacognitive monitoring</kwd>
        <kwd>educational technology</kwd>
        <kwd>machine learning</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Overview</title>
        <p>An <italic>educational video game</italic> (EVG) is a video game that provides learning or training value to the player. Potential contributions of video games cover each of the three main fields of psychology: the affective (awakening feelings), the connate (aggressive or impulsive behavior), and the cognitive (learning-related skills) [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
        <p>Video games have been demonstrated to be effective for improving working memory, mental rotation skills, and geometry performance [<xref ref-type="bibr" rid="ref2">2</xref>]. Some of the effective features of educational video games include a clear goal, an adequate level of difficulty, quick-moving stimuli, and integrated instructions [<xref ref-type="bibr" rid="ref3">3</xref>].</p>
        <p>Several works have used EVGs to foster fraction understanding and to assess students [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. However, our research focuses on the cognitive load (mental effort) generated by reasoning tasks [<xref ref-type="bibr" rid="ref6">6</xref>] about math fractions; this is a direct way to measure the difficulty perceived by the EVG's player.</p>
        <p>Video game difficulty refers to the amount of skill required by the player to progress through the game experience. Studying how to set an adequate difficulty level has attracted particular attention in the educational video games field [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. Basic approaches to setting difficulty include allowing users to manually select levels and increasing the difficulty at a steady rate over the course of the game, with earlier levels being easier and later levels being harder [<xref ref-type="bibr" rid="ref9">9</xref>]. Manually adapting difficulty or designing an incremental-difficulty solution could cause serious problems; for instance, the player may not know how they will perform before playing a given level, or the predefined change rate could be slower or faster than required by the player.</p>
        <p>On the other hand, <italic>dynamic difficulty adjustment</italic> or <italic>dynamic difficulty balancing</italic> changes the game behavior according to the skill level of the players. For this purpose, the dynamic difficulty adjustment requires evaluation of the player's performance (through game scores, time, number of errors, player's decisions, etc) and adjustment of a set of game variables that regulate difficulty [<xref ref-type="bibr" rid="ref10">10</xref>]. It has been shown that a dynamic approach that uses gamer behavior data presents better learning outcomes than an incremental difficulty approach [<xref ref-type="bibr" rid="ref7">7</xref>].</p>
        <p>As a step toward finding an imperceptible difficulty control, this paper proposes to use pupil dilation to detect very easy (or hard) activities. It is known that pupil dilation reflects activity in the brain as cognitive load—that is, the total amount of mental effort (information processing) induced by reasoning tasks or involving memory resources [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref11">11</xref>].</p>
      </sec>
      <sec>
        <title>Background</title>
        <sec>
          <title>The Impact of Difficulty on Learning</title>
          <p>The flow experience model, proposed by Csikszentmihalyi [<xref ref-type="bibr" rid="ref12">12</xref>], marks an achieved balance of arousal-increasing and arousal-decreasing processes. As shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>, the flow model describes this balance in terms of the fit between perceived challenges and skills: an activity wherein challenges predominate increases arousal; an activity wherein skills predominate reduces arousal. Thus, a synchrony of challenges and skills permits a state of deep involvement, while the pitfalls of either over- or under-arousal (ie, anxiety or boredom) are avoided [<xref ref-type="bibr" rid="ref12">12</xref>].</p>
          <fig id="figure1" position="float">
            <label>Figure 1</label>
            <caption>
              <p>Flow experience model of Mihaly Csikszentmihalyi [<xref ref-type="bibr" rid="ref12">12</xref>].</p>
            </caption>
            <graphic xlink:href="games_v9i1e21620_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>The dynamic flow passing through states <italic>a</italic> → <italic>c</italic> → <italic>e</italic> shown in <xref rid="figure1" ref-type="fig">Figure 1</xref> is the optimal path for increasing difficulty. However, <italic>b<sub>1</sub></italic> → <italic>d<sub>1</sub></italic> are states of anxiety that demand new learning skills to return to optimal flow. Moreover, <italic>b<sub>2</sub></italic> → <italic>d<sub>2</sub></italic> are states of boredom that need more challenges to return to optimal flow. Several studies have supported that the rate of change of pupil diameter is related to task difficulty.</p>
        </sec>
        <sec>
          <title>Pupillary Responses</title>
          <p>The eye can be seen as a camera, with the pupil as the eye aperture, and it involves the iris activity [<xref ref-type="bibr" rid="ref13">13</xref>]. The iris movement is controlled by the activity of two muscles, the dilator and the sphincter. Sphincter activation causes the pupil to constrict (ie, miosis), and this is largely under parasympathetic control, while the dilator muscle receives mostly sympathetic innervation and causes the pupil to dilate (ie, mydriasis) [<xref ref-type="bibr" rid="ref14">14</xref>].</p>
          <p>Light has a relevant role in the retina and the pupil response. The size of the iris determines the amount of light that is captured by the system. The ambient light level largely determines the steady-state size of the pupil. Rapid increments in light flux on the retina cause a brisk constriction of the pupil. This constriction will depend on the size of the light stimulus, its luminance contrast, onset temporal characteristics, and location in the visual field [<xref ref-type="bibr" rid="ref14">14</xref>].</p>
          <p>Health factors also affect pupillary responses. Pupillary constriction is decreased in major depression [<xref ref-type="bibr" rid="ref15">15</xref>]. Schizophrenia is associated with a significant decline in working memory capacity, and an additional moderate decline is associated with aging, but pupillary responses evoked by a working memory task were not related to schizophrenia severity [<xref ref-type="bibr" rid="ref16">16</xref>]. Among other factors, the consumption of caffeine or alcoholic beverages was associated with significant increases in pupil size [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Finally, pupil dilation can be caused by amphetamines and diphenhydramine, and pupil constriction by clonidine and opioids [<xref ref-type="bibr" rid="ref19">19</xref>].</p>
          <p>For good observation of pupil response during EVG tasks, all these conditions must be carefully observed in the experiment design.</p>
        </sec>
        <sec>
          <title>Cognitive Load and Pupillary Response</title>
          <p>The cognitive load (mental activity) imposed by tasks has a pupillary response, known as a task-evoked pupillary response (TEPR) [<xref ref-type="bibr" rid="ref20">20</xref>]. TEPRs occur shortly after the onset of a task and subside quickly after the mental activity is terminated. The TEPR depends on several factors; for instance, the response is greater for novice participants doing an arithmetic task than for an expert because novices require more mental effort [<xref ref-type="bibr" rid="ref21">21</xref>]. Then, through pupillometry (measuring the pupil diameter), one can decide whether a challenge is adequate for the skills of a learner (<xref rid="figure1" ref-type="fig">Figure 1</xref>); that is, we can balance a video game to maximize the learning outcomes.</p>
          <p>Pupil diameter is widely used to study cognitive load. Researchers have studied this relationship in different tasks, such as driving a vehicle while listening to a dialog, reasoning through math exercises, memorizing numbers, and perceiving visual stimuli [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>].</p>
          <p>Concerning industrial areas, cognitive load has been used in automotive and healthcare applications to optimize user's decision-making tasks [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. Most studies in these fields are oriented to discover how to preserve attention and mental work on primary tasks and how to reduce it on secondary tasks to avoid critical errors. In addition, cognitive load has been used in video game studies without significant results, mainly due to changes in screen luminescence.</p>
          <p>Playing EVG involves memorization and reasoning tasks that are associated with cognitive load. This paper uses pupillary response data to assess cognitive load in educational video games.</p>
          <p>Beatty [<xref ref-type="bibr" rid="ref6">6</xref>] points out that pupillary responses occur at short latencies following the onset of mental processing and subside quickly once processing is terminated. Most of the latency is due to slow iris muscle constriction. Different features have been used to evaluate cognitive load with pupillary responses such as mean pupil diameter change (MPDC), average percentage change in pupil size (APCPS), peak dilation (PD), and latency to peak (LP) [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>].</p>
        </sec>
        <sec>
          <title>Estimating Pupillary Responses</title>
          <p>Individual differences in pupil size have been well documented; for example, pupil size decreases linearly as a function of age at all illuminance levels, and students high in cognitive ability have a larger pupil size [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. These differences must be considered when studying factors that dilate the pupil; for this purpose, researchers calculate a pupil baseline interval for each individual separately. Then, the pupil change is estimated by contrasting information from the baseline and testing intervals. In the baseline period, users fixate on a predefined screen before the stimulus is presented. Baseline duration ranges from 400 milliseconds to 10 seconds [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref32">32</xref>]. In general, the variation in the baseline duration should play no substantial role in reporting pupil dilation [<xref ref-type="bibr" rid="ref33">33</xref>]. Unsworth et al [<xref ref-type="bibr" rid="ref32">32</xref>] suggest that better results can be obtained by using a longer duration; hence, they use 5 seconds to estimate the baseline.</p>
          <p>A common practice is to use a neutral image, either black, gray, or white [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]; a gray image is more effective to reduce screen luminescence [<xref ref-type="bibr" rid="ref35">35</xref>]. Using a neutral image is good enough for controlled tests that use luminance-controlled images, but there are significant changes in pupil size due to luminance when participants play video games [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. Studying the pupil dilation induced by mental activity when participants are exposed to environmental illumination changes is a challenge. For instance, several authors have reported that pupillary response features are directly correlated to cognitive load. Other authors, however, do not observe such correlations, and they suggest that this effect could be caused by luminance changes [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>].</p>
          <p>Obtaining a baseline for each trial rather than for a whole test session is a common practice [<xref ref-type="bibr" rid="ref33">33</xref>]; this is an applicable solution for settings where the screen luminance remains stable for certain periods (eg, for a video game stage that is mainly dominated by the background). For these cases, the baseline is usually calculated from data generated by observing a scrambled image (ie, one image obtained by applying a scrambling scheme to a representative image in the period test).</p>
          <p>Image scrambling [<xref ref-type="bibr" rid="ref40">40</xref>] has two objectives: to transform a meaningful image into a meaningless or disordered image and to have the same mean intensity for the scrambled and original images.</p>
          <p>The nonlinear relationship between luminance changes and pupil size is one of the main difficulties when studying cognitive load in real conditions. Wong et al [<xref ref-type="bibr" rid="ref41">41</xref>] study four approaches (ignoring, excluding, compensating, or using pupillary light reflex features) to mitigate the luminance change in cognitive load measurements. They found that ignoring the luminance change is the worst option. This paper proposes an initial solution for studying cognitive load in real scenarios that is complementary to the approaches in the aforementioned study [<xref ref-type="bibr" rid="ref41">41</xref>].</p>
          <p>We hypothesize that a better baseline can be estimated from an image that maintains both the mean and local intensity. We tested grid scrambled images for obtaining the baseline. A grid scrambled image is generated by selecting a representative image within the measurement period, splitting it into a <italic>n</italic> × <italic>m</italic> grid (<italic>n</italic> columns and <italic>m</italic> rows), and finally, scrambling each region to form the image.</p>
          <p>The contribution of this paper is twofold: we propose a grid scramble filter to reduce the effect of screen luminescence, and we test the hypothesis that using pupillary response data improves the classification of easy (or hard) difficulty levels.</p>
          <p>The rest of this paper is organized as follows: the Methods section describes the experimental setup, including materials, participants, metrics, and procedure; the Results section discusses the results of each experiment; and finally, the Conclusions and Further Work section concludes this paper.</p>
        </sec>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <p>The goal of this study is to analyze the pupillary response and gamer data for different difficulty levels in a math EVG to evaluate the significant differences in perceived difficulty for participants with intermediate math skills. Selected relevant features are used to classify difficulty.</p>
      <sec>
        <title>Materials</title>
        <p>An <italic>eye-tracking</italic> device, the “EyeTribe” model ET1000 with 60 Hz sampling frequency, was used with a screen (24" extended monitor) with a resolution of 1440 × 960 pixels, and both were connected to a laptop.</p>
        <p>The eye tracker was located 50-60 cm from the participant’s face. A calibration was done before each test/play session by using the EyeTribe software development kit (twelve points). To remove atypical values, a Hampel filter was used in the preprocessing stage.</p>
        <p>To avoid pupil dilation caused by sunlight, the windows in the testing room were covered with blackout curtains, which have a high light-blocking effect. We used the same brightness and settings of the screen throughout. In addition, no sounds or visitors were allowed in the experimentation area.</p>
        <p>The educational <italic>Refraction video game</italic> [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>] was used in the experiments, as shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. For research, “Refraction” is of particular interest because it is open-access, it provides a natural context for students to create fractions through splitting, and the log data for the game allows the use of learning analytics methods to examine the splitting process in detail [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. Moreover, the design of the game allows us to modify mathematical and game difficulty semi-independently [<xref ref-type="bibr" rid="ref42">42</xref>].</p>
        <p>This game focuses on teaching fractions and discovering optimal learning pathways for math learning. It lets gamers bend, split, and redirect lasers to power spaceships filled with lost animals. The general integrated instruction is “Help free as many animals as you can by expanding your knowledge of fractions.” As shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>, game elements in Refraction are <italic>origins</italic>, which generate laser beams; <italic>targets</italic>, which receive the laser beams and contain spaceships with lost animals waiting to be released; <italic>pipe bends</italic> that change the laser direction; <italic>2- or 3-way splitters</italic> that split the laser into two or three equal parts (eg, the operation of a 3-way splitter over half of a laser is ½ ÷ 3 =<inline-graphic xlink:href="games_v9i1e21620_fig6.png" xlink:type="simple" mimetype="image"/>); and <italic>obstacles</italic> that prevent the passage of any laser beams.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>The Refraction EVG developed by the research group of the Center for Game Science [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. The game mechanic is to use the pieces on the right to split lasers into fractional pieces and redirect them to the target spaceships.</p>
          </caption>
          <graphic xlink:href="games_v9i1e21620_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Four levels of the Refraction game were selected for experiments and organized into two worlds: world A (levels <italic>L1<sub>a</sub></italic> and <italic>L2<sub>a</sub></italic>), and world B (levels <italic>L3<sub>b</sub></italic> and <italic>L4<sub>b</sub></italic>). As shown in <xref ref-type="table" rid="table1">Table 1</xref>, levels that almost have the same number of game elements were grouped into the same world (ie, <italic>L1<sub>a</sub></italic> and <italic>L2<sub>a</sub></italic> have about the same difficulty level).</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Number of game elements in the selected levels.</p>
          </caption>
          <table border="1" rules="groups" cellpadding="5" frame="hsides" width="1000" cellspacing="0">
            <col width="240"/>
            <col width="190"/>
            <col width="190"/>
            <col width="190"/>
            <col width="190"/>
            <thead>
              <tr valign="top">
                <td rowspan="3">Element</td>
                <td colspan="4">Level</td>
              </tr>
              <tr valign="top">
                <td colspan="2">World A</td>
                <td colspan="2">World B</td>
              </tr>
              <tr valign="top">
                <td>L1<sub>a</sub></td>
                <td>L2<sub>a</sub></td>
                <td>L3<sub>b</sub></td>
                <td>L4<sub>b</sub></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Origins</td>
                <td>1</td>
                <td>1</td>
                <td>1</td>
                <td>3</td>
              </tr>
              <tr valign="top">
                <td>Targets</td>
                <td>1</td>
                <td>2</td>
                <td>2</td>
                <td>3</td>
              </tr>
              <tr valign="top">
                <td>Two-way splitter (orange)</td>
                <td>2</td>
                <td>2</td>
                <td>1</td>
                <td>1</td>
              </tr>
              <tr valign="top">
                <td>Three-way splitter (orange)</td>
                <td>1</td>
                <td>1</td>
                <td>2</td>
                <td>3</td>
              </tr>
              <tr valign="top">
                <td>Pipe bends (blue)</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
              </tr>
              <tr valign="top">
                <td>Obstacles</td>
                <td>10</td>
                <td>10</td>
                <td>13</td>
                <td>10</td>
              </tr>
              <tr valign="top">
                <td>Total elements</td>
                <td>18</td>
                <td>19</td>
                <td>22</td>
                <td>23</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Experiment 1</title>
        <p>The objective of this experiment was to select the best baseline image (ie, a baseline image without semantic information that results in a smaller pupil-size change after the transition from the baseline image to the in-test image). Instances of tested baseline images are shown in <xref rid="figure3" ref-type="fig">Figure 3</xref>; they included the widely used white, black, and scramble backgrounds, but also grid scramble images of different sizes: 8×6, 10×10, and 20×20.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Baseline images tested. (Left) Baseline images can be uniform such as (a) black and (b) white, or can depend on the initial image like (c) scramble, (d) 8×6 grid scramble, (e) 10×10 grid scramble, and (f) 20×20 grid scramble. (Right) The in-test image.</p>
          </caption>
          <graphic xlink:href="games_v9i1e21620_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <sec>
          <title>Participants</title>
          <p>All participants were asked about their general health and were excluded if they wore contact lenses or glasses with more than one power, had eye surgery or abnormalities (eg, lazy eye, strabismus, nystagmus), or used medication or drugs. All participants were Hispanic and brown-eyed. Participants were not asked for personal information to preserve anonymity. A total of 14 volunteers (4 female, 10 male) between 16 and 37 years old (mean 21.81, SD 7.2) participated in this experiment.</p>
        </sec>
        <sec>
          <title>Procedure</title>
          <p>As illustrated in <xref rid="figure4" ref-type="fig">Figure 4</xref>, participants observed a randomly selected baseline image (an image from <xref rid="figure3" ref-type="fig">Figure 3</xref>) for 8 seconds (pupillary response data collected in the last 2 seconds are used as the baseline interval), and then they observed the in-test image for 8 seconds (pupillary data from the last 2 seconds are used as the testing interval).</p>
          <p>The MPDC is used to select the best baseline image (the MPDC definition is shown in <xref ref-type="table" rid="table2">Table 2</xref>). This procedure was repeated until all the baseline images were shown to participants.</p>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>The procedure used to generate pupillary response data for evaluating baselines images. First, the baseline image was shown on the screen for 8 seconds, and then the in-test image was shown.</p>
            </caption>
            <graphic xlink:href="games_v9i1e21620_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>Pupillary and gamer features studied in this experiment.</p>
            </caption>
            <table border="1" rules="groups" cellpadding="5" frame="hsides" width="1000" cellspacing="0">
              <col width="400"/>
              <col width="600"/>
              <thead>
                <tr valign="top">
                  <td>Feature</td>
                  <td>Description</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>TE</td>
                  <td><italic>Total errors</italic> (TE) is the number of events performed in the wrong way (eg, the laser beam value does not match with the input value) on a level.</td>
                </tr>
                <tr valign="top">
                  <td>TC</td>
                  <td><italic>Time to complete a stage</italic> (TC) is the time required to complete a given level.</td>
                </tr>
                <tr valign="top">
                  <td>CP</td>
                  <td><italic>Number of changes of position</italic> (CP). A <italic>change of position</italic> is defined as the movement of a game element once it has been introduced in the gameplay—the area where the video game elements are dragged and dropped.</td>
                </tr>
                <tr valign="top">
                  <td>A</td>
                  <td><italic>Attempts</italic> (A) is the number of attempts used by the gamer to complete a given level.</td>
                </tr>
                <tr valign="top">
                  <td>MPDC</td>
                  <td>The <italic>mean pupil diameter change</italic> is obtained by averaging the relevant data points in the measurement interval (time of the stage) and subtracting the mean diameter obtained in the baseline period [<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>].</td>
                </tr>
                <tr valign="top">
                  <td>PD</td>
                  <td><italic>Peak dilation</italic> (PD) is defined as the maximal dilation obtained in the measurement interval time of the level [<xref ref-type="bibr" rid="ref13">13</xref>]. First, mean baseline is established, then the single maximum value from the set of data points in the measurement interval time of level is selected.</td>
                </tr>
                <tr valign="top">
                  <td>LP</td>
                  <td><italic>Latency to peak</italic> (LP) reflects the amount of time elapsed between the beginning of the measurement interval and emergence of peak dilation [<xref ref-type="bibr" rid="ref13">13</xref>].</td>
                </tr>
                <tr valign="top">
                  <td>APCPS</td>
                  <td><italic>Percentage change in pupil size</italic> (PCPS) is calculated as the difference between the measured pupil size and a baseline pupil size divided by the baseline pupil size [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. The <italic>average PCPS</italic> (APCPS) is the average of PCPS in the measurement interval time of the selected level.</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
        </sec>
        <sec>
          <title>Statistical Analysis</title>
          <p>After Mauchly's test of sphericity, repeated-measures analysis of variance was performed on the normally distributed variables among MPDC values to explore the difference between the black, white, scramble, scramble 8×6, scramble 10×10, and scramble 20×20 baseline images. The Bonferroni test was used to make post hoc pairwise comparisons.</p>
        </sec>
      </sec>
      <sec>
        <title>Experiment 2</title>
        <p>The objective of this experiment was twofold: to evaluate which features are more related to the difficulty level, and to test the classification accuracy obtained by using different subsets of features. Studied features (both pupillary and gamer) of the video game levels (L1<sub>a</sub>, L2<sub>a</sub>, L3<sub>b</sub>, and L4<sub>b</sub>) are defined in <xref ref-type="table" rid="table2">Table 2</xref>.</p>
        <sec>
          <title>Participants</title>
          <p>A total of 20 volunteers (9 female, 11 male) between 23 and 31 years old (mean 27.16, SD 2.6) participated in experiment 2. As in the first experiment, we did not include volunteers with some characteristics that would make pupil-size estimation difficult. None of the subjects who participated in experiment 2 also participated in experiment 1.</p>
        </sec>
        <sec>
          <title>Procedure</title>
          <p>As shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>, the procedure consists of four phases: (1) participants observed the baseline image of world A for 8 seconds; (2) participants played the world A levels (L1<sub>a</sub> and L2<sub>a</sub>) without time restrictions; (3) participants observed the baseline image of world B for 8 seconds; and finally, (4) they played the world B levels (L3<sub>b</sub> and L4<sub>b</sub>) without time restrictions. The pupil baseline was estimated from the data of the last 2 seconds before playing a new world. Pupil size and gamer behavior data were collected along with each play session.</p>
          <fig id="figure5" position="float">
            <label>Figure 5</label>
            <caption>
              <p>The procedure used to evaluate features against difficulty levels in world A (easy) and world B (hard).</p>
            </caption>
            <graphic xlink:href="games_v9i1e21620_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>After obtaining features, all information was integrated into a data set <italic>τ = {(X<sub>i</sub>, Y<sub>i</sub>), i = 1,...,n}</italic>, where <italic>X<sub>i</sub></italic> corresponds to the uniform-length vector containing features <italic>X<sub>i</sub> = (TE<sub>i</sub>, TC<sub>i</sub>, CP<sub>i</sub>, A<sub>i</sub>, MPDC<sub>i</sub>, PD<sub>i</sub>, LP<sub>i</sub>, APCPS<sub>i</sub>)</italic> and <italic>Y<sub>i</sub></italic> corresponds to the label associated to each level difficulty of the world A and world B. Each register of this data set is generated from a player and a single level. The following sets were defined: <italic>G = {TE, TC, CP, A},</italic> which includes all game behavior data features, and <italic>P = {MPDC, PD, LP, APCPS},</italic> which includes all pupillary features. Let <italic>G’</italic>⊆<italic>G</italic> and <italic>P’</italic>⊆<italic>P</italic> be the sets of features with a significant difference between worlds A and B.</p>
          <p>From the 20 participants, 3 (15%) were randomly selected, and their registers in <italic>τ</italic> were used to train a random forest classifier [<xref ref-type="bibr" rid="ref46">46</xref>] using different sets of features. Random forest classifier was selected because it is an ensemble meta-algorithm that improves accuracy and avoids overfitting by training on different random samples of the data. Registers in <italic>τ</italic> associated with the rest of the participants were then used as the testing set.</p>
        </sec>
        <sec>
          <title>Statistical Analysis</title>
          <p>Features were tested for normality; in this case, the Shapiro-Wilk test was used (because of the small sample size). Results show that the variables are not normally distributed. Then, the Wilcoxon signed rank test was used to detect significant differences in variables. Differences between values were considered significant when <italic>P</italic>&#60;.05.</p>
        </sec>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Experiment 1</title>
        <p>Mauchly's test of sphericity indicated that the assumption of sphericity had been violated (<italic>χ</italic><sup>2</sup><sub>14</sub>=0.05; <italic>P</italic>&#60;.01); therefore, a Greenhouse-Geisser correction was used (<italic>ε</italic>=0.47). The results show that there was a significant difference between MPDC estimated from different baseline images (<italic>F</italic><sub>5,78</sub>=30.965; <italic>P</italic>&#60;.001).</p>
        <p><xref ref-type="table" rid="table3">Table 3</xref> shows the descriptive statistics for MPDC calculated for each baseline image. As expected, the 20×20 scrambled filter has the lowest average MPDC (0.32 pixels) as it more closely resembles the original image. Post hoc analyses using the Bonferroni post hoc criterion for significance indicated that there were no MPDC differences for different grid sizes, but there were significant MPDC differences between the group of images generated by the grid scrambled filter, and the group of conventional images used to estimate the baseline (white, black, and scrambled). We chose the 8×6 grid scramble operation for generating baseline images in experiment 2 because there are no differences in MPDC between grid scramble images, and it better obscures the meaning of the in-test image.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Results for the baseline image test (experiment 1). Different superindices indicate significant intergroup differences.</p>
          </caption>
          <table border="1" rules="groups" cellpadding="5" frame="hsides" width="1000" cellspacing="0">
            <col width="400"/>
            <col width="600"/>
            <thead>
              <tr valign="top">
                <td>Baseline image</td>
                <td>MPDC<sup>a</sup> (pixels), mean (SD)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>White<sup>1</sup></td>
                <td>3.356 (2.122)</td>
              </tr>
              <tr valign="top">
                <td>Black<sup>2</sup></td>
                <td>−1.754 (1.452)</td>
              </tr>
              <tr valign="top">
                <td>Scramble<sup>3</sup></td>
                <td>1.620 (0.746)</td>
              </tr>
              <tr valign="top">
                <td>Grid scramble 8×6<sup>4</sup></td>
                <td>0.471 (0.891)</td>
              </tr>
              <tr valign="top">
                <td>Grid scramble 10×10<sup>4</sup></td>
                <td>0.455 (1.392)</td>
              </tr>
              <tr valign="top">
                <td>Grid scramble 20×20<sup>4</sup></td>
                <td>0.320 (0.856)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>MPDC: mean pupil diameter change.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Experiment 2</title>
        <p>We did not find any feature with significant differences in measurements between levels of the same world, neither in the levels of world A (L1<sub>a</sub>, L2<sub>a</sub>) nor in the levels of world B (L3<sub>b</sub>, L4<sub>b</sub>). However, significant differences between worlds were found for the following features: TE between world A (median 0.00) and world B (median 2.50) (<italic>z</italic>=−2.9; <italic>P</italic>=.004); TC between world A (median 43,486) and world B (median 83,970) (<italic>z</italic>=−3.198; <italic>P</italic>=.001); MPDC between world A (median 2.25) and world B (median 2.90) (<italic>z</italic>=−2.159; <italic>P</italic>=.03); and PD between world A (median 5.1) and world B (median 18) (<italic>z</italic>=−3.587; <italic>P</italic>&#60;.001). <xref ref-type="table" rid="table4">Table 4</xref> summarizes the statistics for pupillary and gamer features and the Wilcoxon signed rank results.</p>
        <p>On the other hand, <xref ref-type="table" rid="table5">Table 5</xref> summarizes the accuracy of the random forest classifier. As can be seen, the PD feature alone gives an accuracy of 62.5%. The best accuracy was obtained by using the <italic>G’</italic> ∪ <italic>P’</italic> features, with an accuracy of 87.5%.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Median values for pupillary and gamer measurements, and the Wilcoxon signed rank results.</p>
          </caption>
          <table border="1" rules="groups" cellpadding="5" frame="hsides" width="1000" cellspacing="0">
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td>Feature</td>
                <td>World A, median</td>
                <td>World B, median</td>
                <td>
                  <italic>z</italic>
                </td>
                <td><italic>P</italic> value</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>TE</td>
                <td>0.00</td>
                <td>2.5</td>
                <td>−2.900</td>
                <td>.004</td>
              </tr>
              <tr valign="top">
                <td>TC</td>
                <td>43,486</td>
                <td>83,970</td>
                <td>−3.198</td>
                <td>.001</td>
              </tr>
              <tr valign="top">
                <td>CP</td>
                <td>0.00</td>
                <td>1.00</td>
                <td>−0.382</td>
                <td>.70</td>
              </tr>
              <tr valign="top">
                <td>A</td>
                <td>0.50</td>
                <td>1.00</td>
                <td>−0.282</td>
                <td>.78</td>
              </tr>
              <tr valign="top">
                <td>MPDC</td>
                <td>2.25</td>
                <td>2.90</td>
                <td>−2.159</td>
                <td>.03</td>
              </tr>
              <tr valign="top">
                <td>PD</td>
                <td>5.10</td>
                <td>18.00</td>
                <td>−3.587</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>LP</td>
                <td>40.50</td>
                <td>51.50</td>
                <td>−0.973</td>
                <td>.33</td>
              </tr>
              <tr valign="top">
                <td>APCPS</td>
                <td>0.135</td>
                <td>0.136</td>
                <td>−0.926</td>
                <td>.36</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Results for a random forest classifier using different sets of features.</p>
          </caption>
          <table border="1" rules="groups" cellpadding="5" frame="hsides" width="1000" cellspacing="0">
            <col width="400"/>
            <col width="300"/>
            <col width="300"/>
            <thead>
              <tr valign="top">
                <td>Set</td>
                <td>Features</td>
                <td>Accuracy (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>
                  <italic>G</italic>
                </td>
                <td>TE, TC, CP, A</td>
                <td>75.0</td>
              </tr>
              <tr valign="top">
                <td>
                  <italic>G’</italic>
                </td>
                <td>TE, TC</td>
                <td>75.0</td>
              </tr>
              <tr valign="top">
                <td>
                  <italic>P</italic>
                </td>
                <td>MPDC, PD, LP, APCPS</td>
                <td>50.0</td>
              </tr>
              <tr valign="top">
                <td>
                  <italic>P’</italic>
                </td>
                <td>PD</td>
                <td>62.5</td>
              </tr>
              <tr valign="top">
                <td><italic>G’</italic> ∪ <italic>P’</italic></td>
                <td>TE, TC, PD</td>
                <td>87.5</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Experiment 1</title>
        <p>Pupil-size changes at the beginning of the EVG (when going from the baseline image to the in-test image) can cause the participant's pupil to expand. A change caused by the screen luminescence would hide the change caused by the cognitive load produced by the reasoning task. This change was analyzed using the MPDC in experiment 1; it was found that baseline images with uniform colors (white and black) result in larger changes in pupil size (<xref ref-type="table" rid="table3">Table 3</xref>). The sign values of the MPDC are aligned with the optics of the human eye, as it is posited that pupil size increases when the intensity of environmental light decreases (in the case of black or white images); these changes occur even if baseline images resemble the general illumination conditions of the testing scenario such as the scrambled operation.</p>
        <p>One could expect that a grayscale image, with the same average intensities as the in-test images, gives a good baseline estimator. Results of experiment 1 show that the conventional scrambled image (which has about the same intensities) just gives a rough estimation of the baseline. Alternatively, the proposed grid scrambled operation better estimates the baseline in comparison to the conventional scramble image. A possible explanation is that retinal ganglion cells (the output neurons of the retina) adapt to both image contrast (the range of image intensities) and to spatial correlations within the scene, even at constant mean intensity [<xref ref-type="bibr" rid="ref47">47</xref>]. Hence, predicting the pupil size of an individual in different image scenes is challenging. John et al [<xref ref-type="bibr" rid="ref48">48</xref>] propose a calibration protocol where the participant sees uniform slides of varying grayscale intensities in the range 0-255. We state that a better model could be found by using local and global information from the images.</p>
      </sec>
      <sec>
        <title>Experiment 2</title>
        <p>Many studies have shown that splitting objects is a promising way to teach fractions [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. In any context, splitting items into halves is much more common than dividing into thirds; this could explain why the students prefer halving and struggle with creating thirds [<xref ref-type="bibr" rid="ref43">43</xref>]. The Refraction game uses the process of splitting to teach fractions. As shown in <xref ref-type="table" rid="table1">Table 1</xref>, levels of world A (easy) have fewer 3-way splitters than levels of world B (hard). This means that participants must solve more operations that involve thirds in world B. The difficulty of the Refraction game not only depends on the mathematical operations but on the spatial difficulty. The spatial difficulty is directly correlated to the number of sources and targets; the number of source/target elements is smaller in the world A than in the world B. Results also evidence this change of difficulty, as we observed statistical differences in features G’—including TE and TC.</p>
        <p>A random forest classifier that uses only the best game features, G’, gives an accuracy of 75.0%. This accuracy was improved to 87.5% by using the <italic>peak dilation.</italic> The maximal dilation obtained in the measurement interval is a natural feature of many factors that dilate the pupil, including the cognitive load.</p>
        <p>Pupillary features can be classified into subtractive (those that eliminate individual differences by subtracting the baseline value from the measurement interval, such as <italic>MPDC, PD,</italic> and <italic>LP</italic>) and divisive (those that calculate a ratio of a measurement value to baseline, such as <italic>APCPS</italic>). Subtractive features can be categorized into <italic>size-related</italic>, such as <italic>MPDC</italic> and <italic>PD</italic>, or <italic>time-related</italic>, such as <italic>LP</italic>. Results show that the subtractive size-related features, <italic>MPDC</italic> and <italic>PD,</italic> better describe the difficulty level.</p>
        <p>Hunicke [<xref ref-type="bibr" rid="ref50">50</xref>] states that difficulty adjustments must be implemented in a way such that users do not perceive difficulty changes. However, gamer data are recorded after human perception of difficulty; that is, a control that uses gamer data collected after the player finished each level could not completely fulfill the requirement of being imperceptible.</p>
        <p>The proposed approach improves the accuracy of classification of the perceived difficulty to 87.5%, in contrast to 62.5% with only pupillometry. These results are aligned to other studies that suggest the relationship between pupil change and the level of a game; for instance, by using the Akaike Information Criterion, Strauch et al [<xref ref-type="bibr" rid="ref51">51</xref>] propose that the pupil change is a quadratic function of the levels of Pong.</p>
        <p>Video game difficulty adjustment is game data–dependent (ie, different games require different features). We argue that a generic framework for dynamic difficulty adjustment could be designed by fusing generic game features (such as score, elapsed time, etc) with the information provided by pupillometry. In this way, we can take advantage of ocular data as a general, noninvasive, near real-time option to sense the user perception of difficulty.</p>
        <p>In a traditional pupillometry experiment, the researcher maintains tight control over luminance while manipulating a specific cognitive variable. Reilly et al [<xref ref-type="bibr" rid="ref52">52</xref>] conducted the reverse approach (ie, holding cognitive task demands constant while manipulating luminance). We believe that the reverse approach must be used to obtain a model of the participants’ pupil size in the initial calibration stage by using the grid scrambled images, and then a subtractive approach should be used during the gameplay stage.</p>
      </sec>
      <sec>
        <title>Conclusions and Further Work</title>
        <p>This paper proposes a grid scramble filter to obtain a baseline image that reduces the effect of the screen light reflex on a participant's pupil size. This filter simulates both the local and the mean luminance of a given image. To hide the meaning of an image, the 8×6 grid scramble filter can be used for tests that reasonably keep the same background in each interval. We consider that a more general baseline can be obtained by modeling luminescence factors that affect pupil size. Such a model could be used to estimate cognitive factors that affect the pupils in any setting (eg, a commercial video game).</p>
        <p>Gamer data are a valuable resource for estimating the difficulty of EVGs, but adding cognitive load data measured by pupillary response data improves the accuracy of classifying the difficulty of game levels.</p>
        <p>Using the human perception features from ocular data such as blinks, eye-fixations, and eye-saccade to measure the cognitive load may improve the classification accuracy of difficulty levels and gather imperceptible changes that gamer data can omit [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>].</p>
        <p>A key issue with approaches that estimate a baseline, like the proposed one, is that indoor light conditions and monitor brightness must be the same during the game time. Playing a game in specific conditions is restrictive; to address this, we are working on a model that relates luminescence to different screen configurations (instead of a baseline). This approach can be used in virtual reality headsets. The proposed approach can be included in a more elaborated calibration stage that tests different models of pupil change due to luminance, as in a previous study by Lara-Alvarez and Gonzalez-Herrera [<xref ref-type="bibr" rid="ref55">55</xref>].</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">A</term>
          <def>
            <p>attempts</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">APCPS</term>
          <def>
            <p>average percentage change in pupil size</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CP</term>
          <def>
            <p>number of changes of position</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">EVG</term>
          <def>
            <p>educational video game</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">LP</term>
          <def>
            <p>latency to peak</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">MPDC</term>
          <def>
            <p>mean pupil diameter change</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">PD</term>
          <def>
            <p>peak dilation</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">TC</term>
          <def>
            <p>time to complete a stage</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">TE</term>
          <def>
            <p>total errors</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">TEPR</term>
          <def>
            <p>task-evoked pupillary response</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>We thank the support given through the FORDECYT 296737 project “Consorcio en Inteligencia Artificial” for the publication of this work.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>de Aguilera</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mendiz</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Video games and education: (Education in the Face of a “Parallel School”)</article-title>
          <source>Comput Entertain</source>
          <year>2003</year>
          <month>10</month>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>10</lpage>
          <pub-id pub-id-type="doi">10.1145/950566.950583</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Novak</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Tassell</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Using video game play to improve education-majors’ mathematical performance: An experimental study</article-title>
          <source>Computers in Human Behavior</source>
          <year>2015</year>
          <month>12</month>
          <volume>53</volume>
          <fpage>124</fpage>
          <lpage>130</lpage>
          <pub-id pub-id-type="doi">10.1016/j.chb.2015.07.001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Robillard</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mayer-Crittenden</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Use of Technology as an Innovative Approach to Non-Linguistic Cognitive Therapy</article-title>
          <source>International Journal of Technologies in Learning</source>
          <year>2014</year>
          <volume>20</volume>
          <fpage>267</fpage>
          <lpage>78</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kiili</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Koskinen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lindstedt</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ninaus</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Extending a Digital Fraction Game Piece by Piece with Physical Manipulatives</article-title>
          <source>Games and Learning Alliance</source>
          <year>2019</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer International Publishing</publisher-name>
          <fpage>157</fpage>
          <lpage>166</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ninaus</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kiili</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>McMullen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Moeller</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Assessing fraction knowledge by a digital game</article-title>
          <source>Computers in Human Behavior</source>
          <year>2017</year>
          <month>05</month>
          <volume>70</volume>
          <fpage>197</fpage>
          <lpage>206</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.chb.2017.01.004"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.chb.2017.01.004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Beatty</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Task-evoked pupillary responses, processing load, and the structure of processing resources</article-title>
          <source>Psychol Bull</source>
          <year>1982</year>
          <month>03</month>
          <volume>91</volume>
          <issue>2</issue>
          <fpage>276</fpage>
          <lpage>92</lpage>
          <pub-id pub-id-type="medline">7071262</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sampayo-Vargas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cope</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Byrne</surname>
              <given-names>GJ</given-names>
            </name>
          </person-group>
          <article-title>The effectiveness of adaptive difficulty adjustments on students' motivation and learning in an educational computer game</article-title>
          <source>Comput Educ</source>
          <year>2013</year>
          <volume>69</volume>
          <fpage>452</fpage>
          <lpage>462</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nebel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Beege</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rey</surname>
              <given-names>Gd</given-names>
            </name>
          </person-group>
          <article-title>Competitive Agents and Adaptive Difficulty Within Educational Video Games</article-title>
          <source>Front Educ</source>
          <year>2020</year>
          <month>07</month>
          <day>21</day>
          <publisher-loc>Switzerland</publisher-loc>
          <publisher-name>Frontiers Media S.A</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Burke</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>Using Player Profiling to Enhance Dynamic Difficulty Adjustment in Video Games</source>
          <year>2012</year>
          <month>12</month>
          <access-date>2020-11-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://digitalcommons.calpoly.edu/cgi/viewcontent.cgi?article=1078&#38;context=cpesp">https://digitalcommons.calpoly.edu/cgi/viewcontent.cgi?article=1078&#38;context=cpesp</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zohaib</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Dynamic Difficulty Adjustment (DDA) in Computer Games: A Review</article-title>
          <source>Advances in Human-Computer Interaction</source>
          <year>2018</year>
          <month>11</month>
          <day>01</day>
          <volume>2018</volume>
          <fpage>1</fpage>
          <lpage>12</lpage>
          <pub-id pub-id-type="doi">10.1155/2018/5681652</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gavas</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tripathy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chatterjee</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sinha</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Cognitive load and metacognitive confidence extraction from pupillary response</article-title>
          <source>Cognitive Systems Research</source>
          <year>2018</year>
          <month>12</month>
          <volume>52</volume>
          <fpage>325</fpage>
          <lpage>334</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.cogsys.2018.07.021"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cogsys.2018.07.021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Csikszentmihalyi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <source>Applications of Flow in Human Development Education</source>
          <year>2014</year>
          <publisher-loc>Dordrecht</publisher-loc>
          <publisher-name>Springer Netherlands</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Beatty</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lucero-Wagoner</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Cacioppo</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Tassinary</surname>
              <given-names>LG</given-names>
            </name>
            <name name-style="western">
              <surname>Berntson</surname>
              <given-names>GG</given-names>
            </name>
          </person-group>
          <article-title>The pupillary system</article-title>
          <source>Handbook of psychophysiology</source>
          <year>2012</year>
          <publisher-loc>Cambridge</publisher-loc>
          <publisher-name>Cambridge University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barbur</surname>
              <given-names>John</given-names>
            </name>
          </person-group>
          <article-title>Learning from the pupil-studies of basic mechanisms and clinical applications</article-title>
          <source>Vis Neurosci</source>
          <year>2004</year>
          <volume>1</volume>
          <fpage>641</fpage>
          <lpage>656</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Laurenzo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kardon</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ledolter</surname>
              <given-names>Johannes</given-names>
            </name>
            <name name-style="western">
              <surname>Poolman</surname>
              <given-names>Pieter</given-names>
            </name>
            <name name-style="western">
              <surname>Schumacher</surname>
              <given-names>Ashley M</given-names>
            </name>
            <name name-style="western">
              <surname>Potash</surname>
              <given-names>James B</given-names>
            </name>
            <name name-style="western">
              <surname>Full</surname>
              <given-names>Jan M</given-names>
            </name>
            <name name-style="western">
              <surname>Rice</surname>
              <given-names>Olivia</given-names>
            </name>
            <name name-style="western">
              <surname>Ketcham</surname>
              <given-names>Anna</given-names>
            </name>
            <name name-style="western">
              <surname>Starkey</surname>
              <given-names>Cole</given-names>
            </name>
            <name name-style="western">
              <surname>Fiedorowicz</surname>
              <given-names>Jess G</given-names>
            </name>
          </person-group>
          <article-title>Pupillary response abnormalities in depressive disorders</article-title>
          <source>Psychiatry Res</source>
          <year>2016</year>
          <month>12</month>
          <day>30</day>
          <volume>246</volume>
          <fpage>492</fpage>
          <lpage>499</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/27821359"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.psychres.2016.10.039</pub-id>
          <pub-id pub-id-type="medline">27821359</pub-id>
          <pub-id pub-id-type="pii">S0165-1781(16)30903-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC5161673</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morris</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Granholm</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Sarkin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jeste</surname>
              <given-names>D V</given-names>
            </name>
          </person-group>
          <article-title>Effects of schizophrenia and aging on pupillographic measures of working memory</article-title>
          <source>Schizophr Res</source>
          <year>1997</year>
          <month>10</month>
          <day>30</day>
          <volume>27</volume>
          <issue>2-3</issue>
          <fpage>119</fpage>
          <lpage>28</lpage>
          <pub-id pub-id-type="doi">10.1016/S0920-9964(97)00065-0</pub-id>
          <pub-id pub-id-type="medline">9416642</pub-id>
          <pub-id pub-id-type="pii">S0920-9964(97)00065-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abokyi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Owusu-Mensah</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Osei</surname>
              <given-names>K A</given-names>
            </name>
          </person-group>
          <article-title>Caffeine intake is associated with pupil dilation and enhanced accommodation</article-title>
          <source>Eye (Lond)</source>
          <year>2017</year>
          <month>04</month>
          <volume>31</volume>
          <issue>4</issue>
          <fpage>615</fpage>
          <lpage>619</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/27983733"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/eye.2016.288</pub-id>
          <pub-id pub-id-type="medline">27983733</pub-id>
          <pub-id pub-id-type="pii">eye2016288</pub-id>
          <pub-id pub-id-type="pmcid">PMC5396006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kvamme</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pedersen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Overgaard</surname>
              <given-names>Morten</given-names>
            </name>
            <name name-style="western">
              <surname>Rømer Thomsen</surname>
              <given-names>Kristine</given-names>
            </name>
            <name name-style="western">
              <surname>Voon</surname>
              <given-names>Valerie</given-names>
            </name>
          </person-group>
          <article-title>Pupillary reactivity to alcohol cues as a predictive biomarker of alcohol relapse following treatment in a pilot study</article-title>
          <source>Psychopharmacology (Berl)</source>
          <year>2019</year>
          <month>04</month>
          <volume>236</volume>
          <issue>4</issue>
          <fpage>1233</fpage>
          <lpage>1243</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/30607476"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00213-018-5131-1</pub-id>
          <pub-id pub-id-type="medline">30607476</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00213-018-5131-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC6591462</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Slattery</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liebelt</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gaines</surname>
              <given-names>LaDonna A</given-names>
            </name>
          </person-group>
          <article-title>Common ocular effects reported to a poison control center after systemic absorption of drugs in therapeutic and toxic doses</article-title>
          <source>Curr Opin Ophthalmol</source>
          <year>2014</year>
          <month>11</month>
          <volume>25</volume>
          <issue>6</issue>
          <fpage>519</fpage>
          <lpage>523</lpage>
          <pub-id pub-id-type="doi">10.1097/ICU.0000000000000103</pub-id>
          <pub-id pub-id-type="medline">25226509</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hess</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Polt</surname>
              <given-names>J M</given-names>
            </name>
          </person-group>
          <article-title>Pupil Size in Relation to Mental Activity during Simple Problem-Solving</article-title>
          <source>Science</source>
          <year>1964</year>
          <month>03</month>
          <day>13</day>
          <volume>143</volume>
          <issue>3611</issue>
          <fpage>1190</fpage>
          <lpage>2</lpage>
          <pub-id pub-id-type="doi">10.1126/science.143.3611.1190</pub-id>
          <pub-id pub-id-type="medline">17833905</pub-id>
          <pub-id pub-id-type="pii">143/3611/1190</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Szulewski</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Roth</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Howes</surname>
              <given-names>Daniel</given-names>
            </name>
          </person-group>
          <article-title>The Use of Task-Evoked Pupillary Response as an Objective Measure of Cognitive Load in Novices and Trained Physicians: A New Tool for the Assessment of Expertise</article-title>
          <source>Acad Med</source>
          <year>2015</year>
          <month>07</month>
          <volume>90</volume>
          <issue>7</issue>
          <fpage>981</fpage>
          <lpage>7</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1097/ACM.0000000000000677"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/ACM.0000000000000677</pub-id>
          <pub-id pub-id-type="medline">25738386</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lallé</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Toker</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Conati</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Carenini</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Prediction of Users' Learning Curves for Adaptation while Using an Information Visualization</article-title>
          <year>2015</year>
          <conf-name>20th International Conference on Intelligent User Interfaces</conf-name>
          <conf-date>March, 2015</conf-date>
          <conf-loc>Atlanta, Georgia</conf-loc>
          <fpage>368</fpage>
          <lpage>368</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Epps</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>A comparison of four methods for cognitive load measurement</article-title>
          <year>2011</year>
          <conf-name>23rd Australian Computer-Human Interaction Conference</conf-name>
          <conf-date>November, 2011</conf-date>
          <conf-loc>Canberra, Australia</conf-loc>
          <pub-id pub-id-type="doi">10.1145/2071536.2071547</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kun</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Palinko</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Medenica</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Heeman</surname>
              <given-names>PA</given-names>
            </name>
          </person-group>
          <article-title>On the feasibility of using pupil diameter to estimate cognitive load changes for in-vehicle spoken dialogues</article-title>
          <year>2013</year>
          <conf-name>Annual Conference of the International Speech Communication Association, INTERSPEECH</conf-name>
          <conf-date>August 2013</conf-date>
          <conf-loc>Lyon, France</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Palinko</surname>
              <given-names>Oskar</given-names>
            </name>
            <name name-style="western">
              <surname>Kun</surname>
              <given-names>Andrew</given-names>
            </name>
            <name name-style="western">
              <surname>Shyrokov</surname>
              <given-names>Alexander</given-names>
            </name>
            <name name-style="western">
              <surname>Heeman</surname>
              <given-names>Peter</given-names>
            </name>
          </person-group>
          <article-title>Estimating Cognitive Load Using Remote Eye Tracking in a Driving Simulator</article-title>
          <year>2010</year>
          <conf-name>Symposium on Eye-Tracking Research &#38; Applications</conf-name>
          <conf-date>March, 2010</conf-date>
          <conf-loc>Austin Texas</conf-loc>
          <fpage>141</fpage>
          <lpage>144</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Palinko</surname>
              <given-names>Oskar</given-names>
            </name>
            <name name-style="western">
              <surname>Kun</surname>
              <given-names>Andrew L</given-names>
            </name>
          </person-group>
          <article-title>Exploring the effects of visual cognitive load illumination on pupil diameter in driving simulators</article-title>
          <year>2012</year>
          <conf-name>Symposium on Eye Tracking Research Applications</conf-name>
          <conf-date>March 2012</conf-date>
          <conf-loc>Santa Barbara California</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Winn</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Whitaker</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Elliott</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Phillips</surname>
              <given-names>N J</given-names>
            </name>
          </person-group>
          <article-title>Factors affecting light-adapted pupil size in normal human subjects</article-title>
          <source>Invest Ophthalmol Vis Sci</source>
          <year>1994</year>
          <month>03</month>
          <volume>35</volume>
          <issue>3</issue>
          <fpage>1132</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="medline">8125724</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tsukahara</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Harrison</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Engle</surname>
              <given-names>Randall W</given-names>
            </name>
          </person-group>
          <article-title>The relationship between baseline pupil size and intelligence</article-title>
          <source>Cogn Psychol</source>
          <year>2016</year>
          <month>12</month>
          <volume>91</volume>
          <fpage>109</fpage>
          <lpage>123</lpage>
          <pub-id pub-id-type="doi">10.1016/j.cogpsych.2016.10.001</pub-id>
          <pub-id pub-id-type="medline">27821254</pub-id>
          <pub-id pub-id-type="pii">S0010-0285(16)30058-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Klingner</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tversky</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hanrahan</surname>
              <given-names>Pat</given-names>
            </name>
          </person-group>
          <article-title>Effects of visual and verbal presentation on cognitive load in vigilance, memory, and arithmetic tasks</article-title>
          <source>Psychophysiology</source>
          <year>2011</year>
          <month>03</month>
          <volume>48</volume>
          <issue>3</issue>
          <fpage>323</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1469-8986.2010.01069.x</pub-id>
          <pub-id pub-id-type="medline">20718934</pub-id>
          <pub-id pub-id-type="pii">PSYP1069</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bradley</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lang</surname>
              <given-names>PJ</given-names>
            </name>
          </person-group>
          <article-title>Memory, emotion, and pupil diameter: Repetition of natural scenes</article-title>
          <source>Psychophysiology</source>
          <year>2015</year>
          <month>09</month>
          <volume>52</volume>
          <issue>9</issue>
          <fpage>1186</fpage>
          <lpage>93</lpage>
          <pub-id pub-id-type="doi">10.1111/psyp.12442</pub-id>
          <pub-id pub-id-type="medline">25943211</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>Shamsi T</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Xianjun Sam</given-names>
            </name>
            <name name-style="western">
              <surname>Bailey</surname>
              <given-names>Brian P.</given-names>
            </name>
          </person-group>
          <article-title>Task-Evoked Pupillary Response to Mental Workload in Human-Computer Interaction</article-title>
          <year>2004</year>
          <conf-name>CHI</conf-name>
          <conf-date>24-29 April, 2004</conf-date>
          <conf-loc>Vienna, Austria</conf-loc>
          <fpage>1477</fpage>
          <lpage>1480</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Unsworth</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Robison</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>Ashley L</given-names>
            </name>
          </person-group>
          <article-title>Individual differences in baseline oculometrics: Examining variation in baseline pupil diameter, spontaneous eye blink rate, and fixation stability</article-title>
          <source>Cogn Affect Behav Neurosci</source>
          <year>2019</year>
          <month>08</month>
          <volume>19</volume>
          <issue>4</issue>
          <fpage>1074</fpage>
          <lpage>1093</lpage>
          <pub-id pub-id-type="doi">10.3758/s13415-019-00709-z</pub-id>
          <pub-id pub-id-type="medline">30888645</pub-id>
          <pub-id pub-id-type="pii">10.3758/s13415-019-00709-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Winn</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wendt</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Koelewijn</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kuchinsky</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>Best Practices and Advice for Using Pupillometry to Measure Listening Effort: An Introduction for Those Who Want to Get Started</article-title>
          <source>Trends Hear</source>
          <year>2018</year>
          <volume>22</volume>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/2331216518800869?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/2331216518800869</pub-id>
          <pub-id pub-id-type="medline">30261825</pub-id>
          <pub-id pub-id-type="pmcid">PMC6166306</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marquart</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Cabrall</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>de Winter</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Review of Eye-related Measures of Drivers’ Mental Workload</article-title>
          <source>Procedia Manufacturing</source>
          <year>2015</year>
          <volume>3</volume>
          <fpage>2854</fpage>
          <lpage>2861</lpage>
          <pub-id pub-id-type="doi">10.1016/j.promfg.2015.07.783</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kohn</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Clynes</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Color dynamics of the pupil</article-title>
          <source>Ann N Y Acad Sci</source>
          <year>1969</year>
          <month>04</month>
          <day>21</day>
          <volume>156</volume>
          <issue>2</issue>
          <fpage>931</fpage>
          <lpage>50</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1749-6632.1969.tb14024.x</pub-id>
          <pub-id pub-id-type="medline">5258025</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Attard-Johnson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bindemann</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ó Ciardha</surname>
              <given-names>Caoilte</given-names>
            </name>
          </person-group>
          <article-title>Pupillary Response as an Age-Specific Measure of Sexual Interest</article-title>
          <source>Arch Sex Behav</source>
          <year>2016</year>
          <month>05</month>
          <volume>45</volume>
          <issue>4</issue>
          <fpage>855</fpage>
          <lpage>870</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/26857377"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10508-015-0681-3</pub-id>
          <pub-id pub-id-type="medline">26857377</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10508-015-0681-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC4820473</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Can pupil size be measured to assess design products?</article-title>
          <source>International Journal of Industrial Ergonomics</source>
          <year>2014</year>
          <month>05</month>
          <volume>44</volume>
          <issue>3</issue>
          <fpage>436</fpage>
          <lpage>441</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.ergon.2014.01.009"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ergon.2014.01.009</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Evans</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fendley</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A multi-measure approach for connecting cognitive workload and automation</article-title>
          <source>International Journal of Human-Computer Studies</source>
          <year>2017</year>
          <month>01</month>
          <volume>97</volume>
          <fpage>182</fpage>
          <lpage>189</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2016.05.008</pub-id>
          <pub-id pub-id-type="medline">28764821</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Imamiya</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mao</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Using multiple data sources to get closer insights into user cost and task performance</article-title>
          <source>Interacting with Computers</source>
          <year>2008</year>
          <month>05</month>
          <volume>20</volume>
          <issue>3</issue>
          <fpage>364</fpage>
          <lpage>374</lpage>
          <pub-id pub-id-type="doi">10.1016/j.intcom.2007.12.002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bojko</surname>
              <given-names>Aga</given-names>
            </name>
          </person-group>
          <source>Eye Tracking the User Experience: A Practical Guide to Research</source>
          <year>2013</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Rosenfeld Media</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Epps</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A Comparison of Methods for Mitigating Within-Task Luminance Change for Eyewear-Based Cognitive Load Measurement</article-title>
          <source>IEEE Trans. Cogn. Dev. Syst</source>
          <year>2020</year>
          <month>12</month>
          <volume>12</volume>
          <issue>4</issue>
          <fpage>681</fpage>
          <lpage>694</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/TCDS.2018.2876348"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TCDS.2018.2876348</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Andersen</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Optimizing Adaptivity in Educational Games</article-title>
          <year>2012</year>
          <conf-name>International Conference on the Foundations of Digital Games</conf-name>
          <conf-date>May, 2012</conf-date>
          <conf-loc>Raleigh North Carolina</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Petrick Smith</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Forsgren</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Aghababyan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Janisiewicz</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Learning Fractions by Splitting: Using Learning Analytics to Illuminate the Development of Mathematical Understanding</article-title>
          <source>Journal of the Learning Sciences</source>
          <year>2015</year>
          <month>08</month>
          <day>14</day>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>593</fpage>
          <lpage>637</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1080/10508406.2015.1078244"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/10508406.2015.1078244</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>The Handbook of Cognition and Assessment; Frameworks, Methodologies, and Applications</article-title>
          <source>Assessment in Education: Principles, Policy &#38; Practice</source>
          <year>2019</year>
          <month>03</month>
          <day>27</day>
          <volume>26</volume>
          <issue>5</issue>
          <fpage>630</fpage>
          <lpage>635</lpage>
          <pub-id pub-id-type="doi">10.1080/0969594X.2019.1597679</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>Shamsi T</given-names>
            </name>
            <name name-style="western">
              <surname>Adamczyk</surname>
              <given-names>Piotr D</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Xianjun Sam</given-names>
            </name>
            <name name-style="western">
              <surname>Bailey</surname>
              <given-names>Brian Patrick</given-names>
            </name>
          </person-group>
          <article-title>Towards an index of opportunity: understanding changes in mental workload during task execution</article-title>
          <year>2005</year>
          <conf-name>Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>April 2005</conf-date>
          <conf-loc>Portland, Oregon</conf-loc>
          <fpage>311</fpage>
          <lpage>320</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1145/1054972.1055016"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/1054972.1055016</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Breiman</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Random Forests</article-title>
          <source>Mach Learn</source>
          <year>2001</year>
          <volume>45</volume>
          <issue>1</issue>
          <fpage>5</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Smirnakis</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Berry</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Warland</surname>
              <given-names>D K</given-names>
            </name>
            <name name-style="western">
              <surname>Bialek</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Meister</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Adaptation of retinal processing to image contrast and spatial scale</article-title>
          <source>Nature</source>
          <year>1997</year>
          <month>03</month>
          <day>06</day>
          <volume>386</volume>
          <issue>6620</issue>
          <fpage>69</fpage>
          <lpage>73</lpage>
          <pub-id pub-id-type="doi">10.1038/386069a0</pub-id>
          <pub-id pub-id-type="medline">9052781</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>John</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Raiturkar</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Banerjee</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>An evaluation of pupillary light response models for 2D screens and VR HMDs</article-title>
          <year>2018</year>
          <conf-name>24th ACM Symposium on Virtual Reality Software and Technology</conf-name>
          <conf-date>November, 2018</conf-date>
          <conf-loc>Tokyo Japan</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Olive</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Steffe</surname>
              <given-names>LP</given-names>
            </name>
          </person-group>
          <article-title>The construction of an iterative fractional scheme: the case of Joe</article-title>
          <source>The Journal of Mathematical Behavior</source>
          <year>2001</year>
          <month>01</month>
          <volume>20</volume>
          <issue>4</issue>
          <fpage>413</fpage>
          <lpage>437</lpage>
          <pub-id pub-id-type="doi">10.1016/S0732-3123(02)00086-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hunicke</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>The case for dynamic difficulty adjustment in games</article-title>
          <year>2005</year>
          <conf-name>ACM SIGCHI International Conference on Advances in computer entertainment technology</conf-name>
          <conf-date>June, 2005</conf-date>
          <conf-loc>Valencia Spain</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Strauch</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Barthelmaes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Altgassen</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Huckauf</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Pupil Dilation Fulfills the Requirements for Dynamic Difficulty Adjustment in Gaming on the Example of Pong</article-title>
          <year>2020</year>
          <conf-name>ACM Symposium on Eye Tracking Research and Applications</conf-name>
          <conf-date>2020</conf-date>
          <conf-loc>Virtual Event</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Reilly</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kelly</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Jett</surname>
              <given-names>Savannah</given-names>
            </name>
            <name name-style="western">
              <surname>Zuckerman</surname>
              <given-names>Bonnie</given-names>
            </name>
          </person-group>
          <article-title>The human task-evoked pupillary response function is linear: Implications for baseline response scaling in pupillometry</article-title>
          <source>Behav Res Methods</source>
          <year>2019</year>
          <month>04</month>
          <volume>51</volume>
          <issue>2</issue>
          <fpage>865</fpage>
          <lpage>878</lpage>
          <pub-id pub-id-type="doi">10.3758/s13428-018-1134-4</pub-id>
          <pub-id pub-id-type="medline">30264368</pub-id>
          <pub-id pub-id-type="pii">10.3758/s13428-018-1134-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zargari Marandi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Madeleine</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Omland</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Vuillerme</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Samani</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Reliability of Oculometrics During a Mentally Demanding Task in Young and Old Adults</article-title>
          <source>IEEE Access</source>
          <year>2018</year>
          <volume>6</volume>
          <fpage>17500</fpage>
          <lpage>17517</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/ACCESS.2018.2819211"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2018.2819211</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Di Nocera</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Camilli</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Terenzi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Using the Distribution of Eye Fixations to Assess Pilots' Mental Workload</article-title>
          <source>Proceedings of the Human Factors and Ergonomics Society Annual Meeting</source>
          <year>2016</year>
          <month>11</month>
          <day>05</day>
          <conf-name>Proc Hum Factors Ergon Soc Annu Meet</conf-name>
          <conf-date>September  2016</conf-date>
          <conf-loc>Washington, DC</conf-loc>
          <fpage>63</fpage>
          <lpage>65</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1177/154193120605000114"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/154193120605000114</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lara-Alvarez</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gonzalez-Herrera</surname>
              <given-names>Fernando</given-names>
            </name>
          </person-group>
          <article-title>Testing multiple polynomial models for eye-tracker calibration</article-title>
          <source>Behav Res Methods</source>
          <year>2020</year>
          <month>12</month>
          <volume>52</volume>
          <issue>6</issue>
          <fpage>2506</fpage>
          <lpage>2514</lpage>
          <pub-id pub-id-type="doi">10.3758/s13428-020-01371-x</pub-id>
          <pub-id pub-id-type="medline">32468282</pub-id>
          <pub-id pub-id-type="pii">10.3758/s13428-020-01371-x</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
