<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JSG</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Serious Games</journal-id>
      <journal-title>JMIR Serious Games</journal-title>
      <issn pub-type="epub">2291-9279</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i4e40119</article-id>
      <article-id pub-id-type="pmid">36346658</article-id>
      <article-id pub-id-type="doi">10.2196/40119</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Impact of Personalized Avatars and Motion Synchrony on Embodiment and Users’ Subjective Experience: Empirical Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Zary</surname>
            <given-names>Nabil</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Han</surname>
            <given-names>Sanghoon</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Nöthiger</surname>
            <given-names>Christoph</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Jung</surname>
            <given-names>Myeongul</given-names>
          </name>
          <degrees>BS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9432-103X</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Sim</surname>
            <given-names>Sangyong</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7366-3900</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Jejoong</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1196-701X</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Kwanguk</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Computer Science</institution>
            <institution>Hanyang University</institution>
            <addr-line>222, Wangsimni-ro, Seongdong-gu</addr-line>
            <addr-line>Seoul, 04763</addr-line>
            <country>Republic of Korea</country>
            <phone>82 222202319</phone>
            <email>kenny@hanyang.ac.kr</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-4184-2058</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Computer Science</institution>
        <institution>Hanyang University</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Psychology</institution>
          <institution>Duksung Women’s University</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Kwanguk Kim <email>kenny@hanyang.ac.kr</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Oct-Dec</season>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>8</day>
        <month>11</month>
        <year>2022</year>
      </pub-date>
      <volume>10</volume>
      <issue>4</issue>
      <elocation-id>e40119</elocation-id>
      <history>
        <date date-type="received">
          <day>7</day>
          <month>6</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>1</day>
          <month>9</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>7</day>
          <month>9</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>11</day>
          <month>10</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Myeongul Jung, Sangyong Sim, Jejoong Kim, Kwanguk Kim. Originally published in JMIR Serious Games (https://games.jmir.org), 08.11.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Serious Games, is properly cited. The complete bibliographic information, a link to the original publication on https://games.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://games.jmir.org/2022/4/e40119" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Embodiment through a virtual avatar is a key element for people to feel that they are in the virtual world.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to elucidate the interaction between 2 methods of eliciting embodiment through a virtual avatar: motion synchronization and appearance similarity between a human and avatar, to understand embodiment (agency, body ownership, and self-location) and subjective experience (presence, simulator sickness, and emotion) in virtual reality.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>Using a full-body motion capture system, 24 participants experienced their virtual avatars with a 3D-scanned face and size-matched body from a first-person perspective. This study used a 2 (motion; sync and async) × 2 (appearance; personalized and generic) within-subject design.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The results indicated that agency and body ownership increased when motion and appearance were matched, whereas self-location, presence, and emotion were affected by motion only. Interestingly, if the avatar’s appearance was similar to the participant’s (personalized avatar), they formed an agency toward the avatar’s motion that was not performed by themselves.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Our findings would be applicable in the field of behavioral therapy, rehabilitation, and entertainment applications, by eliciting higher agency with a personalized avatar.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>embodiment</kwd>
        <kwd>virtual reality</kwd>
        <kwd>virtual avatar</kwd>
        <kwd>personalization</kwd>
        <kwd>personalized</kwd>
        <kwd>body motion</kwd>
        <kwd>presence</kwd>
        <kwd>simulator sickness</kwd>
        <kwd>simulator</kwd>
        <kwd>simulation</kwd>
        <kwd>avatar</kwd>
        <kwd>motion</kwd>
        <kwd>body ownership</kwd>
        <kwd>self location</kwd>
        <kwd>agency</kwd>
        <kwd>experience</kwd>
        <kwd>virtual world</kwd>
        <kwd>immersive</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Recent developments in technology have made humans citizens of the virtual world (widely known as the “metaverse”). People use virtual reality (VR) devices including head-mounted displays (HMDs) to wander around the digital world. Additionally, people use an entity such as a virtual avatar in VR to overcome limitations that their physical body cannot achieve. However, it is difficult for people to feel that they are the owner of this newly created body. This feeling of owning the body is called embodiment [<xref ref-type="bibr" rid="ref1">1</xref>]. Embodiment consists of 3 components: body ownership (feeling that the body undergoing a certain experience is subordinate to oneself [<xref ref-type="bibr" rid="ref2">2</xref>]), agency (feeling that “I” am the cause for body motion [<xref ref-type="bibr" rid="ref2">2</xref>]), and self-location (“My view is located at the place where it should be” [<xref ref-type="bibr" rid="ref1">1</xref>]).</p>
      <p>Previous studies have found that it is possible to form a sense of embodiment toward external objects such as a rubber hand [<xref ref-type="bibr" rid="ref3">3</xref>], virtual arm [<xref ref-type="bibr" rid="ref4">4</xref>], and virtual body [<xref ref-type="bibr" rid="ref5">5</xref>]. To induce this illusory feeling of embodiment, several studies [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref8">8</xref>] have used a motion-capture device for measuring a body movement and an HMD for observing a virtual avatar’s actions with first-person perspective to synchronize a human activity with the movement of the virtual avatar. These studies showed that motion synchrony increased the degree of agency and found that people felt an ownership toward the virtual body used in the experiments. In addition, increased agency could make people feel a greater presence with less simulator sickness in VR [<xref ref-type="bibr" rid="ref7">7</xref>]. Various studies have used the motion synchronization method to induce size perception [<xref ref-type="bibr" rid="ref6">6</xref>] and emotion [<xref ref-type="bibr" rid="ref5">5</xref>], as well as reduce racial bias [<xref ref-type="bibr" rid="ref9">9</xref>].</p>
      <p>Other studies have developed a method of enhancing embodiment by increasing appearance similarity between a real body and virtual body [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. For instance, Kim et al [<xref ref-type="bibr" rid="ref11">11</xref>] demonstrated a body size–matching method using 4 measurements of height, shoulder width, belly width, and hip width. Likewise, Gorisse et al [<xref ref-type="bibr" rid="ref10">10</xref>] attempted to increase appearance similarity by attaching a 3D-scanned head to a virtual body, and furthermore, Waltemate et al [<xref ref-type="bibr" rid="ref12">12</xref>] presented a “personalized avatar” by attaching a 3D-scanned head to a participant-sized body. These studies found that the more an avatar’s body resembled a human’s, the more people felt ownership of the virtual avatar’s body [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. This increment in ownership resulted in higher presence [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>] and decreased simulator sickness [<xref ref-type="bibr" rid="ref11">11</xref>], which could increase people’s virtual experience. In line with this, Jung et al [<xref ref-type="bibr" rid="ref8">8</xref>] showed that it is possible to measure one’s body-related perception by using a virtual body resembling the individual’s physical characteristics.</p>
      <p>Although embodiment can be enhanced with motion synchrony (factor 1) and appearance similarity (factor 2), there has been little research to discover the interaction effect between these 2 factors. Kim et al [<xref ref-type="bibr" rid="ref1">1</xref>] demonstrated that the 2 factors affected body ownership independently using a dot avatar and body size–matched avatar. Moreover, they found that the factors affecting the subcomponents of embodiment could boost each other, which can be beneficial to future applications of VR. As we can create more personalized avatars using technologies such as 3D scanning [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref14">14</xref>], it is worth investigating whether the boosting effect between the 2 factors would be maintained with more individualized avatars.</p>
      <p>This study aimed to detect the effects of 2 factors (motion synchrony and appearance similarity) on embodiment and their secondary effect on virtual experiences. Our first hypothesis is that motion synchronization would increase the feeling of agency and body ownership toward the virtual body. The second hypothesis is that the appearance similarity would increase body ownership toward the virtual body. We also speculate that there are potential interactions between the 2 factors. Third, in the higher embodiment condition, the participants would have a more positive subjective experience toward VR comprising presence, simulator sickness, and emotion.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Participants</title>
        <p>In all, 24 participants (age: mean 23.29, SD 1.97 years; female: n=12, 50%) were recruited for the study. A power analysis using PASS power analysis and sample size software (version 2019; NCSS) was run to determine sample size. All participants provided written informed consent and were compensated with US $40 for their cooperation.</p>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>All experimental protocols were approved by the Hanyang University Institutional Review Board (HYUIRB-202107-014-1). Photographed individuals in the figures provided written informed consent to publish the case details.</p>
      </sec>
      <sec>
        <title>Hardware Setup</title>
        <p>To generate synchrony between the participant and avatar, a motion-capture system (Motive; version 2.0.2; Natural Point) and 18 Flex 13 cameras (Natural Point) were used. Participants wore a full-body motion-capture suit with 37 reflective markers. To generate a 3D-scanned face, images of the participant’s face were captured using a mobile device (iPhone SE2; Apple Inc). With the captured images, the participant’s 3D face model was generated through the Metashape (Agisoft LLC) and Blender (Blender Institute) software. The virtual environment of the experiment was implemented using Unity software (version 2018.3.0f2; Unity Technologies). The software was run on a desktop PC (Windows 10 OS; Microsoft) with Intel Core i7-6700 (Intel) CPU, 16GB RAM (Samsung), and NVIDIA GeForce GTX 3070 (Nvidia) GPU.</p>
      </sec>
      <sec>
        <title>Virtual Environment</title>
        <p>Participants wore a motion-capture suit and HMD (HTC Vive Pro eye; HTC) to experience the virtual environment. The virtual environment was a small room, about 4 m (width) × 4 m (length) × 2.5 m (height), and a virtual mirror was set in front of the participant’s location. A participant could observe the virtual avatar’s body either directly through first-person perspective or by looking in the mirror. The virtual environment and experimental settings are presented in <xref rid="figure1" ref-type="fig">Figure 1</xref>.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Virtual environment (A) and experimental settings (B) of this study. (A) illustrates the virtual environment from the participant’s perspective. A virtual mirror was set in front of the participant’s location. (B) illustrates the experimental setting of each participant, where each participant wears a motion-capture suit with 37 reflective markers attached and a head-mounted display for observing their virtual avatar’s movement in first-person perspective.</p>
          </caption>
          <graphic xlink:href="games_v10i4e40119_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Procedures</title>
        <sec>
          <title>Avatar Creation Phase</title>
          <p>In this phase, the personalized avatar was created through the 3D scanning of participants’ faces. Before the scanning, each participant completed the questionnaires for demographic information. After completing the questionnaires, the experimenter measured the participant’s height, shoulder width, belly width, and hip width. They then underwent face scanning, where the experimenter recorded the participant’s face while rotating around them in diverse angles for approximately 1 minute. A total of 300 frame images (3840 × 2160 pixels of resolution) of the participant’s face in different angles were selected. Subsequently, images were processed by the Metashape program to build 3D point-cloud data. The point-cloud data were then simplified into 3D geometry and texture of each participant’s face, and then their 3D face model was merged with an existing virtual avatar body. Finally, the participants’ body sizes were applied to a virtual avatar’s body to produce a personalized avatar. <xref rid="figure2" ref-type="fig">Figure 2</xref> shows the process of the avatar creation.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Procedure for avatar creation. (A) 300 facial image data of diverse angles in 4K resolution are acquired through a mobile device. (B) Point-cloud data are extracted from the images and processed into a face model by the Metashape program. (C) Each participant’s face model is merged with an avatar body, and the avatar’s body size is adjusted to that of the participant.</p>
            </caption>
            <graphic xlink:href="games_v10i4e40119_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Experimental Phase</title>
          <p>This experiment had a 2 (motion; sync vs async) × 2 (appearance; personalized vs generic) within-subject design to examine the main effects of motion synchrony and appearance similarity and the interaction effect between them. In the motion “sync” condition, the virtual avatar’s body moved according to the participant’s movement, whereas in the motion “async” condition, the virtual avatar moved according to prerecorded movements, regardless of the participant’s movement. For appearance similarity (or personalized appearance), a virtual avatar consisting of a participant’s 3D-scanned head with a size- and gender-matched body was used. In the generic appearance condition, only a gender-matched avatar was used. <xref rid="figure3" ref-type="fig">Figure 3</xref> shows examples of personalized and generic avatars used for a male participant.</p>
          <p>During the experimental phase, participants underwent 4 blocks of VR experiences (ie, sync-personalized, sync-generic, async-personalized, and async-generic); the order of the blocks was counterbalanced using the Latin-square method. Participants were asked to move freely and observed a virtual avatar for 5 minutes during each block. After each block, the participants completed the similarity, embodiment, and virtual experience questionnaires listed below. Between blocks, participants rested until they felt ready to start the next block. The entire procedure lasted approximately 90 minutes.</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>Example of a (A) generic avatar and (B) personalized avatar for the (C) real person.</p>
            </caption>
            <graphic xlink:href="games_v10i4e40119_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
      <sec>
        <title>Dependent Measures</title>
        <sec>
          <title>Similarity Questionnaire</title>
          <p>The Similarity Questionnaire (SQ) from a recent study [<xref ref-type="bibr" rid="ref13">13</xref>] was applied in this study. The SQ consists of four 7-point Likert scale questions assessing participants’ feeling on how similar the body parts of the virtual avatar are akin to theirs. The scale ranges from –3 (fully disagree) to +3 (fully agree); <xref ref-type="table" rid="table1">Table 1</xref> shows the 4 questions in detail. The average score of the questions was used as the similarity score.</p>
          <table-wrap position="float" id="table1">
            <label>Table 1</label>
            <caption>
              <p>List of items used in the Similarity Questionnaire<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="300"/>
              <col width="700"/>
              <thead>
                <tr valign="top">
                  <td>Question</td>
                  <td>Statement</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Face</td>
                  <td>I felt the face of virtual body was similar to mine.</td>
                </tr>
                <tr valign="top">
                  <td>Torso</td>
                  <td>I felt the torso of virtual body was similar to mine.</td>
                </tr>
                <tr valign="top">
                  <td>Arms</td>
                  <td>I felt the arm of virtual body was similar to mine.</td>
                </tr>
                <tr valign="top">
                  <td>Legs</td>
                  <td>I felt the leg of virtual body was similar to mine.</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table1fn1">
                <p><sup>a</sup>Scale ranges from –3 (fully disagree) to +3 (fully agree).</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Embodiment Questionnaire</title>
          <p>The Embodiment Questionnaire (EQ) from recent studies [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref11">11</xref>] was modified and used in this study. The EQ consists of three 7-point Likert scale questions on agency, body ownership, and self-location. The EQ scale ranges from –3 (fully disagree) to +3 (fully agree). Each question is detailed in <xref ref-type="table" rid="table2">Table 2</xref>.</p>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>List of items used in the Embodiment Questionnaire<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="300"/>
              <col width="700"/>
              <thead>
                <tr valign="top">
                  <td>Question</td>
                  <td>Statement</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Agency</td>
                  <td>I felt that the movement of the virtual body was caused by my own movements.</td>
                </tr>
                <tr valign="top">
                  <td>Body ownership</td>
                  <td>I felt that the virtual body I saw when looking at myself in the mirror was my own body.</td>
                </tr>
                <tr valign="top">
                  <td>Self-location</td>
                  <td>I felt that I was inside the virtual body.</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table2fn1">
                <p><sup>a</sup>Scale ranges from –3 (fully disagree) to +3 (fully agree).</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Virtual Experience Questionnaire</title>
          <p>The Virtual Experience Questionnaire consisted of 3 subjective virtual experiences: presence, simulator sickness, and emotion. Participants’ presence was assessed with the Presence Questionnaire [<xref ref-type="bibr" rid="ref15">15</xref>], which consists of 21 questions using a 7-point Likert scale ranging from 1 (not at all) to 7 (completely). Participants’ simulator sickness was assessed with the Simulator Sickness Questionnaire [<xref ref-type="bibr" rid="ref16">16</xref>], which has 16 questions for checking symptoms. Each question was scored using a 4-point Likert scale ranging from 0 (not at all) to 3 (severe). Participants’ emotions were assessed with the self-assessment manikin [<xref ref-type="bibr" rid="ref17">17</xref>]. It consists of 2 subscales of arousal and valence. A visual representation of a manikin with varying levels of emotional expression was used to rate participants’ emotional arousal, from 1 (extremely calm) to 9 (extremely excited), and valence, from 1 (extremely negative) to 9 (extremely positive).</p>
        </sec>
      </sec>
      <sec>
        <title>Data Analysis</title>
        <p>All data were analyzed using SPSS statistical software (version 27.0; IBM Corp). The evaluations of normality were performed using the skewness, kurtosis, and Kolmogorov-Smirnov tests. A 2 (sync vs async) × 2 (personalized vs generic) repeated measures ANOVA was conducted on all dependent measures to examine the effects of motion synchrony, appearance similarity, and interaction between the 2 variables. Post hoc analyses then followed. The level of statistically significant <italic>P</italic> value was set to .05.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Similarity</title>
        <p>The average similarity score from the SQ did not show a significant main effect for motion (<italic>F</italic><sub>1,23</sub>=1.448; <italic>P</italic>=.24; sync mean .14; async mean –.01), whereas a significant main effect was demonstrated for appearance (<italic>F</italic><sub>1,23</sub>=32.920; <italic>P</italic>&#60;.001; η<sup>2</sup>=.589; personalized mean .92; generic mean –.79). The interaction effect between the 2 factors was not significant (<italic>F</italic><sub>1,23</sub>=1.178; <italic>P</italic>=.29).</p>
      </sec>
      <sec>
        <title>Embodiment</title>
        <p>The result of embodiment is illustrated in <xref ref-type="table" rid="table3">Table 3</xref>.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Embodiment scores of agency, body ownership, and self-location.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="370"/>
            <col width="230"/>
            <col width="250"/>
            <col width="120"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Embodiment, appearance</td>
                <td colspan="2">Motion</td>
                <td><italic>P</italic> value</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td>Sync, mean (SE)</td>
                <td>Async, mean (SE)</td>
                <td>
                  <break/>
                </td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Agency</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personalized</td>
                <td>2.38 (.13)</td>
                <td>–.79 (.42)</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generic</td>
                <td>2.25 (.22)</td>
                <td>–1.75 (.36)</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sync (personalized vs generic)</td>
                <td>N/A<sup>a</sup></td>
                <td>N/A</td>
                <td>.59</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Async (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.02</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Body ownership</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personalized</td>
                <td>1.54 (.20)</td>
                <td>.00 (.41)</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generic</td>
                <td>.96 (.33)</td>
                <td>–1.08 (.42)</td>
                <td>.002</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sync (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.08</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Async (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.01</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Self-location</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personalized</td>
                <td>1.58 (.26)</td>
                <td>–.29 (.42)</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generic</td>
                <td>1.25 (.34)</td>
                <td>–.29 (.42)</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sync (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.22</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Async (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>&#62;.99</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>N/A: not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <sec>
          <title>Agency</title>
          <p>The agency score displayed a significant main effect for motion (<italic>F</italic><sub>1,23</sub>=91.653; <italic>P</italic>&#60;.001; η<sup>2</sup>=.799) and appearance (<italic>F</italic><sub>1,23</sub>=4.442; <italic>P</italic>=.046; η<sup>2</sup>=.162). A significant interaction effect was present between the 2 factors (<italic>F</italic><sub>1,23</sub>=5.867; <italic>P</italic>=.02; η<sup>2</sup>=.203). Post hoc analysis revealed that the participants reported higher agency in the sync condition than the async condition, regardless of appearance (personalized-sync vs personalized-async: <italic>P</italic>&#60;.001; generic-sync vs generic-async: <italic>P</italic>&#60;.001). Although the virtual avatar moved regardless of a participant’s body motion in the async condition, participants felt higher agency toward the personalized avatar’s movement (<italic>P</italic>=.02).</p>
        </sec>
        <sec>
          <title>Body Ownership</title>
          <p>The body ownership score exhibited a significant main effect for motion (<italic>F</italic><sub>1,23</sub>=22.876; <italic>P</italic>&#60;.001; η<sup>2</sup>=.499) and appearance (<italic>F</italic><sub>1,23</sub>=10.047; <italic>P</italic>=.004; η<sup>2</sup>=.312). The interaction effect between the 2 factors was not significant (<italic>F</italic><sub>1,23</sub>=1.078; <italic>P</italic>=.31).</p>
        </sec>
        <sec>
          <title>Self-location</title>
          <p>The self-location score showed a significant main effect for motion (<italic>F</italic><sub>1,23</sub>=31.306; <italic>P</italic>&#60;.001; η<sup>2</sup>=.576); however, no main effect for appearance was found (<italic>F</italic><sub>1,23</sub>=.416; <italic>P</italic>=.53). There was no significant interaction effect between the 2 factors (<italic>F</italic><sub>1,23</sub>=.358; <italic>P</italic>=.56).</p>
        </sec>
      </sec>
      <sec>
        <title>Virtual Experience</title>
        <p>The result of virtual experience from the Virtual Experience Questionnaire is illustrated in <xref ref-type="table" rid="table4">Table 4</xref>.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Virtual experience scores of presence, simulator sickness, and emotion (arousal and valence).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="370"/>
            <col width="230"/>
            <col width="250"/>
            <col width="120"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Virtual experience, appearance</td>
                <td colspan="2">Motion</td>
                <td><italic>P</italic> value</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td>Sync, mean (SE)</td>
                <td>Async, mean (SE)</td>
                <td>
                  <break/>
                </td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Presence</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personalized</td>
                <td>74.21 (2.42)</td>
                <td>55.71 (3.68)</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generic</td>
                <td>74.71 (3.01)</td>
                <td>54.92 (4.00)</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sync (personalized vs generic)</td>
                <td>N/A<sup>a</sup></td>
                <td>N/A</td>
                <td>.77</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Async (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.71</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Simulator sickness</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personalized</td>
                <td>23.53 (5.62)</td>
                <td>24.00 (5.46)</td>
                <td>.86</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generic</td>
                <td>25.09 (6.18)</td>
                <td>27.12 (6.29)</td>
                <td>.52</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sync (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.58</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Async (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.43</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Arousal</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personalized</td>
                <td>4.13 (.39)</td>
                <td>4.08 (.40)</td>
                <td>.86</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generic</td>
                <td>3.96 (.41)</td>
                <td>3.96 (.33)</td>
                <td>&#62;.99</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sync (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.57</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Async (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.61</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Valence</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personalized</td>
                <td>6.38 (.27)</td>
                <td>5.63 (.37)</td>
                <td>.004</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generic</td>
                <td>5.75 (.31)</td>
                <td>5.54 (.29)</td>
                <td>.41</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sync (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.07</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Async (personalized vs generic)</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>.81</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>N/A: not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <sec>
          <title>Presence</title>
          <p>The presence score showed a significant main effect for motion (<italic>F</italic><sub>1,23</sub>=40.126; <italic>P</italic>&#60;.001; η<sup>2</sup>=.636) but not for appearance (<italic>F</italic><sub>1,23</sub>=.009; <italic>P</italic>=.93). Furthermore, the interaction effect between the 2 factors was not significant (<italic>F</italic><sub>1,23</sub>=.333; <italic>P</italic>=.57).</p>
        </sec>
        <sec>
          <title>Simulator Sickness</title>
          <p>The main effects of motion (<italic>F</italic><sub>1,23</sub>=.247; <italic>P</italic>=.62) and appearance were not significant (<italic>F</italic><sub>1,23</sub>=.608; <italic>P</italic>=.44). Additionally, the interaction effect between the 2 factors was not significant (<italic>F</italic><sub>1,23</sub>=.269; <italic>P</italic>=.61).</p>
        </sec>
        <sec>
          <title>Emotion</title>
          <p>Arousal did not indicate a significant main effect for motion (<italic>F</italic><sub>1,23</sub>=.023; <italic>P</italic>=.88) or appearance (<italic>F</italic><sub>1,23</sub>=.495; <italic>P</italic>=.49). There was no significant interaction effect between the 2 factors (<italic>F</italic><sub>1,23</sub>=.016; <italic>P</italic>=.90).</p>
          <p>Valence showed a significant main effect for motion (<italic>F</italic><sub>1,23</sub>=6.111; <italic>P</italic>=.02; η<sup>2</sup>=.210) and no main effect for appearance (<italic>F</italic><sub>1,23</sub>=1.325; <italic>P</italic>=.26). There was no significant interaction effect between the 2 factors (<italic>F</italic><sub>1,23</sub>=3.524; <italic>P</italic>=.07).</p>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>This study investigated the effects of motion synchronization and appearance similarity on participants’ embodiment, perceived similarity, and subjective experience in VR. The results showed that participants experienced a higher level of agency and body ownership when body motion was synchronized and appearance was similar to theirs. Surprisingly, under the motion and appearance synchronizations, participants reported that the body motion of the virtual avatar was driven by them even when the virtual avatar moved independently, suggesting that the personalized appearance of the virtual avatar’s body can create an illusory agency toward the avatar’s movement that was not performed by the participant. Furthermore, our results indicated that the synchronization of motion could contribute to higher presence and induce more positive emotion.</p>
        <p>Two novel findings on embodiment can be listed from the results of this study. First, there was a statistically significant interaction between motion and appearance on agency (significant difference between async-personalized and async-generic conditions in agency). Prior studies have reported that synchronizing motion [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>] could contribute to form a higher level of agency. Results from this study support such previous findings, proving that the integration of visual-motion synchronization, which fulfills proprioception [<xref ref-type="bibr" rid="ref18">18</xref>], forms a feeling that “I am the only cause of the avatar’s body motion” [<xref ref-type="bibr" rid="ref1">1</xref>]. In addition to this finding, this study demonstrated that agency was associated with body motion that was not caused by the participant; therefore, the body motion could be felt as the participant’s movement when the virtual avatar resembled the participant’s appearance. This kind of attribution of behavior toward oneself has been shown in several previous studies. For example, Aymerich-Franch et al [<xref ref-type="bibr" rid="ref19">19</xref>] reported that participants felt shame and guilt for the misbehavior of a humanoid robot, which they did not perform. Likewise, Jun et al [<xref ref-type="bibr" rid="ref5">5</xref>] reported that participants’ emotions changed to happy, neutral, and sad according to a virtual avatar’s emotional status. Furthermore, this study extends previous findings that body movement can be attributed to oneself with a personalized avatar. In the motion sync condition, however, the agency difference was not significant between the personalized and generic avatars. We speculate that the reason behind this finding is a higher agency score (average score of 2.31 out of 3), which might have caused a ceiling effect. 
The current result can be used in the field of cognitive behavioral therapy, such as behavior modeling, which involves observing others to learn desirable behaviors. Using a personalized avatar, which facilitates more agency toward the avatar’s action, can be more effective compared to using generic avatars in behavior modeling.</p>
        <p>Second, it was evident that the 2 factors (ie, motion and appearance) in this study affected body ownership independently. Prior studies have found that synchronizing motion [<xref ref-type="bibr" rid="ref11">11</xref>] and personalized avatars [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>] could contribute to form a higher level of body ownership. The current findings support previous studies by showing that the integration of motion synchronization [<xref ref-type="bibr" rid="ref1">1</xref>] and visual aspects with higher fidelity [<xref ref-type="bibr" rid="ref10">10</xref>] form a feeling that the body is one’s own. Prior studies have revealed that the level of visual fidelity increased in the order of point-light avatar [<xref ref-type="bibr" rid="ref1">1</xref>], robot avatar [<xref ref-type="bibr" rid="ref10">10</xref>], human avatar [<xref ref-type="bibr" rid="ref5">5</xref>], size matched avatar [<xref ref-type="bibr" rid="ref11">11</xref>], and 3D-scanned avatar [<xref ref-type="bibr" rid="ref12">12</xref>]. This study extends these previous findings by revealing that body ownership can be further increased with an avatar that has a 3D-scanned face and size-matched body.</p>
        <p>In addition, it was evident that the factors of subjective virtual experience (ie, presence, simulator sickness, and emotion) were affected by motion synchrony. This result supports previous studies that confirmed higher presence [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref11">11</xref>] and positive emotion [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref5">5</xref>] by motion synchronization. Furthermore, a moderate interaction between appearance similarity (personalized vs generic) and motion synchrony (sync vs async) in emotional valence (significant difference between sync and async in personalized avatar, whereas no difference in generic avatar condition in valence; <xref ref-type="table" rid="table4">Table 4</xref>) showed that motion synchronization with a personalized avatar could induce more positive emotions than a generic avatar. On the contrary, the effect on arousal and simulator sickness was not significant. Low arousal scores across the conditions suggest that the participants were in a calm state during the experimental tasks. We speculate that low arousal might have affected simulator sickness, resulting in a floor effect across conditions.</p>
        <p>In addition to the main findings addressed above, it is worth noting the methodology we used for generating personalized avatars in this study. In fact, many prior studies have attempted to create personalized avatars by 3D scanning a participant’s face and body [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. However, due to complex settings such as red-green-blue-depth camera [<xref ref-type="bibr" rid="ref14">14</xref>] or multiple time-synchronized red-green-blue cameras [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>], it is challenging to use this methodology to construct a personalized avatar; thus, it is not appropriate for general use. Inspired by Gorisse et al [<xref ref-type="bibr" rid="ref10">10</xref>] and Jung et al [<xref ref-type="bibr" rid="ref8">8</xref>], our study proposed a more convenient method for creating a personalized avatar using a single smartphone and body-size measuring. The result from the SQ showed that our method could enhance a feeling of appearance similarity between the person and avatar. Thus, in future metaverse applications, each user can easily generate their personalized avatar using their smartphone to dive into VR.</p>
        <p>There are a few limitations in this study. First, the participants were restricted to healthy young adults. Future studies should consider recruiting participants across diverse age groups and health statuses. Second, as the prerecorded animation of the async condition consisted of simple body movements that can be performed easily by the participants, it would be stimulating to use a body motion that is “impossible” to perform to generalize the effect of appearance similarity on agency in the follow-up studies. Third, this study asked participants to move freely in the main task and measured arousal and valence as emotional variables. Although appearance similarity did not affect participants’ emotions, future studies should consider participants who have negative feelings toward their body, such as individuals with eating disorders [<xref ref-type="bibr" rid="ref20">20</xref>]. An attitude toward one’s body should be considered for future medical applications.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>This study proposed a method for creating personalized avatars using a single smartphone camera and an avatar’s body size manipulation. Furthermore, this study’s result indicated that participants perceived that the virtual avatar’s appearance was similar to them. In addition, participants established a higher sense of embodiment toward the virtual avatar’s body that was similar to theirs in body motion and appearance. Moreover, we discovered that the synchronization of appearance could result in a sense of agency toward an avatar’s movement. We hope that the findings in the current study can contribute to the fields of physical activity promotion [<xref ref-type="bibr" rid="ref21">21</xref>], social cognition training [<xref ref-type="bibr" rid="ref22">22</xref>], and pain intervention [<xref ref-type="bibr" rid="ref23">23</xref>], suggesting that matching body motion and appearance can enhance the VR experience.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">EQ</term>
          <def>
            <p>Embodiment Questionnaire</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">HMD</term>
          <def>
            <p>head-mounted display</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">SQ</term>
          <def>
            <p>Similarity Questionnaire</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">VR</term>
          <def>
            <p>virtual reality</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work was supported by the National Research Foundation of Korea (NRF) and Institute of Information &#38; Communications Technology Planning &#38; Evaluation (IITP) grant funded by the Korea government (Ministry of Science and ICT [Information and Communications Technology]; grant 2021R1A2C2013479 and 2021-0-00590, Decentralized High Performance Consensus for Large-Scale Blockchains). The funders were not involved in the design or conduct of the study; data collection; management; analysis; interpretation; or preparation, review, or approval of the manuscript.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>CS</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Controlling the sense of embodiment for virtual avatar applications: methods and empirical study</article-title>
          <source>JMIR Serious Games</source>
          <year>2020</year>
          <month>09</month>
          <day>22</day>
          <volume>8</volume>
          <issue>3</issue>
          <fpage>e21879</fpage>
          <pub-id pub-id-type="doi">10.2196/21879</pub-id>
          <pub-id pub-id-type="medline">32960174</pub-id>
          <pub-id pub-id-type="pii">v8i3e21879</pub-id>
          <pub-id pub-id-type="pmcid">PMC7539165</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gallagher</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Philosophical conceptions of the self: implications for cognitive science</article-title>
          <source>Trends Cogn Sci</source>
          <year>2000</year>
          <month>01</month>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>14</fpage>
          <lpage>21</lpage>
          <pub-id pub-id-type="doi">10.1016/s1364-6613(99)01417-5</pub-id>
          <pub-id pub-id-type="medline">10637618</pub-id>
          <pub-id pub-id-type="pii">S1364661399014175</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Botvinick</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Rubber hands 'feel' touch that eyes see</article-title>
          <source>Nature</source>
          <year>1998</year>
          <month>02</month>
          <day>19</day>
          <volume>391</volume>
          <issue>6669</issue>
          <fpage>756</fpage>
          <pub-id pub-id-type="doi">10.1038/35784</pub-id>
          <pub-id pub-id-type="medline">9486643</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Slater</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Perez-Marcos</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ehrsson</surname>
              <given-names>HH</given-names>
            </name>
            <name name-style="western">
              <surname>Sanchez-Vives</surname>
              <given-names>MV</given-names>
            </name>
          </person-group>
          <article-title>Towards a digital body: the virtual arm illusion</article-title>
          <source>Front Hum Neurosci</source>
          <year>2008</year>
          <month>08</month>
          <day>20</day>
          <volume>2</volume>
          <fpage>6</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/neuro.09.006.2008"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/neuro.09.006.2008</pub-id>
          <pub-id pub-id-type="medline">18958207</pub-id>
          <pub-id pub-id-type="pmcid">PMC2572198</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jun</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>KK</given-names>
            </name>
          </person-group>
          <article-title>Full-body ownership illusion can change our emotion</article-title>
          <year>2018</year>
          <month>04</month>
          <day>21</day>
          <conf-name>CHI '18: the 2018 CHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>April 21-26, 2018</conf-date>
          <conf-loc>Montreal, QC</conf-loc>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1145/3173574.3174175</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Banakou</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Slater</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Body ownership causes illusory self-attribution of speaking and influences subsequent real speaking</article-title>
          <source>Proc Natl Acad Sci U S A</source>
          <year>2014</year>
          <month>12</month>
          <day>09</day>
          <volume>111</volume>
          <issue>49</issue>
          <fpage>17678</fpage>
          <lpage>83</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/25422444"/>
          </comment>
          <pub-id pub-id-type="doi">10.1073/pnas.1414936111</pub-id>
          <pub-id pub-id-type="medline">25422444</pub-id>
          <pub-id pub-id-type="pii">1414936111</pub-id>
          <pub-id pub-id-type="pmcid">PMC4267370</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jun</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Heo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>KK</given-names>
            </name>
          </person-group>
          <article-title>Effects of virtual-avatar motion-synchrony levels on full-body interaction</article-title>
          <year>2019</year>
          <month>04</month>
          <day>08</day>
          <conf-name>SAC '19: the 34th ACM/SIGAPP Symposium on Applied Computing</conf-name>
          <conf-date>April 8-12, 2019</conf-date>
          <conf-loc>Limassol, Cyprus</conf-loc>
          <fpage>701</fpage>
          <lpage>708</lpage>
          <pub-id pub-id-type="doi">10.1145/3297280.3297346</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Measuring recognition of body changes over time: a human-computer interaction tool using dynamic morphing and body ownership illusion</article-title>
          <source>PLoS One</source>
          <year>2020</year>
          <month>09</month>
          <day>18</day>
          <volume>15</volume>
          <issue>9</issue>
          <fpage>e0239322</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0239322"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0239322</pub-id>
          <pub-id pub-id-type="medline">32946504</pub-id>
          <pub-id pub-id-type="pii">PONE-D-20-00911</pub-id>
          <pub-id pub-id-type="pmcid">PMC7500668</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peck</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>Seinfeld</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aglioti</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Slater</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Putting yourself in the skin of a black avatar reduces implicit racial bias</article-title>
          <source>Conscious Cogn</source>
          <year>2013</year>
          <month>09</month>
          <volume>22</volume>
          <issue>3</issue>
          <fpage>779</fpage>
          <lpage>87</lpage>
          <pub-id pub-id-type="doi">10.1016/j.concog.2013.04.016</pub-id>
          <pub-id pub-id-type="medline">23727712</pub-id>
          <pub-id pub-id-type="pii">S1053-8100(13)00059-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gorisse</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Christmann</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Houzangbe</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Richir</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>From robot to virtual doppelganger: impact of visual fidelity of avatars controlled in third-person perspective on embodiment and behavior in immersive virtual environments</article-title>
          <source>Front Robot AI</source>
          <year>2019</year>
          <month>02</month>
          <day>18</day>
          <volume>6</volume>
          <fpage>8</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/frobt.2019.00008"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/frobt.2019.00008</pub-id>
          <pub-id pub-id-type="medline">33501025</pub-id>
          <pub-id pub-id-type="pmcid">PMC7805911</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>KK</given-names>
            </name>
          </person-group>
          <article-title>Impact of body size match to an avatar on the body ownership illusion and user's subjective experience</article-title>
          <source>Cyberpsychol Behav Soc Netw</source>
          <year>2020</year>
          <month>04</month>
          <volume>23</volume>
          <issue>4</issue>
          <fpage>234</fpage>
          <lpage>241</lpage>
          <pub-id pub-id-type="doi">10.1089/cyber.2019.0136</pub-id>
          <pub-id pub-id-type="medline">32074457</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Waltemate</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Gall</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Roth</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Botsch</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Latoschik</surname>
              <given-names>ME</given-names>
            </name>
          </person-group>
          <article-title>The impact of avatar personalization and immersion on virtual body ownership, presence, and emotional response</article-title>
          <source>IEEE Trans Vis Comput Graph</source>
          <year>2018</year>
          <month>04</month>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>1643</fpage>
          <lpage>1652</lpage>
          <pub-id pub-id-type="doi">10.1109/TVCG.2018.2794629</pub-id>
          <pub-id pub-id-type="medline">29543180</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thaler</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Piryankova</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Stefanucci</surname>
              <given-names>JK</given-names>
            </name>
            <name name-style="western">
              <surname>Pujades</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>de la Rosa</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Streuber</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Romero</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Black</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Mohler</surname>
              <given-names>BJ</given-names>
            </name>
          </person-group>
          <article-title>Visual perception and evaluation of photo-realistic self-avatars from 3D body scans in males and females</article-title>
          <source>Front ICT</source>
          <year>2018</year>
          <month>09</month>
          <day>04</day>
          <volume>5</volume>
          <issue>18</issue>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <pub-id pub-id-type="doi">10.3389/fict.2018.00018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zuo</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Gong</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>SparseFusion: dynamic human avatar modeling from sparse RGBD images</article-title>
          <source>IEEE Trans Multimedia</source>
          <year>2021</year>
          <volume>23</volume>
          <fpage>1617</fpage>
          <lpage>1629</lpage>
          <pub-id pub-id-type="doi">10.1109/TMM.2020.3001506</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Witmer</surname>
              <given-names>BG</given-names>
            </name>
            <name name-style="western">
              <surname>Singer</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>Measuring presence in virtual environments: a presence questionnaire</article-title>
          <source>Presence (Camb)</source>
          <year>1998</year>
          <month>06</month>
          <volume>7</volume>
          <issue>3</issue>
          <fpage>225</fpage>
          <lpage>240</lpage>
          <pub-id pub-id-type="doi">10.1162/105474698565686</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kennedy</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Lane</surname>
              <given-names>NE</given-names>
            </name>
            <name name-style="western">
              <surname>Berbaum</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Lilienthal</surname>
              <given-names>MG</given-names>
            </name>
          </person-group>
          <article-title>Simulator Sickness Questionnaire: an enhanced method for quantifying simulator sickness</article-title>
          <source>Int J Aviat Psychol</source>
          <year>1993</year>
          <month>07</month>
          <volume>3</volume>
          <issue>3</issue>
          <fpage>203</fpage>
          <lpage>220</lpage>
          <pub-id pub-id-type="doi">10.1207/s15327108ijap0303_3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bradley</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Lang</surname>
              <given-names>PJ</given-names>
            </name>
          </person-group>
          <article-title>Measuring emotion: the self-assessment manikin and the semantic differential</article-title>
          <source>J Behav Ther Exp Psychiatry</source>
          <year>1994</year>
          <month>03</month>
          <volume>25</volume>
          <issue>1</issue>
          <fpage>49</fpage>
          <lpage>59</lpage>
          <pub-id pub-id-type="doi">10.1016/0005-7916(94)90063-9</pub-id>
          <pub-id pub-id-type="medline">7962581</pub-id>
          <pub-id pub-id-type="pii">0005-7916(94)90063-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Friston</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Daunizeau</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kilner</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kiebel</surname>
              <given-names>SJ</given-names>
            </name>
          </person-group>
          <article-title>Action and behavior: a free-energy formulation</article-title>
          <source>Biol Cybern</source>
          <year>2010</year>
          <month>03</month>
          <volume>102</volume>
          <issue>3</issue>
          <fpage>227</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.1007/s00422-010-0364-z</pub-id>
          <pub-id pub-id-type="medline">20148260</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aymerich-Franch</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kishore</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Slater</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>When your robot avatar misbehaves you are likely to apologize: an exploration of guilt during robot embodiment</article-title>
          <source>Int J Soc Robot</source>
          <year>2019</year>
          <month>05</month>
          <day>09</day>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>217</fpage>
          <lpage>226</lpage>
          <pub-id pub-id-type="doi">10.1007/s12369-019-00556-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Klemchuk</surname>
              <given-names>HP</given-names>
            </name>
            <name name-style="western">
              <surname>Hutchinson</surname>
              <given-names>CB</given-names>
            </name>
            <name name-style="western">
              <surname>Frank</surname>
              <given-names>RI</given-names>
            </name>
          </person-group>
          <article-title>Body dissatisfaction and eating-related problems on the college campus: usefulness of the Eating Disorder Inventory with a nonclinical population</article-title>
          <source>J Couns Psychol</source>
          <year>1990</year>
          <month>07</month>
          <volume>37</volume>
          <issue>3</issue>
          <fpage>297</fpage>
          <lpage>305</lpage>
          <pub-id pub-id-type="doi">10.1037/0022-0167.37.3.297</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Muñoz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mehrabi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Basharat</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Middleton</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Barnett-Cowan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Boger</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Immersive virtual reality exergames for persons living with dementia: user-centered design study as a multistakeholder team during the COVID-19 pandemic</article-title>
          <source>JMIR Serious Games</source>
          <year>2022</year>
          <month>01</month>
          <day>19</day>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>e29987</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://games.jmir.org/2022/1/e29987/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/29987</pub-id>
          <pub-id pub-id-type="medline">35044320</pub-id>
          <pub-id pub-id-type="pii">v10i1e29987</pub-id>
          <pub-id pub-id-type="pmcid">PMC8772876</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nijman</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Veling</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Greaves-Lord</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Vos</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zandee</surname>
              <given-names>CER</given-names>
            </name>
            <name name-style="western">
              <surname>Aan Het Rot</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Geraets</surname>
              <given-names>CNW</given-names>
            </name>
            <name name-style="western">
              <surname>Pijnenborg</surname>
              <given-names>GHM</given-names>
            </name>
          </person-group>
          <article-title>Dynamic Interactive Social Cognition Training in Virtual Reality (DiSCoVR) for people with a psychotic disorder: single-group feasibility and acceptability study</article-title>
          <source>JMIR Ment Health</source>
          <year>2020</year>
          <month>08</month>
          <day>07</day>
          <volume>7</volume>
          <issue>8</issue>
          <fpage>e17808</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mental.jmir.org/2020/8/e17808/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/17808</pub-id>
          <pub-id pub-id-type="medline">32763880</pub-id>
          <pub-id pub-id-type="pii">v7i8e17808</pub-id>
          <pub-id pub-id-type="pmcid">PMC7442939</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goudman</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Jansen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Billot</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vets</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>De Smedt</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Roulaud</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rigoard</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Moens</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality applications in chronic pain management: systematic review and meta-analysis</article-title>
          <source>JMIR Serious Games</source>
          <year>2022</year>
          <month>05</month>
          <day>10</day>
          <volume>10</volume>
          <issue>2</issue>
          <fpage>e34402</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://games.jmir.org/2022/2/e34402/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/34402</pub-id>
          <pub-id pub-id-type="medline">35536641</pub-id>
          <pub-id pub-id-type="pii">v10i2e34402</pub-id>
          <pub-id pub-id-type="pmcid">PMC9131143</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
