<?xml version="1.0" encoding="UTF-8"?>
<dblpperson name="Gargi Vaidya" pid="313/9459" n="2">
<person key="homepages/313/9459" mdate="2022-02-18">
<author pid="313/9459">Gargi Vaidya</author>
</person>
<r><inproceedings key="conf/iclr/RengarajanVSKS22" mdate="2022-08-20">
<author pid="218/1345">Desik Rengarajan</author>
<author pid="313/9459">Gargi Vaidya</author>
<author pid="313/9184">Akshay Sarvesh</author>
<author pid="44/8356">Dileep M. Kalathil</author>
<author pid="03/353">Srinivas Shakkottai</author>
<title>Reinforcement Learning with Sparse Rewards using Guidance from Offline Demonstration.</title>
<year>2022</year>
<booktitle>ICLR</booktitle>
<ee type="oa">https://openreview.net/forum?id=YJ1WzgMVsMt</ee>
<crossref>conf/iclr/2022</crossref>
<url>db/conf/iclr/iclr2022.html#RengarajanVSKS22</url>
</inproceedings>
</r>
<r><article publtype="informal" key="journals/corr/abs-2202-04628" mdate="2022-02-18">
<author pid="218/1345">Desik Rengarajan</author>
<author pid="313/9459">Gargi Vaidya</author>
<author pid="313/9184">Akshay Sarvesh</author>
<author pid="44/8356">Dileep M. Kalathil</author>
<author pid="03/353">Srinivas Shakkottai</author>
<title>Reinforcement Learning with Sparse Rewards using Guidance from Offline Demonstration.</title>
<year>2022</year>
<volume>abs/2202.04628</volume>
<journal>CoRR</journal>
<ee type="oa">https://arxiv.org/abs/2202.04628</ee>
<url>db/journals/corr/corr2202.html#abs-2202-04628</url>
</article>
</r>
<coauthors n="4" nc="1">
<co c="0"><na f="k/Kalathil:Dileep_M=" pid="44/8356">Dileep M. Kalathil</na></co>
<co c="0"><na f="r/Rengarajan:Desik" pid="218/1345">Desik Rengarajan</na></co>
<co c="0"><na f="s/Sarvesh:Akshay" pid="313/9184">Akshay Sarvesh</na></co>
<co c="0"><na f="s/Shakkottai:Srinivas" pid="03/353">Srinivas Shakkottai</na></co>
</coauthors>
</dblpperson>

