<?xml version="1.0" encoding="utf-8" standalone="yes" ?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Yue Hu</title>
    <link>https://yuehu.github.io/</link>
      <atom:link href="https://yuehu.github.io/index.xml" rel="self" type="application/rss+xml" />
    <description>Yue Hu</description>
    <generator>Wowchemy (https://wowchemy.com)</generator><language>en-us</language><lastBuildDate>Sun, 15 Mar 2026 00:00:00 +0000</lastBuildDate>
    <image>
      <url>https://yuehu.github.io/media/icon_hufe4902d8a3e296f954ced894ecfc599d_303890_512x512_fill_lanczos_center_3.png</url>
      <title>Yue Hu</title>
      <link>https://yuehu.github.io/</link>
    </image>
    
    <item>
      <title>Pragmatic Communication in Multi-Agent Collaborative Perception [TPAMI 2026]</title>
      <link>https://yuehu.github.io/publication/hu-pragcomm-2024/</link>
      <pubDate>Sun, 15 Mar 2026 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-pragcomm-2024/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Rate-Distortion Optimized Communication for Collaborative Perception [ICLR 2026]</title>
      <link>https://yuehu.github.io/publication/liu-rdcomm-2026/</link>
      <pubDate>Sat, 31 Jan 2026 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/liu-rdcomm-2026/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Unveiling the Impact of Data and Model Scaling on High-Level Control for Humanoid Robots [ICRA 2026]</title>
      <link>https://yuehu.github.io/publication/wei-humanmotion-2026/</link>
      <pubDate>Sat, 31 Jan 2026 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/wei-humanmotion-2026/</guid>
      <description></description>
    </item>
    
    <item>
      <title>BeliefMapNav: 3D voxel-based belief map for zero-shot object navigation [NeurIPS 2025]</title>
      <link>https://yuehu.github.io/publication/zhou-beleifmap-2025/</link>
      <pubDate>Thu, 18 Sep 2025 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/zhou-beleifmap-2025/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Imaginative world modeling with scene graphs for embodied agent navigation [ICCV 2025 Demo Track]</title>
      <link>https://yuehu.github.io/publication/hu-sgimaginenav-2025/</link>
      <pubDate>Thu, 18 Sep 2025 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-sgimaginenav-2025/</guid>
      <description></description>
    </item>
    
    <item>
      <title>NegoCollab: A Common Representation Negotiation Approach for Heterogeneous Collaborative Perception [NeurIPS 2025]</title>
      <link>https://yuehu.github.io/publication/shao-negocolla-2025/</link>
      <pubDate>Thu, 18 Sep 2025 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/shao-negocolla-2025/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Self-Evolving Multi-Agent Collaboration Networks for Software Development [ICLR 2025]</title>
      <link>https://yuehu.github.io/publication/hu-evomac-2025/</link>
      <pubDate>Sat, 01 Mar 2025 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-evomac-2025/</guid>
      <description></description>
    </item>
    
    <item>
      <title>An Extensible Framework for Open Heterogeneous Collaborative Perception [ICLR 2024]</title>
      <link>https://yuehu.github.io/publication/lu-heal-2024/</link>
      <pubDate>Mon, 01 Jan 2024 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/lu-heal-2024/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Communication-Efficient Collaborative Perception via Information Filling with Codebook [CVPR 2024]</title>
      <link>https://yuehu.github.io/publication/hu-codefilling-2024/</link>
      <pubDate>Mon, 01 Jan 2024 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-codefilling-2024/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Towards Collaborative Autonomous Driving Simulation Platform and End-to-End System [TPAMI 2025]</title>
      <link>https://yuehu.github.io/publication/liu-v2xverse-2025/</link>
      <pubDate>Mon, 01 Jan 2024 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/liu-v2xverse-2025/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Collaboration Helps Camera Overtake LiDAR in 3D Detection [CVPR 2023]</title>
      <link>https://yuehu.github.io/publication/hu-coca3d-2023/</link>
      <pubDate>Thu, 22 Jun 2023 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-coca3d-2023/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Aerial monocular 3D object detection [RAL 2023]</title>
      <link>https://yuehu.github.io/publication/hu-aerial-2023/</link>
      <pubDate>Mon, 16 Jan 2023 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-aerial-2023/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Asynchrony-Robust Collaborative Perception via Bird&#39;s Eye View Flow [NeurIPS 2023]</title>
      <link>https://yuehu.github.io/publication/wei-cobevflow-2023/</link>
      <pubDate>Sun, 01 Jan 2023 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/wei-cobevflow-2023/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Category Query Learning for Human-Object Interaction Classification [CVPR 2023]</title>
      <link>https://yuehu.github.io/publication/xie-hoi-2023/</link>
      <pubDate>Sun, 01 Jan 2023 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/xie-hoi-2023/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Where2comm is covered by 机器之心</title>
      <link>https://yuehu.github.io/post/news-where2comm/</link>
      <pubDate>Wed, 12 Oct 2022 22:17:11 +0800</pubDate>
      <guid>https://yuehu.github.io/post/news-where2comm/</guid>
      <description>&lt;p&gt;&lt;a href=&#34;https://github.com/MediaBrain-SJTU/where2comm&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;Where2comm&lt;/a&gt; is covered by 机器之心: &lt;a href=&#34;https://www.jiqizhixin.com/articles/2022-10-11-28&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;“Cutting communication bandwidth to one hundred-thousandth: NeurIPS 2022 paper proposes a new-generation collaborative perception method”&lt;/a&gt;!&lt;/p&gt;
&lt;p&gt;
&lt;figure  &gt;
  &lt;div class=&#34;d-flex justify-content-center&#34;&gt;
    &lt;div class=&#34;w-100&#34; &gt;&lt;img alt=&#34;&#34; srcset=&#34;
               /post/news-where2comm/images/news_hu68c25106f1ad4bc241056b9445dfbc5b_662021_8d9dccd25c32ebac1fa9126c2f743518.webp 400w,
               /post/news-where2comm/images/news_hu68c25106f1ad4bc241056b9445dfbc5b_662021_84b8f7ca79302c3e95a34cb8770c8db3.webp 760w,
               /post/news-where2comm/images/news_hu68c25106f1ad4bc241056b9445dfbc5b_662021_1200x1200_fit_q75_h2_lanczos.webp 1200w&#34;
               src=&#34;https://yuehu.github.io/post/news-where2comm/images/news_hu68c25106f1ad4bc241056b9445dfbc5b_662021_8d9dccd25c32ebac1fa9126c2f743518.webp&#34;
               width=&#34;631&#34;
               height=&#34;760&#34;
               loading=&#34;lazy&#34; data-zoomable /&gt;&lt;/div&gt;
  &lt;/div&gt;&lt;/figure&gt;
&lt;/p&gt;</description>
    </item>
    
    <item>
      <title>Opening positions</title>
      <link>https://yuehu.github.io/post/recruit-22-fall/</link>
      <pubDate>Wed, 12 Oct 2022 17:22:11 +0800</pubDate>
      <guid>https://yuehu.github.io/post/recruit-22-fall/</guid>
      <description>&lt;h2 id=&#34;一平台介绍&#34;&gt;1. Platform Introduction&lt;/h2&gt;
&lt;p&gt;Shanghai AI Laboratory, together with Shanghai Jiao Tong University, is openly recruiting several postdoctoral researchers in machine learning and computer vision, from China and abroad, to advance research on multi-agent intelligence and support applications such as unmanned swarms, vehicle-road collaboration, and smart healthcare. Compensation is internationally competitive. Interested researchers are welcome to apply.&lt;/p&gt;
&lt;p&gt;
&lt;figure  &gt;
  &lt;div class=&#34;d-flex justify-content-center&#34;&gt;
    &lt;div class=&#34;w-100&#34; &gt;&lt;img src=&#34;https://yuehu.github.io/post/recruit-22-fall/images/img-1.png&#34; alt=&#34;&#34; loading=&#34;lazy&#34; data-zoomable /&gt;&lt;/div&gt;
  &lt;/div&gt;&lt;/figure&gt;
&lt;/p&gt;
&lt;p&gt;Shanghai AI Laboratory is a new type of research institution in China&amp;rsquo;s AI field. It pursues strategic, original, and forward-looking research and technological breakthroughs in the fundamental theories and key core technologies of artificial intelligence, and is building a large, comprehensive research base that is “breakthrough-driven, field-leading, and platform-oriented”, supporting the leapfrog development of China&amp;rsquo;s AI industry, with the goal of becoming a world-class AI laboratory and a globally renowned source of original AI theories and technologies. Lab website: www.shlab.org.cn&lt;/p&gt;
&lt;p&gt;Shanghai Jiao Tong University is one of China&amp;rsquo;s oldest institutions of higher learning: a national key university directly administered by the central government, under the Ministry of Education and co-built with Shanghai. It is listed in the national “Double First-Class”, “985 Project”, and “211 Project” programs; is a member of the C9 League, the Association of Pacific Rim Universities, the 21st Century Academic Alliance, and other university consortia; and has been selected for national programs including the “Everest Plan”, “Strong Foundation Plan”, “111 Project”, “2011 Plan”, and the Excellent Engineer Education and Training Program. University website: &lt;a href=&#34;https://www.sjtu.edu.cn/&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;www.sjtu.edu.cn/&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;
&lt;figure  &gt;
  &lt;div class=&#34;d-flex justify-content-center&#34;&gt;
    &lt;div class=&#34;w-100&#34; &gt;&lt;img src=&#34;https://yuehu.github.io/post/recruit-22-fall/images/img-2.jpg&#34; alt=&#34;&#34; loading=&#34;lazy&#34; data-zoomable /&gt;&lt;/div&gt;
  &lt;/div&gt;&lt;/figure&gt;
&lt;/p&gt;
&lt;h2 id=&#34;二岗位职责&#34;&gt;2. Responsibilities&lt;/h2&gt;
&lt;ol&gt;
&lt;li&gt;Research frontier algorithms, including but not limited to multi-agent systems, graph machine learning, federated learning, 3D perception, and data/model compression;&lt;/li&gt;
&lt;li&gt;Develop and maintain open-source projects: build new algorithm frameworks, implement state-of-the-art algorithms, and improve user experience and project impact;&lt;/li&gt;
&lt;li&gt;Help supervise the research of PhD students and interns;&lt;/li&gt;
&lt;li&gt;Participate in applications for research grants.&lt;/li&gt;
&lt;/ol&gt;
&lt;h2 id=&#34;三申请条件&#34;&gt;3. Qualifications&lt;/h2&gt;
&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;A PhD in computer science, artificial intelligence, or a related field;&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Deep expertise in one or more areas of machine learning or computer vision, with high-quality first-author publications at top conferences or in top journals;&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Proficiency in Python and PyTorch, with solid engineering skills;&lt;/p&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Familiarity with C++, CUDA, and ROS, visibility in technical communities, or contributions to well-known open-source projects are a plus.&lt;/p&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;h2 id=&#34;四团队简介&#34;&gt;4. Team&lt;/h2&gt;
&lt;p&gt;Prof. Yanfeng Wang, doctoral supervisor, is Assistant Director of Shanghai AI Laboratory and Secretary-General of the Global AI Academic Alliance. He is a member of the AI expert committee of the National Development and Reform Commission and of the expert group drafting guidelines for the Science and Technology Innovation 2030 “New Generation Artificial Intelligence” mega-project. His research focuses on artificial intelligence, smart healthcare, and commercial applications of emerging information technologies.&lt;/p&gt;
&lt;p&gt;Prof. Siheng Chen, doctoral supervisor, is an associate professor at the Cooperative Medianet Innovation Center, School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University, and a jointly appointed young scientist at Shanghai AI Laboratory. He received his PhD from Carnegie Mellon University and was selected for the young-scholar track of a national talent program. His research interests include multi-agent intelligence, graph machine learning, federated learning, and autonomous driving.&lt;/p&gt;
&lt;h2 id=&#34;五应聘方式&#34;&gt;5. How to Apply&lt;/h2&gt;
&lt;p&gt;Send a detailed CV, including education, work experience, research achievements, and contact information, to sihengc@sjtu.edu.cn, with the subject line “Postdoc Application_YourName”.&lt;/p&gt;
</description>
    </item>
    
    <item>
      <title>Latency-aware collaborative perception [ECCV 2022]</title>
      <link>https://yuehu.github.io/publication/lei-syncnet-2022/</link>
      <pubDate>Sat, 01 Oct 2022 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/lei-syncnet-2022/</guid>
      <description></description>
    </item>
    
    <item>
      <title>News</title>
      <link>https://yuehu.github.io/news/</link>
      <pubDate>Sat, 01 Oct 2022 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/news/</guid>
      <description>
&lt;p&gt;&lt;strong&gt;[Mar. 2026]&lt;/strong&gt; &lt;a href=&#34;https://arxiv.org/abs/2401.12694&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;PragComm&lt;/a&gt; is accepted by TPAMI 2026&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Feb. 2026]&lt;/strong&gt; &lt;a href=&#34;https://arxiv.org/abs/2511.09241&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;Humanoid-Union&lt;/a&gt; is accepted by ICRA 2026&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Jan. 2026]&lt;/strong&gt; &lt;a href=&#34;https://arxiv.org/abs/2509.21994&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;RDcomm&lt;/a&gt; is accepted by ICLR 2026&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Sep. 2025]&lt;/strong&gt; &lt;a href=&#34;https://arxiv.org/abs/2506.06487&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;BeliefMapNav&lt;/a&gt; and &lt;a href=&#34;https://neurips.cc/virtual/2025/poster/119565&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;NegoCollab&lt;/a&gt; are accepted by NeurIPS 2025&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Aug. 2025]&lt;/strong&gt; &lt;a href=&#34;https://arxiv.org/abs/2508.06990&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;SGImagineNav&lt;/a&gt; is accepted by ICCV Demo Track 2025&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Apr. 2025]&lt;/strong&gt; &lt;a href=&#34;https://arxiv.org/abs/2404.09496&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;V2XVerse&lt;/a&gt; is accepted by TPAMI 2025&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Mar. 2025]&lt;/strong&gt; &lt;a href=&#34;https://mas-2025.github.io/MAS-2025/&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;MAS&lt;/a&gt; workshop proposal is accepted by ICML 2025&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Feb. 2025]&lt;/strong&gt; &lt;a href=&#34;https://openreview.net/forum?id=4R71pdPBZp&amp;amp;noteId=VXbq0eSSQc&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;EvoMAC&lt;/a&gt; is accepted by ICLR 2025&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Feb. 2024]&lt;/strong&gt; &lt;a href=&#34;https://openreview.net/forum?id=VFYX0SS8je&amp;amp;noteId=xrRordmNUG&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;CodeFilling&lt;/a&gt; is accepted by CVPR 2024&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Jan. 2024]&lt;/strong&gt; &lt;a href=&#34;https://openreview.net/pdf?id=KkrDUGIASk&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;HEAL&lt;/a&gt; is accepted by ICLR 2024&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Nov. 2023]&lt;/strong&gt; &lt;a href=&#34;https://openreview.net/pdf?id=UHIDdtxmVS&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;CoBEVFlow&lt;/a&gt; is accepted by NeurIPS 2023&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Apr. 2023]&lt;/strong&gt; &lt;a href=&#34;https://github.com/MediaBrain-SJTU/CoCa3D&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;CoCa3D&lt;/a&gt; is covered by &lt;a href=&#34;https://www.jiqizhixin.com/articles/2023-04-25-11&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;机器之心&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Mar. 2023]&lt;/strong&gt; Two papers are accepted to CVPR 2023&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Jan. 2023]&lt;/strong&gt; &lt;a href=&#34;https://arxiv.org/abs/2208.03974&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;Aerial Monocular 3D Object Detection&lt;/a&gt; is accepted to IEEE Robotics and Automation Letters&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Oct. 2022]&lt;/strong&gt; &lt;a href=&#34;https://github.com/MediaBrain-SJTU/where2comm&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;Where2comm&lt;/a&gt; is covered by &lt;a href=&#34;https://www.jiqizhixin.com/articles/2022-10-11-28&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;机器之心&lt;/a&gt; and &lt;a href=&#34;https://www.techbeat.net/talk-info?id=727&amp;amp;utm_campaign=1103%E8%83%A1%E6%82%A6&amp;amp;utm_medium=%E7%9F%A5%E4%B9%8E&amp;amp;utm_source=%E7%9F%A5%E4%B9%8E&amp;amp;gio_link_id=EoZAjykP&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;将门创投&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;[Sep. 2022]&lt;/strong&gt; One paper is accepted to NeurIPS 2022 as a Spotlight (&lt;strong&gt;top 5%&lt;/strong&gt;)&lt;/p&gt;

</description>
    </item>
    
    <item>
      <title>Where2comm: Communication-efficient collaborative perception via spatial confidence maps [NeurIPS 2022 Spotlight]</title>
      <link>https://yuehu.github.io/publication/hu-where-2-comm-2022/</link>
      <pubDate>Thu, 15 Sep 2022 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-where-2-comm-2022/</guid>
      <description></description>
    </item>
    
    <item>
      <title>We are hiring!</title>
      <link>https://yuehu.github.io/post/recruit-22-spring/</link>
      <pubDate>Wed, 04 May 2022 19:53:28 +0800</pubDate>
      <guid>https://yuehu.github.io/post/recruit-22-spring/</guid>
      <description>&lt;p&gt;Prof. Siheng Chen at MediaBrain is recruiting direct-admission PhD and Master&amp;rsquo;s students for the 2023/24 intake!&lt;/p&gt;
&lt;p&gt;Prof. Chen is a tenure-track associate professor at the Cooperative Medianet Innovation Center and a 2019 awardee of the young-scholar track of a national major talent program.
He received his PhD from CMU and, after his postdoc, worked as a Research Scientist at Uber Advanced Technologies Group and Mitsubishi Electric Research Laboratories (MERL). He has published 50+ papers in TPAMI, TIP, NeurIPS (oral), CVPR (oral), AAAI (oral), and ICLR, with 2000+ Google Scholar citations, and has received the IEEE Signal Processing Society Young Author Best Paper Award. Strong impact in both academia and industry!&lt;/p&gt;
&lt;p&gt;Prof. Chen has strong connections with universities overseas (CMU, Oxford, …) and with companies at home and abroad (Uber, DiDi, …), and can recommend you to world-leading platforms for study and exchange!&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;If you have a solid foundation in mathematics and programming, are diligent and eager to learn, and know some computer vision and machine learning;&lt;/li&gt;
&lt;li&gt;If you want to do first-rate academic work and publish high-quality, influential papers at top international journals and conferences;&lt;/li&gt;
&lt;li&gt;If you are interested in graph signal processing, graph neural networks, or unmanned systems, and want to explore problems in computer vision, machine learning, and autonomous driving;&lt;/li&gt;
&lt;li&gt;If you are passionate about research and hope to join a positive, open, and inclusive team that explores science with enthusiasm and ambition in a top-tier research atmosphere;
then welcome to join us!&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Join us and you will receive systematic research training: Prof. Chen and senior students will help you quickly settle into the research rhythm and join our ongoing projects, with the chance to produce high-quality research and publish at a top conference within a year!&lt;/p&gt;
&lt;p&gt;Interested students, please send your CV to: sihengc[AT]sjtu[DOT]edu[DOT]cn&lt;/p&gt;
&lt;p&gt;Friends are also welcome to share this post. We look forward to growing together!&lt;/p&gt;
</description>
    </item>
    
    <item>
      <title>Coperception-UAV Dataset</title>
      <link>https://yuehu.github.io/dataset/coperception-uav/</link>
      <pubDate>Tue, 19 Apr 2022 20:07:44 +0800</pubDate>
      <guid>https://yuehu.github.io/dataset/coperception-uav/</guid>
      <description>

&lt;details class=&#34;toc-inpage d-print-none  &#34; open&gt;
  &lt;summary class=&#34;font-weight-bold&#34;&gt;Table of Contents&lt;/summary&gt;
  &lt;nav id=&#34;TableOfContents&#34;&gt;
  &lt;ul&gt;
    &lt;li&gt;&lt;a href=&#34;#about-coperception-uav-dataset&#34;&gt;About Coperception-UAV Dataset&lt;/a&gt;&lt;/li&gt;
    &lt;li&gt;&lt;a href=&#34;#simulation-setting&#34;&gt;Simulation Setting&lt;/a&gt;
      &lt;ul&gt;
        &lt;li&gt;&lt;a href=&#34;#swarm-arrangement&#34;&gt;Swarm arrangement&lt;/a&gt;&lt;/li&gt;
        &lt;li&gt;&lt;a href=&#34;#sensor-setup&#34;&gt;Sensor Setup&lt;/a&gt;&lt;/li&gt;
      &lt;/ul&gt;
    &lt;/li&gt;
    &lt;li&gt;&lt;a href=&#34;#data&#34;&gt;Data&lt;/a&gt;&lt;/li&gt;
    &lt;li&gt;&lt;a href=&#34;#usage&#34;&gt;Usage&lt;/a&gt;&lt;/li&gt;
    &lt;li&gt;&lt;a href=&#34;#citation&#34;&gt;Citation&lt;/a&gt;&lt;/li&gt;
  &lt;/ul&gt;
&lt;/nav&gt;
&lt;/details&gt;

&lt;h2 id=&#34;about-coperception-uav-dataset&#34;&gt;About Coperception-UAV Dataset&lt;/h2&gt;
&lt;p&gt;
&lt;figure&gt;
  &lt;div class=&#34;d-flex justify-content-center&#34;&gt;
    &lt;div class=&#34;w-100&#34;&gt;
&lt;video controls&gt;
  &lt;source src=&#34;https://yuehu.github.io/dataset/coperception-uav/images/vis.mp4&#34; type=&#34;video/mp4&#34;&gt;
&lt;/video&gt;
    &lt;/div&gt;
  &lt;/div&gt;
&lt;/figure&gt;
&lt;/p&gt;
&lt;p&gt;Coperception-UAV is the first comprehensive dataset for UAV-based collaborative perception.&lt;/p&gt;
&lt;p&gt;A UAV swarm can distribute tasks and achieve better, faster, and more robust performance than a single UAV. To realize this, collaboration must be integrated into the entire pipeline, including perception, planning, and control. Among these tasks, collaborative perception enables holistic scene understanding from multiple perspectives via the collaboration of multiple UAVs, which could fundamentally resolve the occlusion and long-range issues of traditional single-agent perception. Recently, planning and control of UAV swarms have been studied intensively; however, collaborative perception remains under-explored due to the lack of a comprehensive dataset. This work aims to fill this gap by proposing a collaborative perception dataset for UAV swarms.&lt;/p&gt;
&lt;p&gt;Based on the co-simulation platform of AirSim and CARLA, our dataset consists of 131.9k synchronous images collected from 5 coordinated UAVs flying at 3 altitudes over 3 simulated towns in 2 swarm formations. Each image is fully annotated with pixel-wise semantic segmentation labels and 2D/3D bounding boxes of vehicles. We further build a benchmark on the dataset by evaluating a variety of multi-agent collaborative methods on multiple perception tasks, including object detection, semantic segmentation, and bird’s-eye-view (BEV) semantic segmentation.&lt;/p&gt;
&lt;h2 id=&#34;simulation-setting&#34;&gt;Simulation Setting&lt;/h2&gt;
&lt;p&gt;Our dataset is collected through the co-simulation of CARLA and AirSim: CARLA generates the complex simulation scenes and traffic flow, while AirSim simulates the UAV swarm flying through them. The flight routes of the UAVs are controlled by AirSim, and sample data are collected at roughly 4-second intervals.&lt;/p&gt;
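&lt;p&gt;As a rough illustration of this collection loop (a sketch, not the actual pipeline), the snippet below uses the AirSim Python client to grab paired RGB and segmentation frames from one UAV at fixed intervals; the vehicle and camera names are assumptions.&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Minimal sketch: periodically sample RGB + semantic frames from one
# simulated UAV via the AirSim Python API (pip install airsim).
# 'UAV_0' and 'bottom' are hypothetical names from settings.json.
import time
import airsim

client = airsim.MultirotorClient()  # connect to a running AirSim instance
client.confirmConnection()

for step in range(10):              # 10 samples, roughly 4 s apart
    requests = [
        airsim.ImageRequest('bottom', airsim.ImageType.Scene),         # RGB
        airsim.ImageRequest('bottom', airsim.ImageType.Segmentation),  # labels
    ]
    responses = client.simGetImages(requests, vehicle_name='UAV_0')
    for r in responses:
        print(step, r.camera_name, r.width, r.height)
    time.sleep(4.0)
&lt;/code&gt;&lt;/pre&gt;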
&lt;h3 id=&#34;swarm-arrangement&#34;&gt;Swarm arrangement&lt;/h3&gt;
&lt;p&gt;
&lt;figure  id=&#34;figure-formation&#34;&gt;
  &lt;div class=&#34;d-flex justify-content-center&#34;&gt;
    &lt;div class=&#34;w-100&#34; &gt;&lt;img alt=&#34;&#34; srcset=&#34;
               /dataset/coperception-uav/images/formation_hu92eb0a8a8422730f2995c25e85a3fd24_207658_ecdf1c7b13f8354e724f1206151c59bd.webp 400w,
               /dataset/coperception-uav/images/formation_hu92eb0a8a8422730f2995c25e85a3fd24_207658_293de8e7d8ffe92841bcf16d27ff9636.webp 760w,
               /dataset/coperception-uav/images/formation_hu92eb0a8a8422730f2995c25e85a3fd24_207658_1200x1200_fit_q75_h2_lanczos_3.webp 1200w&#34;
               src=&#34;https://yuehu.github.io/dataset/coperception-uav/images/formation_hu92eb0a8a8422730f2995c25e85a3fd24_207658_ecdf1c7b13f8354e724f1206151c59bd.webp&#34;
               width=&#34;714&#34;
               height=&#34;329&#34;
               loading=&#34;lazy&#34; data-zoomable /&gt;&lt;/div&gt;
  &lt;/div&gt;&lt;/figure&gt;

The UAV swarm moves and executes tasks in three-dimensional space, where situations can be far more complex than those of vehicles or roadside units. The dataset considers two main factors that may affect the perception and collaboration patterns of UAV swarms: flight formation and altitude. Each swarm consists of 5 UAVs, arranged in one of two formation modes: a discipline mode, where all 5 UAVs keep a consistent, relatively static array, and a dynamic mode, where each UAV navigates the scene independently. The former simulates a swarm executing a single specific task, such as exploring an unknown area or search and rescue; the latter simulates monitoring and patrolling tasks in a city.&lt;/p&gt;
&lt;h3 id=&#34;sensor-setup&#34;&gt;Sensor Setup&lt;/h3&gt;
&lt;p&gt;
&lt;figure  &gt;
  &lt;div class=&#34;d-flex justify-content-center&#34;&gt;
    &lt;div class=&#34;w-100&#34; &gt;&lt;img src=&#34;https://yuehu.github.io/dataset/coperception-uav/images/sensor.png&#34; alt=&#34;&#34; loading=&#34;lazy&#34; data-zoomable /&gt;&lt;/div&gt;
  &lt;/div&gt;&lt;/figure&gt;
&lt;/p&gt;
&lt;p&gt;In the UAV swarm, each UAV is equipped with 5 RGB cameras covering 5 directions, plus 5 semantic cameras that collect semantic ground truth for the RGB cameras (a configuration sketch follows the list):&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;90° horizontal FoV&lt;/li&gt;
&lt;li&gt;1 bird’s eye view camera and 4 cameras facing forward, backward, right, and left with a pitch degree of −45°&lt;/li&gt;
&lt;li&gt;image size: 800x450 pixels&lt;/li&gt;
&lt;/ul&gt;
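&lt;p&gt;For concreteness, a minimal sketch of this rig as plain Python data; the camera names, the −90° pitch for the BEV camera, and all field names are our assumptions for illustration, not an official schema.&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Hypothetical description of the 5-camera rig on each UAV; names and
# fields are illustrative, not the dataset's or AirSim's actual schema.
CAMERA_RIG = {
    name: {
        'fov_deg': 90.0,                  # horizontal field of view
        'pitch_deg': -90.0 if name == 'bev' else -45.0,  # straight down vs. tilted
        'image_size': (800, 450),         # width, height in pixels
        'paired_semantic_camera': True,   # each RGB camera has a GT twin
    }
    for name in ('bev', 'front', 'back', 'left', 'right')
}

print(CAMERA_RIG['front']['pitch_deg'])  # -45.0
&lt;/code&gt;&lt;/pre&gt;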
&lt;h2 id=&#34;data&#34;&gt;Data&lt;/h2&gt;
&lt;p&gt;Fully annotated data are provided in the dataset, including synchronous images with pixel-wise semantic labels, 2D &amp;amp; 3D bounding boxes of vehicles, and BEV semantic maps.&lt;/p&gt;
&lt;p&gt;
&lt;figure  &gt;
  &lt;div class=&#34;d-flex justify-content-center&#34;&gt;
    &lt;div class=&#34;w-100&#34; &gt;&lt;img src=&#34;https://yuehu.github.io/dataset/coperception-uav/images/sample.png&#34; alt=&#34;&#34; loading=&#34;lazy&#34; data-zoomable /&gt;&lt;/div&gt;
  &lt;/div&gt;&lt;/figure&gt;
&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Camera data&lt;/strong&gt; We collect synchronous images from all cameras on the 5 UAVs, i.e., 25 images per sample. In total, 123.8K images are collected in the discipline swarm mode and 8.1K in the dynamic swarm mode. We provide a semantic label for each image.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Bounding boxes&lt;/strong&gt; 3D bounding boxes of vehicles are recorded at the same moments as the images, including location (x, y, z) and rotation (w, x, y, z, as a quaternion) in the global coordinate system, together with length, width, and height. To specifically address the occlusion issue, we also provide a binary label for the occlusion status of each bounding box.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;BEV semantic label&lt;/strong&gt; We provide BEV segmentation labels for four categories: roadway, building, vehicle, and others, which cover the key elements of a city layout and the foreground objects. Each cell of the BEV map covers 0.25 m × 0.25 m.&lt;/p&gt;
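&lt;p&gt;To make the record layout concrete, here is a minimal sketch of one vehicle annotation as a Python dataclass; the field names are ours, not the dataset&amp;rsquo;s on-disk schema.&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Illustrative container for one vehicle annotation as described above;
# field names are hypothetical, not the dataset's actual schema.
from dataclasses import dataclass
from typing import Tuple

@dataclass
class VehicleBox3D:
    location: Tuple[float, float, float]         # (x, y, z) in the global frame
    rotation: Tuple[float, float, float, float]  # (w, x, y, z) quaternion
    size: Tuple[float, float, float]             # (length, width, height)
    occluded: bool                               # binary occlusion label

box = VehicleBox3D((12.3, -4.5, 0.8), (1.0, 0.0, 0.0, 0.0), (4.6, 1.9, 1.7), False)
print(box.occluded)  # False
&lt;/code&gt;&lt;/pre&gt;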
&lt;h2 id=&#34;usage&#34;&gt;Usage&lt;/h2&gt;
&lt;p&gt;
&lt;figure  &gt;
  &lt;div class=&#34;d-flex justify-content-center&#34;&gt;
    &lt;div class=&#34;w-100&#34; &gt;&lt;img src=&#34;https://yuehu.github.io/dataset/coperception-uav/images/tutorial.png&#34; alt=&#34;&#34; loading=&#34;lazy&#34; data-zoomable /&gt;&lt;/div&gt;
  &lt;/div&gt;&lt;/figure&gt;
&lt;/p&gt;
&lt;p&gt;The dataset is organized in the same way as the widely used autonomous driving dataset nuScenes, so it can be used directly with the well-established nuScenes-devkit.&lt;/p&gt;
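&lt;p&gt;As an illustration only, loading would then look roughly like the sketch below with the nuscenes-devkit; the version string and data root are placeholders, and whether they apply verbatim to this dataset is an assumption.&lt;/p&gt;
&lt;pre&gt;&lt;code class=&#34;language-python&#34;&gt;# Sketch: browsing a nuScenes-format dataset with the official devkit
# (pip install nuscenes-devkit). Version and dataroot are placeholders.
from nuscenes.nuscenes import NuScenes

nusc = NuScenes(version='v1.0-mini', dataroot='/data/coperception-uav', verbose=True)

sample = nusc.sample[0]                       # first annotated keyframe
for sensor, token in sample['data'].items():  # one record per camera channel
    sd = nusc.get('sample_data', token)
    print(sensor, sd['filename'])
&lt;/code&gt;&lt;/pre&gt;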
&lt;h2 id=&#34;citation&#34;&gt;Citation&lt;/h2&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;To Be Done
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;</description>
    </item>
    
    <item>
      <title>End-to-end human object interaction detection with hoi transformer [CVPR 2021]</title>
      <link>https://yuehu.github.io/publication/zou-hoi-2021/</link>
      <pubDate>Fri, 01 Jan 2021 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/zou-hoi-2021/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Collaborative motion prediction via neural motion message passing [CVPR 2020 Oral]</title>
      <link>https://yuehu.github.io/publication/hu-nmmp-2020/</link>
      <pubDate>Wed, 01 Jan 2020 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-nmmp-2020/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Neural message passing for visual relationship detection [ICML 2019 Workshop Spotlight]</title>
      <link>https://yuehu.github.io/publication/hu-vrd-2019/</link>
      <pubDate>Sat, 01 Jun 2019 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/publication/hu-vrd-2019/</guid>
      <description></description>
    </item>
    
    <item>
      <title>Slides</title>
      <link>https://yuehu.github.io/slides/example/</link>
      <pubDate>Tue, 05 Feb 2019 00:00:00 +0000</pubDate>
      <guid>https://yuehu.github.io/slides/example/</guid>
      <description>&lt;h1 id=&#34;create-slides-in-markdown-with-wowchemy&#34;&gt;Create slides in Markdown with Wowchemy&lt;/h1&gt;
&lt;p&gt;&lt;a href=&#34;https://wowchemy.com/&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;Wowchemy&lt;/a&gt; | &lt;a href=&#34;https://wowchemy.com/docs/content/slides/&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;Documentation&lt;/a&gt;&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;features&#34;&gt;Features&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;Efficiently write slides in Markdown&lt;/li&gt;
&lt;li&gt;3-in-1: Create, Present, and Publish your slides&lt;/li&gt;
&lt;li&gt;Supports speaker notes&lt;/li&gt;
&lt;li&gt;Mobile friendly slides&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;h2 id=&#34;controls&#34;&gt;Controls&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;Next: &lt;code&gt;Right Arrow&lt;/code&gt; or &lt;code&gt;Space&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Previous: &lt;code&gt;Left Arrow&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Start: &lt;code&gt;Home&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Finish: &lt;code&gt;End&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Overview: &lt;code&gt;Esc&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Speaker notes: &lt;code&gt;S&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Fullscreen: &lt;code&gt;F&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;Zoom: &lt;code&gt;Alt + Click&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://revealjs.com/pdf-export/&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;PDF Export&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;h2 id=&#34;code-highlighting&#34;&gt;Code Highlighting&lt;/h2&gt;
&lt;p&gt;Inline code: &lt;code&gt;variable&lt;/code&gt;&lt;/p&gt;
&lt;p&gt;Code block:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-python&#34; data-lang=&#34;python&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;n&#34;&gt;porridge&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;=&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;blueberry&amp;#34;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;k&#34;&gt;if&lt;/span&gt; &lt;span class=&#34;n&#34;&gt;porridge&lt;/span&gt; &lt;span class=&#34;o&#34;&gt;==&lt;/span&gt; &lt;span class=&#34;s2&#34;&gt;&amp;#34;blueberry&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;    &lt;span class=&#34;nb&#34;&gt;print&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;(&lt;/span&gt;&lt;span class=&#34;s2&#34;&gt;&amp;#34;Eating...&amp;#34;&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;)&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;hr&gt;
&lt;h2 id=&#34;math&#34;&gt;Math&lt;/h2&gt;
&lt;p&gt;In-line math: $x + y = z$&lt;/p&gt;
&lt;p&gt;Block math:&lt;/p&gt;
&lt;p&gt;$$
f\left( x \right) = \frac{2\left( x + 4 \right)\left( x - 4 \right)}{\left( x + 4 \right)\left( x + 1 \right)}
$$&lt;/p&gt;
&lt;hr&gt;
&lt;h2 id=&#34;fragments&#34;&gt;Fragments&lt;/h2&gt;
&lt;p&gt;Make content appear incrementally&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-fallback&#34; data-lang=&#34;fallback&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;{{% fragment %}} One {{% /fragment %}}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;{{% fragment %}} **Two** {{% /fragment %}}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;{{% fragment %}} Three {{% /fragment %}}
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;p&gt;Press &lt;code&gt;Space&lt;/code&gt; to play!&lt;/p&gt;
&lt;span class=&#34;fragment &#34; &gt;
  One
&lt;/span&gt;
&lt;span class=&#34;fragment &#34; &gt;
  &lt;strong&gt;Two&lt;/strong&gt;
&lt;/span&gt;
&lt;span class=&#34;fragment &#34; &gt;
  Three
&lt;/span&gt;
&lt;hr&gt;
&lt;p&gt;A fragment can accept two optional parameters:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;code&gt;class&lt;/code&gt;: use a custom style (requires definition in custom CSS)&lt;/li&gt;
&lt;li&gt;&lt;code&gt;weight&lt;/code&gt;: sets the order in which a fragment appears&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;h2 id=&#34;speaker-notes&#34;&gt;Speaker Notes&lt;/h2&gt;
&lt;p&gt;Add speaker notes to your presentation&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-markdown&#34; data-lang=&#34;markdown&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;{{% speaker_note %}}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;k&#34;&gt;-&lt;/span&gt; Only the speaker can read these notes
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;k&#34;&gt;-&lt;/span&gt; Press &lt;span class=&#34;sb&#34;&gt;`S`&lt;/span&gt; key to view
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  {{% /speaker_note %}}
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;p&gt;Press the &lt;code&gt;S&lt;/code&gt; key to view the speaker notes!&lt;/p&gt;
&lt;aside class=&#34;notes&#34;&gt;
  &lt;ul&gt;
&lt;li&gt;Only the speaker can read these notes&lt;/li&gt;
&lt;li&gt;Press &lt;code&gt;S&lt;/code&gt; key to view&lt;/li&gt;
&lt;/ul&gt;

&lt;/aside&gt;
&lt;hr&gt;
&lt;h2 id=&#34;themes&#34;&gt;Themes&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;black: Black background, white text, blue links (default)&lt;/li&gt;
&lt;li&gt;white: White background, black text, blue links&lt;/li&gt;
&lt;li&gt;league: Gray background, white text, blue links&lt;/li&gt;
&lt;li&gt;beige: Beige background, dark text, brown links&lt;/li&gt;
&lt;li&gt;sky: Blue background, thin dark text, blue links&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;
&lt;ul&gt;
&lt;li&gt;night: Black background, thick white text, orange links&lt;/li&gt;
&lt;li&gt;serif: Cappuccino background, gray text, brown links&lt;/li&gt;
&lt;li&gt;simple: White background, black text, blue links&lt;/li&gt;
&lt;li&gt;solarized: Cream-colored background, dark green text, blue links&lt;/li&gt;
&lt;/ul&gt;
&lt;hr&gt;

&lt;section data-noprocess data-shortcode-slide
  
      
      data-background-image=&#34;/media/boards.jpg&#34;
  &gt;

&lt;h2 id=&#34;custom-slide&#34;&gt;Custom Slide&lt;/h2&gt;
&lt;p&gt;Customize the slide style and background&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-markdown&#34; data-lang=&#34;markdown&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;{{&lt;span class=&#34;p&#34;&gt;&amp;lt;&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;slide&lt;/span&gt; &lt;span class=&#34;na&#34;&gt;background-image&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;&amp;#34;/media/boards.jpg&amp;#34;&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;&amp;gt;&lt;/span&gt;}}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;{{&lt;span class=&#34;p&#34;&gt;&amp;lt;&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;slide&lt;/span&gt; &lt;span class=&#34;na&#34;&gt;background-color&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;&amp;#34;#0000FF&amp;#34;&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;&amp;gt;&lt;/span&gt;}}
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;{{&lt;span class=&#34;p&#34;&gt;&amp;lt;&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;slide&lt;/span&gt; &lt;span class=&#34;na&#34;&gt;class&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;=&lt;/span&gt;&lt;span class=&#34;s&#34;&gt;&amp;#34;my-style&amp;#34;&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;&amp;gt;&lt;/span&gt;}}
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;hr&gt;
&lt;h2 id=&#34;custom-css-example&#34;&gt;Custom CSS Example&lt;/h2&gt;
&lt;p&gt;Let&amp;rsquo;s make headers navy colored.&lt;/p&gt;
&lt;p&gt;Create &lt;code&gt;assets/css/reveal_custom.css&lt;/code&gt; with:&lt;/p&gt;
&lt;div class=&#34;highlight&#34;&gt;&lt;pre tabindex=&#34;0&#34; class=&#34;chroma&#34;&gt;&lt;code class=&#34;language-css&#34; data-lang=&#34;css&#34;&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;nc&#34;&gt;reveal&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;section&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;h1&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;nc&#34;&gt;reveal&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;section&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;h2&lt;/span&gt;&lt;span class=&#34;o&#34;&gt;,&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;.&lt;/span&gt;&lt;span class=&#34;nc&#34;&gt;reveal&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;section&lt;/span&gt; &lt;span class=&#34;nt&#34;&gt;h3&lt;/span&gt; &lt;span class=&#34;p&#34;&gt;{&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;  &lt;span class=&#34;k&#34;&gt;color&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;:&lt;/span&gt; &lt;span class=&#34;kc&#34;&gt;navy&lt;/span&gt;&lt;span class=&#34;p&#34;&gt;;&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;span class=&#34;line&#34;&gt;&lt;span class=&#34;cl&#34;&gt;&lt;span class=&#34;p&#34;&gt;}&lt;/span&gt;
&lt;/span&gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;hr&gt;
&lt;h1 id=&#34;questions&#34;&gt;Questions?&lt;/h1&gt;
&lt;p&gt;&lt;a href=&#34;https://discord.gg/z8wNYzb&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;Ask&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;&lt;a href=&#34;https://wowchemy.com/docs/content/slides/&#34; target=&#34;_blank&#34; rel=&#34;noopener&#34;&gt;Documentation&lt;/a&gt;&lt;/p&gt;
</description>
    </item>
    
    
  </channel>
</rss>
