diff --git a/.github/workflows/gh-pages.yaml b/.github/workflows/gh-pages.yaml new file mode 100644 index 0000000..716b126 --- /dev/null +++ b/.github/workflows/gh-pages.yaml @@ -0,0 +1,76 @@ +# .github/workflows/gh-pages.yaml + +name: GitHub Pages + +on: + workflow_dispatch: + push: + branches: + - page + paths: + - page/** + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build-page: + runs-on: ubuntu-latest + env: + HUGO_VERSION: 0.127.0 + concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Setup Hugo + uses: peaceiris/actions-hugo@v2 + with: + hugo-version: "${{ env.HUGO_VERSION }}" + extended: true + + - name: Setup Pages + id: pages + uses: actions/configure-pages@v3 + + - name: Build + working-directory: ./page + run: hugo --minify --gc + + # - name: Setup Pages + # id: pages + # uses: actions/configure-pages@v5 + + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: ./page/public + + # - name: Deploy + # uses: peaceiris/actions-gh-pages@v3 + # if: ${{ github.ref == 'refs/heads/main' }} + # with: + # github_token: ${{ secrets.GITHUB_TOKEN }} + # publish_branch: gh-pages + # publish_dir: ./page/public + + deploy-page: # Deployment job + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build-page + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.gitignore b/.gitignore index 82f9275..90dc3d9 100644 --- a/.gitignore +++ b/.gitignore @@ -160,3 +160,13 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ + +# Hugo +# Generated files by Hugo +/page/public/ +/page/resources/_gen/ +/page/assets/jsconfig.json +/page/hugo_stats.json + +# Temporary lock file while building +/page/.hugo_build.lock \ No newline at end of file diff --git a/README.md b/README.md index ee1e040..bcb9885 100644 --- a/README.md +++ b/README.md @@ -1 +1,15 @@ -# snap \ No newline at end of file +# snap + +1. `hugo.exe new site page` +2. `cd page` +3. `hugo.exe mod init github.com/moverseai/snap` +4. create `config/_default/module.toml` + ```toml + [[imports]] + disable = false + path = "github.com/nunocoracao/blowfish/v2" + ``` +5.
add `go` module to `go.mod` + ```go + require github.com/nunocoracao/blowfish/v2 v2.78.0 // indirect + ``` \ No newline at end of file diff --git a/page/archetypes/default.md b/page/archetypes/default.md new file mode 100644 index 0000000..c6f3fce --- /dev/null +++ b/page/archetypes/default.md @@ -0,0 +1,5 @@ ++++ +title = '{{ replace .File.ContentBaseName "-" " " | title }}' +date = {{ .Date }} +draft = true ++++ diff --git a/page/assets/img/ahmad-odeh-JhqhGfX_Wd8-unsplash.jpg b/page/assets/img/ahmad-odeh-JhqhGfX_Wd8-unsplash.jpg new file mode 100644 index 0000000..e46653d Binary files /dev/null and b/page/assets/img/ahmad-odeh-JhqhGfX_Wd8-unsplash.jpg differ diff --git a/page/assets/img/ahmad-odeh-TK_WT3dl2tw-unsplash.jpg b/page/assets/img/ahmad-odeh-TK_WT3dl2tw-unsplash.jpg new file mode 100644 index 0000000..6707212 Binary files /dev/null and b/page/assets/img/ahmad-odeh-TK_WT3dl2tw-unsplash.jpg differ diff --git a/page/assets/img/avatar.png b/page/assets/img/avatar.png new file mode 100644 index 0000000..6161f7b Binary files /dev/null and b/page/assets/img/avatar.png differ diff --git a/page/assets/img/javier-miranda-ltyHWSOXBKg-unsplash.jpg b/page/assets/img/javier-miranda-ltyHWSOXBKg-unsplash.jpg new file mode 100644 index 0000000..94127e2 Binary files /dev/null and b/page/assets/img/javier-miranda-ltyHWSOXBKg-unsplash.jpg differ diff --git a/page/assets/img/javier-miranda-mIg0GL63lFk-unsplash.jpg b/page/assets/img/javier-miranda-mIg0GL63lFk-unsplash.jpg new file mode 100644 index 0000000..c4a2b74 Binary files /dev/null and b/page/assets/img/javier-miranda-mIg0GL63lFk-unsplash.jpg differ diff --git a/page/assets/img/mario-verduzco-vua8NbYusGE-unsplash.jpg b/page/assets/img/mario-verduzco-vua8NbYusGE-unsplash.jpg new file mode 100644 index 0000000..6f42367 Binary files /dev/null and b/page/assets/img/mario-verduzco-vua8NbYusGE-unsplash.jpg differ diff --git a/page/assets/img/milad-fakurian-DjjaZybYx4I-unsplash.jpg b/page/assets/img/milad-fakurian-DjjaZybYx4I-unsplash.jpg new file mode 100644 index 0000000..7941c4a Binary files /dev/null and b/page/assets/img/milad-fakurian-DjjaZybYx4I-unsplash.jpg differ diff --git a/page/assets/img/moverse_logo_white.png b/page/assets/img/moverse_logo_white.png new file mode 100644 index 0000000..f4218a1 Binary files /dev/null and b/page/assets/img/moverse_logo_white.png differ diff --git a/page/assets/img/sebastian-svenson-LpbyDENbQQg-unsplash.jpg b/page/assets/img/sebastian-svenson-LpbyDENbQQg-unsplash.jpg new file mode 100644 index 0000000..da8be0b Binary files /dev/null and b/page/assets/img/sebastian-svenson-LpbyDENbQQg-unsplash.jpg differ diff --git a/page/assets/img/shubham-dhage-5Sh0zR_mYQ4-unsplash.jpg b/page/assets/img/shubham-dhage-5Sh0zR_mYQ4-unsplash.jpg new file mode 100644 index 0000000..49f394f Binary files /dev/null and b/page/assets/img/shubham-dhage-5Sh0zR_mYQ4-unsplash.jpg differ diff --git a/page/assets/img/shubham-dhage-5Za8lloGtCg-unsplash.jpg b/page/assets/img/shubham-dhage-5Za8lloGtCg-unsplash.jpg new file mode 100644 index 0000000..374192c Binary files /dev/null and b/page/assets/img/shubham-dhage-5Za8lloGtCg-unsplash.jpg differ diff --git a/page/assets/img/shubham-dhage-Jkloa4CKWZs-unsplash.jpg b/page/assets/img/shubham-dhage-Jkloa4CKWZs-unsplash.jpg new file mode 100644 index 0000000..0d7d7b7 Binary files /dev/null and b/page/assets/img/shubham-dhage-Jkloa4CKWZs-unsplash.jpg differ diff --git a/page/assets/img/shubham-dhage-L7UGyxX7dyY-unsplash.jpg b/page/assets/img/shubham-dhage-L7UGyxX7dyY-unsplash.jpg new file mode 
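As a follow-up to the README steps above, a minimal sketch for checking the theme module wiring locally before pushing (assuming a recent Hugo extended binary and the Go toolchain are installed):

```bash
cd page
hugo mod get -u   # resolve the Blowfish module declared in module.toml / go.mod
hugo mod tidy     # prune and complete go.mod / go.sum
hugo server -D    # build and serve the site locally, including draft content
```

If the module resolves cleanly, the gh-pages workflow runs the same build with `hugo --minify --gc`.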
100644 index 0000000..b5d2b82 Binary files /dev/null and b/page/assets/img/shubham-dhage-L7UGyxX7dyY-unsplash.jpg differ diff --git a/page/assets/img/shubham-dhage-nG1LnDkt3HA-unsplash.jpg b/page/assets/img/shubham-dhage-nG1LnDkt3HA-unsplash.jpg new file mode 100644 index 0000000..fd54d1f Binary files /dev/null and b/page/assets/img/shubham-dhage-nG1LnDkt3HA-unsplash.jpg differ diff --git a/page/assets/img/shubham-dhage-vMutd64TPkE-unsplash.jpg b/page/assets/img/shubham-dhage-vMutd64TPkE-unsplash.jpg new file mode 100644 index 0000000..62f34cf Binary files /dev/null and b/page/assets/img/shubham-dhage-vMutd64TPkE-unsplash.jpg differ diff --git a/page/config/_default/config.toml b/page/config/_default/config.toml new file mode 100644 index 0000000..e5519d2 --- /dev/null +++ b/page/config/_default/config.toml @@ -0,0 +1,19 @@ + +defaultContentLanguage = "en" +enableRobotsTXT = "true" +pagination.pagerSize = 10 +summaryLength = 80 +outputs.home = ["HTML", "RSS", "JSON"] + +[taxonomies] + tag = "tags" + category = "categories" + author = "authors" + series = "series" + article = "articles" + paper = "papers" + +[markup] + [markup.tableOfContents] + endLevel = 4 + startLevel = 1 \ No newline at end of file diff --git a/page/config/_default/languages.en.toml b/page/config/_default/languages.en.toml new file mode 100644 index 0000000..ec554e0 --- /dev/null +++ b/page/config/_default/languages.en.toml @@ -0,0 +1,17 @@ +title = "SNAP" # "SNAP" + +[params.author] +name = "SNAP" +# image = "img/moverse_logo.png" +image = "img/moverse_logo_white.png" +headline = "Solving Neural Articulated Performances" +bio = "3D Animations at scale." +links = [ + { linkedin = "https://linkedin.com/company/moverseai" }, + { x-twitter = "https://twitter.com/moverseai" }, + { youtube = "https://www.youtube.com/@moverseaimocap" }, + { instagram = "https://www.instagram.com/moverseai/" }, + { discord = "https://discord.gg/bQc7B6qSPd" }, + { github = "https://www.github.com/moverseai" }, + { link = "https://www.moverse.ai" }, +] diff --git a/page/config/_default/menus.en.toml b/page/config/_default/menus.en.toml new file mode 100644 index 0000000..b70414c --- /dev/null +++ b/page/config/_default/menus.en.toml @@ -0,0 +1,208 @@ +# config/_default/menus.toml + +[[main]] + name = "Articles" + pageRef = "articles" + weight = 10 + +[[main]] + name = "Papers" + pageRef = "papers" + weight = 20 + +# [[main]] + # name = "About" + # pageRef = "about" + # weight = 25 + +[[main]] + pre = "github" + name = "GitHub" + url = "https://github.com/moverseai/snap" + weight = 30 + +# [[main]] + # name = "Parent" + # weight = 35 + +# [[main]] + # name = "sub-menu 1" + # parent = "Parent" + # pageRef = "samples" + # weight = 36 + +# [[main]] + # name = "sub-menu 2" + # parent = "Parent" + # pageRef = "samples" + # weight = 37 + +# [[main]] + # name = "sub-menu 3" + # parent = "Parent" + # pre = "github" + # pageRef = "samples" + # weight = 38 + + +# [[main]] + # identifier = "github2" + # pre = "github" + # url = "https://github.com/" + # weight = 40 + +# [[footer]] + # name = "Privacy" + # url = "https://external-link" + +# config/_default/menus.toml + +[[subnavigation]] + name = "NeRF" + pageRef = "tags/nerf" + weight = 10 + +[[subnavigation]] + name = "SMPL" + pageRef = "tags/smpl" + weight = 11 + +[[subnavigation]] + name = "Skeleton" + pageRef = "tags/skeleton" + weight = 12 + +[[subnavigation]] + name = "GHUM" + pageRef = "tags/ghum" + weight = 13 + +[[subnavigation]] + name = "Generalized" + pageRef = "tags/generalized" + weight = 14 + 
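+# Note: each subnavigation pageRef targets a Hugo taxonomy term page (e.g. /tags/nerf/); a term page only exists once at least one paper lists that tag in its front matter, and weight controls the display order.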
+[[subnavigation]] + name = "SDF" + pageRef = "tags/sdf" + weight = 15 + +[[subnavigation]] + name = "Texture" + pageRef = "tags/texture" + weight = 16 + +[[subnavigation]] + name = "Deformation" + pageRef = "tags/deformation" + weight = 17 + +[[subnavigation]] + name = "Monocular" + pageRef = "tags/monocular" + weight = 18 + +[[subnavigation]] + name = "Multiperson" + pageRef = "tags/multiperson" + weight = 19 + +# [[subnavigation]] + # name = "My Awesome Category" + # pageRef = "categories/awesome" + # weight = 20 + +[[main]] + name = "Venues" + weight = 45 + +[[main]] + name = "ICCV21" + parent = "Venues" + pageRef = "tags/iccv21" + weight = 46 + +[[main]] + name = "CVPR21" + parent = "Venues" + pageRef = "tags/cvpr21" + weight = 47 + +[[main]] + name = "NeurIPS21" + parent = "Venues" + pageRef = "tags/neurips21" + weight = 48 + +[[main]] + name = "arXiv21" + parent = "Venues" + pageRef = "tags/arxiv21" + weight = 49 + +[[main]] + name = "SIGGRAPH Asia 21" + parent = "Venues" + pageRef = "tags/siggraph_asia21" + weight = 48 + +[[main]] + name = "CVPR22" + parent = "Venues" + pageRef = "tags/cvpr22" + weight = 48 + +[[main]] + name = "SIGGRAPH22" + parent = "Venues" + pageRef = "tags/siggraph22" + weight = 49 + +[[main]] + name = "3DV22" + parent = "Venues" + pageRef = "tags/3dv22" + weight = 50 + +[[main]] + name = "ECCV22" + parent = "Venues" + pageRef = "tags/eccv22" + weight = 51 + +[[main]] + name = "SIGGRAPH Asia 22" + parent = "Venues" + pageRef = "tags/siggraph_asia22" + weight = 52 + +[[main]] + name = "arXiv22" + parent = "Venues" + pageRef = "tags/arxiv22" + weight = 53 + + +[[main]] + name = "CVPR 23" + parent = "Venues" + pageRef = "tags/cvpr23" + weight = 56 + +[[main]] + name = "TPAMI 23" + parent = "Venues" + pageRef = "tags/tpami23" + weight = 57 + +[[main]] + name = "TVCG 23" + parent = "Venues" + pageRef = "tags/tvcg23" + weight = 58 + +[[main]] + name = "TPAMI 24" + parent = "Venues" + pageRef = "tags/tpami24" + weight = 59 \ No newline at end of file diff --git a/page/config/_default/module.toml b/page/config/_default/module.toml new file mode 100644 index 0000000..520d196 --- /dev/null +++ b/page/config/_default/module.toml @@ -0,0 +1,3 @@ +[[imports]] +disable = false +path = "github.com/nunocoracao/blowfish/v2" diff --git a/page/config/_default/params.toml b/page/config/_default/params.toml new file mode 100644 index 0000000..feed52f --- /dev/null +++ b/page/config/_default/params.toml @@ -0,0 +1,120 @@ + +colorScheme = "congo" # https://blowfish.page/docs/getting-started/#colour-schemes +defaultAppearance = "light" +autoSwitchAppearance = true +enableSearch = true +enableCodeCopy = true +replyByEmail = true +robots = "all" +disableImageZoom = false +backgroundImageWidth = 1200 +disableTextInHeader = false +defaultBackgroundImage = "img/sebastian-svenson-LpbyDENbQQg-unsplash" +highlightCurrentMenuArea = true +smartTOC = true +smartTOCHideUnfocusedChildren = false + +header.layout = "fixed-fill-blur" # Defines the header for the entire site, supported values are basic, fixed, fixed-fill, and fixed-fill-blur +# header.layout = "fixed-fill" # Defines the header for the entire site, supported values are basic, fixed, fixed-fill, and fixed-fill-blur + +footer.showMenu = true +footer.showCopyright = true +footer.showThemeAttribution = true +footer.showAppearanceSwitcher = true +footer.showScrollToTop = true + +homepage.layout = "hero" # The layout of the homepage. Valid values are page, profile, hero, card, background, or custom. 
When set to custom +# homepage.homepageImage = "img/sebastian-svenson-LpbyDENbQQg-unsplash.jpg" +# homepage.homepageImage = "img/ahmad-odeh-JhqhGfX_Wd8-unsplash.jpg" +# homepage.homepageImage = "img/shubham-dhage-5Za8lloGtCg-unsplash.jpg" +# homepage.homepageImage = "img/shubham-dhage-Jkloa4CKWZs-unsplash.jpg" +# homepage.homepageImage = "img/shubham-dhage-L7UGyxX7dyY-unsplash.jpg" +# homepage.homepageImage = "img/shubham-dhage-nG1LnDkt3HA-unsplash.jpg" +# homepage.homepageImage = "img/shubham-dhage-vMutd64TPkE-unsplash.jpg" +# homepage.homepageImage = "img/ahmad-odeh-TK_WT3dl2tw-unsplash.jpg" +homepage.homepageImage = "img/javier-miranda-mIg0GL63lFk-unsplash.jpg" +# homepage.homepageImage = "img/mario-verduzco-vua8NbYusGE-unsplash.jpg" +# homepage.homepageImage = "img/milad-fakurian-DjjaZybYx4I-unsplash.jpg" + +homepage.showRecent = true +homepage.showRecentItems = 5 +homepage.showMoreLink = true +homepage.showMoreLinkDest = "papers" # "categories/papers" +homepage.cardView = true +homepage.cardViewScreenWidth = true +homepage.layoutBackgroundBlur = true + +article.showDate = true # Whether or not article dates are displayed. +# article.showViews = false # Whether or not article views are displayed. This requires firebase integrations to be enabled, look below. +# article.showLikes = false # Whether or not article likes are displayed. This requires firebase integrations to be enabled, look below. +article.showDateOnlyInArticle = true # Show date within article even if not displayed in article listings/cards. +article.showDateUpdated = true # Whether or not the dates articles were updated are displayed. +article.showAuthor = true # Whether or not the author box is displayed in the article footer. +article.showAuthorBottom = false # Author boxes are displayed at the bottom of each page instead of the top. +article.showHero = true # Whether the thumbnail image will be shown as a hero image within each article page. +article.heroStyle = "basic" # Not set Style to display the hero image, valid options are: basic, big, background, thumbAndBackground. +article.layoutBackgroundBlur = true # Makes the background image in the background article heroStyle blur with the scroll +article.layoutBackgroundHeaderSpace = true # Add space between the header and the body. +article.showBreadcrumbs = true # Whether or not breadcrumbs are displayed in the article header. +article.showDraftLabel = true # Whether or not the draft indicator is shown next to articles when site is built with --buildDrafts. +article.showEdit = true # Whether or not the link to edit the article content should be displayed. +# article.editURL = # Not set When article.showEdit is active, the URL for the edit link. +article.editAppendPath = true # When article.showEdit is active, whether or not the path to the current article should be appended to the URL set at article.editURL. +article.seriesOpened = true # Whether or not the series module will be displayed open by default or not. +article.showHeadingAnchors = true # Whether or not heading anchor links are displayed alongside headings within articles. +article.showPagination = true # Whether or not the next/previous article links are displayed in the article footer. +article.invertPagination = false # Whether or not to flip the direction of the next/previous article links. +article.showReadingTime = true # Whether or not article reading times are displayed. +article.showTableOfContents = true # Whether or not the table of contents is displayed on articles. 
+article.showRelatedContent = true # Display related content for each post. Might required additional configuration to your config.toml. Please check the theme config.toml if you want to enable this feature and copy all the relevant related entries. Also check Hugo’s docs on related content. +article.relatedContentLimit = 3 # Limit of related articles to display if showRelatedContent is turned on. +article.showTaxonomies = true # Whether or not the taxonomies related to this article are displayed. +article.showAuthorsBadges = true # Whether the authors taxonomies are are displayed in the article or list header. This requires the setup of multiple authors and the authors taxonomy. Check this page for more details on how to configure that feature. +article.showWordCount = true # Whether or not article word counts are displayed. +article.showComments = true # Whether or not the comments partial is included after the article footer. +article.sharingLinks = ["linkedin", "twitter", "reddit", "email"] # Which sharing links to display at the end of each article. When not provided, or set to false no links will be displayed. Available values are: “linkedin”, “twitter”, “reddit”, “pinterest”, “facebook”, “email”, “whatsapp”, and “telegram” +article.showZenMode = true # Flag to activate Zen Mode reading feature for articles. + + +list.showHero = true # Whether the thumbnail image will be shown as a hero image within each list page. +list.heroStyle = "basic" # Style to display the hero image, valid options are: basic, big, background, thumbAndBackground. +list.showBreadcrumbs = true # Whether or not breadcrumbs are displayed in the header on list pages. +list.layoutBackgroundBlur = true # Makes the background image in the background list heroStyle blur with the scroll +list.layoutBackgroundHeaderSpace = true # Add space between the header and the body. +list.showTableOfContents = true # Whether or not the table of contents is displayed on list pages. +list.showSummary = false # Whether or not article summaries are displayed on list pages. If a summary is not provided in the front matter, one will be auto generated using the summaryLength parameter in the site configuration. +# list.showViews = true # Whether or not list views are displayed. This requires firebase integrations to be enabled, look below. +# list.showLikes = false # Whether or not list likes are displayed. This requires firebase integrations to be enabled, look below. +list.showCards = true # Whether or not each article is displayed as a card or as simple inline text. +list.orderByWeight = true # Whether or not articles are sorted by weights. +list.groupByYear = false # Whether or not articles are grouped by year on list pages. +list.cardView = true # Display lists as a gallery of cards. +list.cardViewScreenWidth = true # Enhance the width of card galleries in lists to take the full width available. +list.constrainItemsWidth = true # Limit item width to prose to increase readability. Useful when no feature images are available. + +sitemap.excludedKinds = ["taxonomy", "term"] # Kinds of content that should be excluded from the generated /sitemap.xml file. Refer to the Hugo docs for acceptable values. + +taxonomy.showTermCount = true # Whether or not the number of articles within a taxonomy term is displayed on the taxonomy listing. +taxonomy.showHero = false # Whether the thumbnail image will be shown as a hero image within each taxonomy page. 
+taxonomy.heroStyle = "basic" # Style to display the hero image, valid options are: basic, big, background, thumbAndBackground. +taxonomy.showBreadcrumbs = true # Whether or not breadcrumbs are displayed in the taxonomy header. +# taxonomy.showViews = false # Whether or not article views are displayed. This requires firebase integrations to be enabled, look below. +# taxonomy.showLikes = false # Whether or not article likes are displayed. This requires firebase integrations to be enabled, look below. +taxonomy.showTableOfContents = true # Whether or not the table of contents is displayed on taxonomies. +taxonomy.cardView = true # Display lists as a gallery of cards. + +term.showHero = true # Whether the thumbnail image will be shown as a hero image within each term page. +term.heroStyle = "basic" # Style to display the hero image, valid options are: basic, big, background, thumbAndBackground. +term.showBreadcrumbs = true # Whether or not breadcrumbs are displayed in the term header. +# term.showViews = false # Whether or not article views are displayed. This requires firebase integrations to be enabled, look below. +# term.showLikes = false # Whether or not article likes are displayed. This requires firebase integrations to be enabled, look below. +term.showTableOfContents = true # Whether or not the table of contents is displayed on terms. +term.groupByYear = true # Whether or not articles are grouped by year on term pages. +term.cardView = true # Display lists as a gallery of cards. +term.cardViewScreenWidth = false # Enhance the width of card galleries in lists to take the full width available. + +verification.google = true # The site verification string provided by Google to be included in the site metadata. +verification.bing = true # The site verification string provided by Bing to be included in the site metadata. +verification.pinterest = true # The site verification string provided by Pinterest to be included in the site metadata. +verification.yandex = false # The site verification string provided by Yandex to be included in the site metadata. + diff --git a/page/content/about.md b/page/content/about.md new file mode 100644 index 0000000..d42fd90 --- /dev/null +++ b/page/content/about.md @@ -0,0 +1,16 @@ +--- +title: SNAP +date: +draft: false +logo_path: +website: +description: +layout: article # simple # single # article +showAuthor: false +authors: + - "nick" +--- + +# Solving Neural Articulated Performances + + diff --git a/page/content/papers/_index.md b/page/content/papers/_index.md new file mode 100644 index 0000000..1e49859 --- /dev/null +++ b/page/content/papers/_index.md @@ -0,0 +1,14 @@ +--- +title: "Papers" +description: "Complete list of all papers." 
+cascade: + showReadingTime: false +layout: simple +--- + +{{< list title="2023" cardView=true limit=12 where="Type" value="2023" >}} + +{{< list title="2022" cardView=true limit=12 where="Type" value="2022" >}} + +{{< list title="2021" cardView=true limit=12 where="Type" value="2021" >}} + diff --git a/page/content/papers/a-nerf/feature.gif b/page/content/papers/a-nerf/feature.gif new file mode 100644 index 0000000..f87f6bf Binary files /dev/null and b/page/content/papers/a-nerf/feature.gif differ diff --git a/page/content/papers/a-nerf/index.md b/page/content/papers/a-nerf/index.md new file mode 100644 index 0000000..1b0e33a --- /dev/null +++ b/page/content/papers/a-nerf/index.md @@ -0,0 +1,67 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: A-NeRF +categories: ["papers"] +tags: ["nerf", "skeleton", "neurips21"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "Articulated Neural Radiance Fields for Learning Human Shape, Appearance, and Pose" +summary: TODO +keywords: # +type: '2021' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2021"] +series_order: 1 +--- + +## `A-NeRF`: Articulated Neural Radiance Fields for Learning Human Shape, Appearance, and Pose + +> Shih-Yang Su, Frank Yu, Michael Zollhöfer, Helge Rhodin + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} Skeleton {{< /keyword >}} +{{< keyword icon="email" >}} *NeurIPS* 2021 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="LemonATsu/A-NeRF" >}} + +### Abstract +{{< lead >}} +While deep learning reshaped the classical motion capture pipeline with feedforward networks, generative models are required to recover fine alignment via iterative refinement. Unfortunately, the existing models are usually hand-crafted or learned in controlled conditions, only applicable to limited domains. We propose a method to learn a generative neural body model from unlabelled monocular videos by extending Neural Radiance Fields (NeRFs). We equip them with a skeleton to apply to time-varying and articulated motion. A key insight is that implicit models require the inverse of the forward kinematics used in explicit surface models. Our reparameterization defines spatial latent variables relative to the pose of body parts and thereby overcomes ill-posed inverse operations with an overparameterization. This enables learning volumetric body shape and appearance from scratch while jointly refining the articulated pose; all without ground truth labels for appearance, pose, or 3D shape on the input videos. When used for novel-view-synthesis and motion capture, our neural model improves accuracy on diverse datasets. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2102.06199" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="A-NeRF overview" + caption="`A-NeRF` overview."
+ >}} + +### Results + +#### Data +{{}} +{{}} +{{}} + +#### Comparisons +{{}} + +#### Performance +{{}} +{{}} +{{}} +{{}} +{{}} diff --git a/page/content/papers/a-nerf/method.jpg b/page/content/papers/a-nerf/method.jpg new file mode 100644 index 0000000..370e230 Binary files /dev/null and b/page/content/papers/a-nerf/method.jpg differ diff --git a/page/content/papers/anim-nerf/feature.gif b/page/content/papers/anim-nerf/feature.gif new file mode 100644 index 0000000..84396f2 Binary files /dev/null and b/page/content/papers/anim-nerf/feature.gif differ diff --git a/page/content/papers/anim-nerf/index.md b/page/content/papers/anim-nerf/index.md new file mode 100644 index 0000000..bd2ca2d --- /dev/null +++ b/page/content/papers/anim-nerf/index.md @@ -0,0 +1,65 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: Anim-NeRF +categories: ["papers"] +tags: ["nerf", "smpl", "monocular", "arxiv21"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: Animatable Neural Radiance Fields from Monocular RGB Videos +summary: TODO +keywords: # +type: '2021' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2021"] +series_order: 2 +--- + +## `Anim-NeRF`: Animatable Neural Radiance Fields from Monocular RGB Videos + +> Jianchuan Chen, Ying Zhang, Di Kang, Xuefei Zhe, Linchao Bao, Xu Jia, Huchuan Lu + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *arXiv* 2021 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="JanaldoChen/Anim-NeRF" >}} + +### Abstract +{{< lead >}} +We present animatable neural radiance fields (animatable `NeRF`) for detailed human avatar creation from monocular videos. Our approach extends neural radiance fields (`NeRF`) to the dynamic scenes with human movements via introducing explicit pose-guided deformation while learning the scene representation network. In particular, we estimate the human pose for each frame and learn a constant canonical space for the detailed human template, which enables natural shape deformation from the observation space to the canonical space under the explicit control of the pose parameters. To compensate for inaccurate pose estimation, we introduce the pose refinement strategy that updates the initial pose during the learning process, which not only helps to learn more accurate human reconstruction but also accelerates the convergence. In experiments we show that the proposed approach achieves **1)** implicit human geometry and appearance reconstruction with high-quality details, **2)** photo-realistic rendering of the human from novel views, and **3)** animation of the human with novel poses. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2106.13629" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.png" + alt="Anim-NeRF overview" + caption="`Anim-NeRF` overview." 
+ >}} + +### Results + +#### Data + +{{}} +{{}} +{{}} + +#### Comparisons +{{}} + +#### Performance +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/anim-nerf/method.png b/page/content/papers/anim-nerf/method.png new file mode 100644 index 0000000..947b23b Binary files /dev/null and b/page/content/papers/anim-nerf/method.png differ diff --git a/page/content/papers/animatable_nerf/feature.gif b/page/content/papers/animatable_nerf/feature.gif new file mode 100644 index 0000000..51c9d01 Binary files /dev/null and b/page/content/papers/animatable_nerf/feature.gif differ diff --git a/page/content/papers/animatable_nerf/index.md b/page/content/papers/animatable_nerf/index.md new file mode 100644 index 0000000..72ac160 --- /dev/null +++ b/page/content/papers/animatable_nerf/index.md @@ -0,0 +1,64 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: AnimatableNeRF +categories: ["papers"] +tags: ["nerf", "smpl", "iccv21", "tpami24"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: Animatable Neural Radiance Fields for Modeling Dynamic Human Bodies +summary: TODO +keywords: # +type: '2021' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2021"] +series_order: 3 +--- + +## `AnimatableNeRF`: Animatable Neural Radiance Fields for Modeling Dynamic Human Bodies + +> Sida Peng, Junting Dong, Qianqian Wang, Shangzhan Zhang, Qing Shuai, Xiaowei Zhou, Hujun Bao + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *ICCV* 2021 {{< /keyword >}} +{{< keyword icon="email" >}} *TPAMI* 2024 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="zju3dv/animatable_nerf" >}} + +### Abstract +{{< lead >}} +This paper addresses the challenge of reconstructing an animatable human model from a multi-view video. Some recent works have proposed to decompose a non-rigidly deforming scene into a canonical neural radiance field and a set of deformation fields that map observation-space points to the canonical space, thereby enabling them to learn the dynamic scene from images. However, they represent the deformation field as translational vector field or SE(3) field, which makes the optimization highly under-constrained. Moreover, these representations cannot be explicitly controlled by input motions. Instead, we introduce neural blend weight fields to produce the deformation fields. Based on the skeleton-driven deformation, blend weight fields are used with 3D human skeletons to generate observation-tocanonical and canonical-to-observation correspondences. Since 3D human skeletons are more observable, they can regularize the learning of deformation fields. Moreover, the learned blend weight fields can be combined with input skeletal motions to generate new deformation fields to animate the human model. Experiments show that our approach significantly outperforms recent human synthesis methods. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2105.02872" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="AnimatableNeRF overview" + caption="`AnimatableNeRF` overview." 
+ >}} + +### Results + +#### Data +{{}} +{{}} + +#### Performance +{{}} +{{}} +{{}} +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/animatable_nerf/method.jpg b/page/content/papers/animatable_nerf/method.jpg new file mode 100644 index 0000000..27d413a Binary files /dev/null and b/page/content/papers/animatable_nerf/method.jpg differ diff --git a/page/content/papers/arah/feature.gif b/page/content/papers/arah/feature.gif new file mode 100644 index 0000000..34490c5 Binary files /dev/null and b/page/content/papers/arah/feature.gif differ diff --git a/page/content/papers/arah/index.md b/page/content/papers/arah/index.md new file mode 100644 index 0000000..505f094 --- /dev/null +++ b/page/content/papers/arah/index.md @@ -0,0 +1,74 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: ARAH +categories: ["papers"] +tags: ["nerf", "smpl", "sdf", "eccv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "ARAH: Animatable Volume Rendering of Articulated Human SDFs" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 3 +--- + +## `ARAH`: Animatable Volume Rendering of Articulated Human SDFs + +> Shaofei Wang, Katja Schwarz, Andreas Geiger, Siyu Tang + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} SDF {{< /keyword >}} +{{< keyword icon="email" >}} *ECCV* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="taconite/arah-release" >}} + +### Abstract +{{< lead >}} +Combining human body models with differentiable rendering has recently enabled animatable avatars of clothed humans from sparse sets of multi-view RGB videos. While state-of-the-art approaches achieve a realistic appearance with neural radiance fields (NeRF), the inferred geometry often lacks detail due to missing geometric constraints. Further, animating avatars in out-of-distribution poses is not yet possible because the mapping from observation space to canonical space does not generalize faithfully to unseen poses. In this work, we address these shortcomings and propose a model to create animatable clothed human avatars with detailed geometry that generalize well to out-of-distribution poses. To achieve detailed geometry, we combine an articulated implicit surface representation with volume rendering. For generalization, we propose a novel joint root-finding algorithm for simultaneous ray-surface intersection search and correspondence search. Our algorithm enables efficient point sampling and accurate point canonicalization while generalizing well to unseen poses. We demonstrate that our proposed pipeline can generate clothed avatars with high-quality pose-dependent geometry and appearance from a sparse set of multi-view RGB videos. Our method achieves state-of-the-art performance on geometry and appearance reconstruction while creating animatable avatars that generalize well to out-of-distribution poses beyond the small number of training poses. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2210.100362" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method1.png" + alt="ARAH overview" + caption="`ARAH` overview." + >}} + +{{< figure + src="method2.png" + alt="ARAH details" + caption="`ARAH` rendering." 
+ >}} + +### Results + +#### Data +{{}} +{{}} + +#### Comparisons +{{}} +{{}} +{{}} + +#### Performance +{{}} +{{}} +{{}} +{{}} diff --git a/page/content/papers/arah/method1.png b/page/content/papers/arah/method1.png new file mode 100644 index 0000000..45bf62e Binary files /dev/null and b/page/content/papers/arah/method1.png differ diff --git a/page/content/papers/arah/method2.png b/page/content/papers/arah/method2.png new file mode 100644 index 0000000..687c5ed Binary files /dev/null and b/page/content/papers/arah/method2.png differ diff --git a/page/content/papers/cagenerf/feature.png b/page/content/papers/cagenerf/feature.png new file mode 100644 index 0000000..a0038fa Binary files /dev/null and b/page/content/papers/cagenerf/feature.png differ diff --git a/page/content/papers/cagenerf/index.md b/page/content/papers/cagenerf/index.md new file mode 100644 index 0000000..7651bed --- /dev/null +++ b/page/content/papers/cagenerf/index.md @@ -0,0 +1,58 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: CageNeRF +categories: ["papers"] +tags: ["nerf", "deformation", "neurips22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "CageNeRF: Cage-based Neural Radiance Fields for Generalized 3D Deformation and Animation" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 4 +--- + +## `CageNeRF`: Cage-based Neural Radiance Fields for Generalized 3D Deformation and Animation + +> Yicong Peng, Yichao Yan, Shenqi Liu, Yuhao Cheng, Shanyan Guan, Bowen Pan, Guangtao Zhai, Xiaokang Yang + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} Deformation {{< /keyword >}} +{{< keyword icon="email" >}} *NeurIPS* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="PengYicong/CageNeRF" >}} + +### Abstract +{{< lead >}} +While implicit representations have achieved high-fidelity results in 3D rendering, deforming and animating the implicit field remains challenging. Existing works typically leverage data-dependent models as deformation priors, such as SMPL for human body animation. However, this dependency on category-specific priors limits them to generalize to other objects. To solve this problem, we propose a novel framework for deforming and animating the neural radiance field learned on arbitrary objects. The key insight is that we introduce a cage-based representation as deformation prior, which is category-agnostic. Specifically, the deformation is performed based on an enclosing polygon mesh with sparsely defined vertices called cage inside the rendering space, where each point is projected into a novel position based on the barycentric interpolation of the deformed cage vertices. In this way, we transform the cage into a generalized constraint, which is able to deform and animate arbitrary target objects while preserving geometry details. Based on extensive experiments, we demonstrate the effectiveness of our framework in the task of geometry editing, object animation and deformation transfer. +{{< /lead >}} + +{{< button href="https://proceedings.neurips.cc/paper_files/paper/2022/file/cb78e6b5246b03e0b82b4acc8b11cc21-Paper-Conference.pdf" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.png" + alt="CageNeRF overview" + caption="`CageNeRF` overview."
+ >}} + +### Results + +#### Data +{{}} + +#### Comparisons +{{}} \ No newline at end of file diff --git a/page/content/papers/cagenerf/method.png b/page/content/papers/cagenerf/method.png new file mode 100644 index 0000000..6b60124 Binary files /dev/null and b/page/content/papers/cagenerf/method.png differ diff --git a/page/content/papers/danbo/feature.gif b/page/content/papers/danbo/feature.gif new file mode 100644 index 0000000..e67f70f Binary files /dev/null and b/page/content/papers/danbo/feature.gif differ diff --git a/page/content/papers/danbo/index.md b/page/content/papers/danbo/index.md new file mode 100644 index 0000000..5b49b91 --- /dev/null +++ b/page/content/papers/danbo/index.md @@ -0,0 +1,66 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: DANBO +categories: ["papers"] +tags: ["nerf", "skeleton", "eccv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "DANBO: Disentangled Articulated Neural Body Representations via Graph Neural Networks" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 1 +--- + +## `DANBO`: Disentangled Articulated Neural Body Representations via Graph Neural Networks + +> Shih-Yang Su, Timur Bagautdinov, Helge Rhodin + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} Skeleton {{< /keyword >}} +{{< keyword icon="email" >}} *ECCV* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="LemonATsu/DANBO-pytorch" >}} + +### Abstract +{{< lead >}} +Deep learning greatly improved the realism of animatable human models by learning geometry and appearance from collections of 3D scans, template meshes, and multi-view imagery. High-resolution models enable photo-realistic avatars but at the cost of requiring studio settings not available to end users. Our goal is to create avatars directly from raw images without relying on expensive studio setups and surface tracking. While a few such approaches exist, those have limited generalization capabilities and are prone to learning spurious (chance) correlations between irrelevant body parts, resulting in implausible deformations and missing body parts on unseen poses. We introduce a three-stage method that induces two inductive biases to better disentangled pose-dependent deformation. First, we model correlations of body parts explicitly with a graph neural network. Second, to further reduce the effect of chance correlations, we introduce localized per-bone features that use a factorized volumetric representation and a new aggregation function. We demonstrate that our model produces realistic body shapes under challenging unseen poses and shows high-quality image synthesis. Our proposed representation strikes a better trade-off between model capacity, expressiveness, and robustness than competing methods. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2205.01666" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="DANBO overview" + caption="`DANBO` overview." 
+ >}} + +### Results + +#### Data +{{}} +{{}} + +#### Comparisons +{{}} +{{}} +{{}} + +#### Performance +{{}} +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/danbo/overview.png b/page/content/papers/danbo/overview.png new file mode 100644 index 0000000..803bdab Binary files /dev/null and b/page/content/papers/danbo/overview.png differ diff --git a/page/content/papers/dracon/feature.png b/page/content/papers/dracon/feature.png new file mode 100644 index 0000000..0a240f4 Binary files /dev/null and b/page/content/papers/dracon/feature.png differ diff --git a/page/content/papers/dracon/index.md b/page/content/papers/dracon/index.md new file mode 100644 index 0000000..a8b26e6 --- /dev/null +++ b/page/content/papers/dracon/index.md @@ -0,0 +1,59 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: DRaCoN +categories: ["papers"] +tags: ["nerf", "smpl", "arxiv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "DRaCoN – Differentiable Rasterization Conditioned Neural Radiance Fields for Articulated Avatars" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 5 +--- + +## `DRaCoN` – Differentiable Rasterization Conditioned Neural Radiance Fields for Articulated Avatars + +> Amit Raj, Umar Iqbal, Koki Nagano, Sameh Khamis, Pavlo Molchanov, James Hays, Jan Kautz + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *arXiv* 2022 {{< /keyword >}} +{{< /keywordList >}} + +### Abstract +{{< lead >}} +Acquisition and creation of digital human avatars is an important problem with applications to virtual telepresence, gaming, and human modeling. Most contemporary approaches for avatar generation can be viewed either as 3D-based methods, which use multi-view data to learn a 3D representation with appearance (such as a mesh, implicit surface, or volume), or 2D-based methods which learn photo-realistic renderings of avatars but lack accurate 3D representations. In this work, we present, DRaCoN, a framework for learning full-body volumetric avatars which exploits the advantages of both the 2D and 3D neural rendering techniques. It consists of a Differentiable Rasterization module, DiffRas, that synthesizes a low-resolution version of the target image along with additional latent features guided by a parametric body model. The output of DiffRas is then used as conditioning to our conditional neural 3D representation module (c-NeRF) which generates the final high-res image along with body geometry using volumetric rendering. While DiffRas helps in obtaining photo-realistic image quality, c-NeRF, which employs signed distance fields (SDF) for 3D representations, helps to obtain fine 3D geometric details. Experiments on the challenging ZJU-MoCap and Human3.6M datasets indicate that DRaCoN outperforms state-of-the-art methods both in terms of error metrics and visual quality. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2203.15798" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="DRaCoN overview" + caption="`DRaCoN` overview." 
+ >}} + +### Results + +#### Data +{{}} +{{}} + +#### Comparisons +{{}} +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/dracon/overview.png b/page/content/papers/dracon/overview.png new file mode 100644 index 0000000..1dcc1e2 Binary files /dev/null and b/page/content/papers/dracon/overview.png differ diff --git a/page/content/papers/easymocap/feature.gif b/page/content/papers/easymocap/feature.gif new file mode 100644 index 0000000..a5ea224 Binary files /dev/null and b/page/content/papers/easymocap/feature.gif differ diff --git a/page/content/papers/easymocap/index.md b/page/content/papers/easymocap/index.md new file mode 100644 index 0000000..6b9093b --- /dev/null +++ b/page/content/papers/easymocap/index.md @@ -0,0 +1,42 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: EasyMoCap +categories: ["papers"] +tags: ["nerf", "smpl", "multiperson", "siggraph22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "Novel View Synthesis of Human Interactions from Sparse Multi-view Videos" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 13 +--- + +## Novel View Synthesis of Human Interactions from Sparse Multi-view Videos + +> Qing Shuai, Chen Geng, Qi Fang, Sida Peng, Wenhao Shen, Xiaowei Zhou, Hujun Bao + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *SIGGRAPH* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="zju3dv/EasyMocap" >}} + +### Abstract +{{< lead >}} +This paper presents a novel system for generating free-viewpoint videos of multiple human performers from very sparse RGB cameras. The system reconstructs a layered neural representation of the dynamic multi-person scene from multi-view videos with each layer representing a moving instance or static background. Unlike previous work that requires instance segmentation as input, a novel approach is proposed to decompose the multi-person scene into layers and reconstruct neural representations for each layer in a weakly-supervised manner, yielding both high-quality novel view rendering and accurate instance masks. Camera synchronization error is also addressed in the proposed approach.
The experiments demonstrate the better view synthesis quality of the proposed system compared to previous ones and the capability of producing an editable free-viewpoint video of a real soccer game using several asynchronous GoPro cameras +{{< /lead >}} + +{{< button href="https://dl.acm.org/doi/abs/10.1145/3528233.3530704" target="_blank" >}} +Paper +{{< /button >}} \ No newline at end of file diff --git a/page/content/papers/gm-nerf/feature.gif b/page/content/papers/gm-nerf/feature.gif new file mode 100644 index 0000000..34743cf Binary files /dev/null and b/page/content/papers/gm-nerf/feature.gif differ diff --git a/page/content/papers/gm-nerf/index.md b/page/content/papers/gm-nerf/index.md new file mode 100644 index 0000000..0f47fb9 --- /dev/null +++ b/page/content/papers/gm-nerf/index.md @@ -0,0 +1,67 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: GM-NeRF +categories: ["papers"] +tags: ["nerf", "smpl", "generalized", "cvpr23"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "GM-NeRF: Learning Generalizable Model-based Neural Radiance Fields from Multi-view Images" +summary: TODO +keywords: # +type: '2023' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2023"] +series_order: 4 +--- + +## `GM-NeRF`: Learning Generalizable Model-based Neural Radiance Fields from Multi-view Images + +> Jianchuan Chen, Wentao Yi, Liqian Ma, Xu Jia, Huchuan Lu + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} Generalized {{< /keyword >}} +{{< keyword icon="email" >}} *CVPR* 2023 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="JanaldoChen/GM-NeRF" >}} + +### Abstract +{{< lead >}} +In this work, we focus on synthesizing high-fidelity novel view images for arbitrary human performers, given a set of sparse multi-view images. It is a challenging task due to the large variation among articulated body poses and heavy self-occlusions. To alleviate this, we introduce an effective generalizable framework Generalizable Model-based Neural Radiance Fields (GM-NeRF) to synthesize freeviewpoint images. Specifically, we propose a geometryguided attention mechanism to register the appearance code from multi-view 2D images to a geometry proxy which can alleviate the misalignment between inaccurate geometry prior and pixel space. On top of that, we further conduct neural rendering and partial gradient backpropagation for efficient perceptual supervision and improvement of the perceptual quality of synthesis. To evaluate our method, we conduct experiments on synthesized datasets THuman2.0 and Multi-garment, and real-world datasets Genebody and ZJUMocap. The results demonstrate that our approach outperforms state-of-the-art methods in terms of novel view synthesis and geometric reconstruction. +{{< /lead >}} + +{{< button href="https://openaccess.thecvf.com/content/CVPR2023/papers/Chen_GM-NeRF_Learning_Generalizable_Model-Based_Neural_Radiance_Fields_From_Multi-View_Images_CVPR_2023_paper.pdf" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="GM-NeRF overview" + caption="`GM-NeRF` overview." 
+ >}} + +### Results + +#### Data +{{}} +{{}} +{{}} + +#### Comparisons +{{}} +{{}} +{{}} +{{}} + +#### Performance +{{}} diff --git a/page/content/papers/gm-nerf/overview.png b/page/content/papers/gm-nerf/overview.png new file mode 100644 index 0000000..15d6406 Binary files /dev/null and b/page/content/papers/gm-nerf/overview.png differ diff --git a/page/content/papers/gpnerf/details.png b/page/content/papers/gpnerf/details.png new file mode 100644 index 0000000..909c9f9 Binary files /dev/null and b/page/content/papers/gpnerf/details.png differ diff --git a/page/content/papers/gpnerf/feature.png b/page/content/papers/gpnerf/feature.png new file mode 100644 index 0000000..51cac59 Binary files /dev/null and b/page/content/papers/gpnerf/feature.png differ diff --git a/page/content/papers/gpnerf/index.md b/page/content/papers/gpnerf/index.md new file mode 100644 index 0000000..e064484 --- /dev/null +++ b/page/content/papers/gpnerf/index.md @@ -0,0 +1,73 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: GP-NeRF +categories: ["papers"] +tags: ["nerf", "smpl", "eccv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "Geometry-Guided Progressive NeRF for Generalizable +and Efficient Neural Human Rendering" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 3 +--- + +## `GP-NeRF`: Geometry-Guided Progressive NeRF for Generalizable and Efficient Neural Human Rendering + +> Mingfei Chen, Jianfeng Zhang, Xiangyu Xu, Lijuan Liu, Yujun Cai, Jiashi +Feng, Shuicheng Yan + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *ECCV* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="sail-sg/GP-Nerf" >}} + +### Abstract +{{< lead >}} +In this work we develop a generalizable and efficient Neural Radiance Field (NeRF) pipeline for high-fidelity free-viewpoint human body synthesis under settings with sparse camera views. Though existing NeRF-based methods can synthesize rather realistic details for human body, they tend to produce poor results when the input has self-occlusion, especially for unseen humans under sparse views. Moreover, these methods often require a large number of sampling points for rendering, which leads to low efficiency and limits their realworld applicability. To address these challenges, we propose a Geometry-guided Progressive NeRF (GP-NeRF). In particular, to better tackle self-occlusion, we devise a geometry-guided multi-view feature integration approach that utilizes the estimated geometry prior to integrate the incomplete information from input views and construct a complete geometry volume for the target human body. Meanwhile, for achieving higher rendering efficiency, we introduce a progressive rendering pipeline through geometry guidance, which leverages the geometric feature volume and the predicted density values to progressively reduce the number of sampling points and speed up the rendering process. Experiments on the ZJU-MoCap and THUman datasets show that our method outperforms the stateof-the-arts significantly across multiple generalization settings, while the time cost is reduced > 70% via applying our efficient progressive rendering pipeline. 
+{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2112.04312" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.png" + alt="GP-NeRF overview" + caption="`GP-NeRF` overview." + >}} + +{{< figure + src="details.png" + alt="GP-NeRF details" + caption="`GP-NeRF` details." + >}} + +### Results + +#### Data +{{}} +{{}} + +#### Comparisons +{{}} +{{}} + +#### Performance +{{}} +{{}} +{{}} diff --git a/page/content/papers/gpnerf/method.png b/page/content/papers/gpnerf/method.png new file mode 100644 index 0000000..b24db75 Binary files /dev/null and b/page/content/papers/gpnerf/method.png differ diff --git a/page/content/papers/h-nerf/feature.jpg b/page/content/papers/h-nerf/feature.jpg new file mode 100644 index 0000000..5c24049 Binary files /dev/null and b/page/content/papers/h-nerf/feature.jpg differ diff --git a/page/content/papers/h-nerf/index.md b/page/content/papers/h-nerf/index.md new file mode 100644 index 0000000..11298bd --- /dev/null +++ b/page/content/papers/h-nerf/index.md @@ -0,0 +1,70 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: H-NeRF +categories: ["papers"] +tags: ["nerf", "ghum", "sdf", "neurips21"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +# showPagination: true +# showHero: true +# layoutBackgroundBlur: true +# heroStyle: thumbAndBackground +description: Neural Radiance Fields for Rendering and Temporal Reconstruction of Humans in Motion +summary: Constrained by a structured implicit human body model, represented using signed distance functions, `H-NeRF` robustly fuses information from sparse views and generalizes well beyond the poses or views observed in training. +keywords: # +type: '2021' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2021"] +series_order: 4 +--- + +## `H-NeRF`: Neural Radiance Fields for Rendering and Temporal Reconstruction of Humans in Motion + +> Hongyi Xu, Thiemo Alldieck, Cristian Sminchisescu + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} GHUM {{< /keyword >}} +{{< keyword icon="tag" >}} SDF {{< /keyword >}} +{{< keyword icon="email" >}} *NeurIPS* 2021 {{< /keyword >}} +{{< /keywordList >}} + +### Abstract +{{< lead >}} +We present neural radiance fields for rendering and temporal (4D) reconstruction of humans in motion (H-NeRF), as captured by a sparse set of cameras or even from a monocular video. Our approach combines ideas from neural scene representation, novel-view synthesis, and implicit statistical geometric human representations, coupled using novel loss functions. Instead of learning a radiance field with a uniform occupancy prior, we constrain it by a structured implicit human body model, represented using signed distance functions. This allows us to robustly fuse information from sparse views and generalize well beyond the poses or views observed in training. Moreover, we apply geometric constraints to co-learn the structure of the observed subject – including both body and clothing – and to regularize the radiance field to geometrically plausible solutions. Extensive experiments on multiple datasets demonstrate the robustness and the accuracy of our approach, its generalization capabilities significantly outside a small training set of poses and views, and statistical extrapolation beyond the observed shape. 
+{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2110.13746" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="H-NeRF overview" + caption="`H-NeRF` overview." + >}} + +### Results + +#### Data +{{}} +{{}} +{{}} +{{}} + +#### Comparisons +{{}} + +#### Performance +{{}} +{{}} +{{}} +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/h-nerf/method.jpg b/page/content/papers/h-nerf/method.jpg new file mode 100644 index 0000000..bcd15dd Binary files /dev/null and b/page/content/papers/h-nerf/method.jpg differ diff --git a/page/content/papers/hf-avatar/feature.gif b/page/content/papers/hf-avatar/feature.gif new file mode 100644 index 0000000..3f09874 Binary files /dev/null and b/page/content/papers/hf-avatar/feature.gif differ diff --git a/page/content/papers/hf-avatar/index.md b/page/content/papers/hf-avatar/index.md new file mode 100644 index 0000000..67aab50 --- /dev/null +++ b/page/content/papers/hf-avatar/index.md @@ -0,0 +1,61 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: HF-Avatar +categories: ["papers"] +tags: ["nerf", "smpl", "texture", "monocular", "cvpr22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +# showPagination: true +# showHero: true +# layoutBackgroundBlur: true +# heroStyle: thumbAndBackground +description: "High-Fidelity Human Avatars from a Single RGB Camera +" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 10 +--- + +## `HF-Avatar`: High-Fidelity Human Avatars from a Single RGB Camera + +> Hao Zhao, Jinsong Zhang, Yu-Kun Lai, Zerong Zheng, Yingdi Xie, Yebin Liu, Kun Li + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} Texture {{< /keyword >}} +{{< keyword icon="tag" >}} Monocular {{< /keyword >}} +{{< keyword icon="email" >}} *CVPR* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="hzhao1997/HF-Avatar" >}} + +### Abstract +{{< lead >}} +In this paper, we propose a coarse-to-fine framework to reconstruct a personalized high-fidelity human avatar from a monocular video. To deal with the misalignment problem caused by the changed poses and shapes in different frames, we design a dynamic surface network to recover pose-dependent surface deformations, which help to decouple the shape and texture of the person. To cope with the complexity of textures and generate photo-realistic results, we propose a reference-based neural rendering network and exploit a bottom-up sharpening-guided finetuning strategy to obtain detailed textures. Our framework also enables photo-realistic novel view/pose synthesis and shape editing applications. Experimental results on both the public dataset and our collected dataset demonstrate that our method outperforms the state-of-theart methods. +{{< /lead >}} + +{{< button href="https://openaccess.thecvf.com/content/CVPR2022/papers/Zhao_High-Fidelity_Human_Avatars_From_a_Single_RGB_Camera_CVPR_2022_paper.pdf" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.png" + alt="HF-Avatar overview" + caption="`HF-Avatar` overview." 
+ >}} + +### Results + +#### Data +{{}} \ No newline at end of file diff --git a/page/content/papers/hf-avatar/method.png b/page/content/papers/hf-avatar/method.png new file mode 100644 index 0000000..42e3239 Binary files /dev/null and b/page/content/papers/hf-avatar/method.png differ diff --git a/page/content/papers/htvr/feature.gif b/page/content/papers/htvr/feature.gif new file mode 100644 index 0000000..b375e9e Binary files /dev/null and b/page/content/papers/htvr/feature.gif differ diff --git a/page/content/papers/htvr/index.md b/page/content/papers/htvr/index.md new file mode 100644 index 0000000..c6788f7 --- /dev/null +++ b/page/content/papers/htvr/index.md @@ -0,0 +1,69 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: HTVR +categories: ["papers"] +tags: ["nerf", "texture", "smpl", "3dv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "HVTR: Hybrid Volumetric-Textural Rendering for Human Avatars" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 9 +--- + +## `HVTR`: Hybrid Volumetric-Textural Rendering for Human Avatars + +> Tao Hu, Tao Yu, Zerong Zheng, He Zhang, Yebin Liu, Matthias Zwicker + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} Texture {{< /keyword >}} +{{< keyword icon="email" >}} *3DV* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="TaoHuUMD/SurMo" >}} + +### Abstract +{{< lead >}} +We propose a novel neural rendering pipeline, Hybrid Volumetric-Textural Rendering (HVTR), which synthesizes virtual human avatars from arbitrary poses efficiently and at high quality. First, we learn to encode articulated human motions on a dense UV manifold of the human body surface. To handle complicated motions (e.g., self-occlusions), we then leverage the encoded information on the UV manifold to construct a 3D volumetric representation based on a dynamic pose-conditioned neural radiance field. While this allows us to represent 3D geometry with changing topology, volumetric rendering is computationally heavy. Hence we employ only a rough volumetric representation using a pose-conditioned downsampled neural radiance field (PDNeRF), which we can render efficiently at low resolutions. In addition, we learn 2D textural features that are fused with rendered volumetric features in image space. The key advantage of our approach is that we can then convert the fused features into a high-resolution, high-quality avatar by a fast GAN-based textural renderer. We demonstrate that hybrid rendering enables HVTR to handle complicated motions, render high-quality avatars under usercontrolled poses/shapes and even loose clothing, and most importantly, be efficient at inference time. Our experimental results also demonstrate state-of-the-art quantitative results +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2112.10203" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="HTVR overview" + caption="`HTVR` overview." + >}} + +{{< figure + src="method.png" + alt="HTVR details" + caption="`HTVR` details." 
+ >}} + +### Results + +#### Data +{{}} + +#### Comparisons +{{}} + +#### Performance +{{}} +{{}} diff --git a/page/content/papers/htvr/method.png b/page/content/papers/htvr/method.png new file mode 100644 index 0000000..7e132a4 Binary files /dev/null and b/page/content/papers/htvr/method.png differ diff --git a/page/content/papers/htvr/overview.png b/page/content/papers/htvr/overview.png new file mode 100644 index 0000000..d4de4dc Binary files /dev/null and b/page/content/papers/htvr/overview.png differ diff --git a/page/content/papers/humannerf1/feature.jpg b/page/content/papers/humannerf1/feature.jpg new file mode 100644 index 0000000..070d874 Binary files /dev/null and b/page/content/papers/humannerf1/feature.jpg differ diff --git a/page/content/papers/humannerf1/index.md b/page/content/papers/humannerf1/index.md new file mode 100644 index 0000000..5684a27 --- /dev/null +++ b/page/content/papers/humannerf1/index.md @@ -0,0 +1,62 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: HumanNeRF-1 +categories: ["papers"] +tags: ["nerf", "smpl", "cvpr22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "HumanNeRF: Free-viewpoint Rendering of Moving People from Monocular Video" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 1 +--- + +## `HumanNeRF`: Free-viewpoint Rendering of Moving People from Monocular Video + +> Chung-Yi Weng, Brian Curless, Pratul P. Srinivasan, Jonathan T. Barron, Ira Kemelmacher-Shlizerman + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *CVPR* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="chungyiweng/HumanNeRF" >}} + +### Abstract +{{< lead >}} +We introduce a free-viewpoint rendering method – HumanNeRF – that works on a given monocular video of a human performing complex body motions, e.g. a video from YouTube. Our method enables pausing the video at any frame and rendering the subject from arbitrary new camera viewpoints or even a full 360-degree camera path for that particular frame and body pose. This task is particularly challenging, as it requires synthesizing photorealistic details of the body, as seen from various camera angles that may not exist in the input video, as well as synthesizing fine details such as cloth folds and facial appearance. Our method optimizes for a volumetric representation of the person in a canonical T-pose, in concert with a motion field that maps the estimated canonical representation to every frame of the video via backward warps. The motion field is decomposed into skeletal rigid and non-rigid motions, produced by deep networks. We show significant performance improvements over prior work, and compelling examples of free-viewpoint renderings from monocular video of moving humans in challenging uncontrolled capture scenarios +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2201.04127" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="HumanNeRF overview" + caption="`HumaNeRF` overview." 
+ >}} + +### Results + +#### Data +{{}} + +#### Comparisons +{{}} + +#### Performance +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/humannerf1/method.jpg b/page/content/papers/humannerf1/method.jpg new file mode 100644 index 0000000..7963e94 Binary files /dev/null and b/page/content/papers/humannerf1/method.jpg differ diff --git a/page/content/papers/humannerf2/feature.png b/page/content/papers/humannerf2/feature.png new file mode 100644 index 0000000..86fd861 Binary files /dev/null and b/page/content/papers/humannerf2/feature.png differ diff --git a/page/content/papers/humannerf2/index.md b/page/content/papers/humannerf2/index.md new file mode 100644 index 0000000..1670451 --- /dev/null +++ b/page/content/papers/humannerf2/index.md @@ -0,0 +1,69 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: HumanNeRF-2 +categories: ["papers"] +tags: ["nerf", "smpl", "cvpr22", "generalized"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "HumanNeRF: Efficiently Generated Human Radiance Field from Sparse Inputs" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 2 +--- + +## `HumanNeRF`: Efficiently Generated Human Radiance Field from Sparse Inputs + +> Fuqiang Zhao, Wei Yang, Jiakai Zhang, Pei Lin, Yingliang Zhang, Jingyi Yu, Lan Xu + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} Generalized {{< /keyword >}} +{{< keyword icon="email" >}} *CVPR* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="zhaofuq/HumanNeRF" >}} + +### Abstract +{{< lead >}} +Recent neural human representations can produce highquality multi-view rendering but require using dense multiview inputs and costly training. They are hence largely limited to static models as training each frame is infeasible. We present HumanNeRF - a neural representation with efficient generalization ability - for high-fidelity free-view synthesis of dynamic humans. Analogous to how IBRNet assists NeRF by avoiding per-scene training, HumanNeRF employs an aggregated pixel-alignment feature across multiview inputs along with a pose embedded non-rigid deformation field for tackling dynamic motions. The raw HumanNeRF can already produce reasonable rendering on sparse video inputs of unseen subjects and camera settings. To further improve the rendering quality, we augment our solution with in-hour scene-specific fine-tuning, and an appearance blending module for combining the benefits of both neural volumetric rendering and neural texture blending. Extensive experiments on various multi-view dynamic human datasets demonstrate effectiveness of our approach in synthesizing photo-realistic free-view humans under challenging motions and with very sparse camera view inputs. + +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2112.02789.pdf" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="HumanNeRF overview" + caption="`HumaNeRF` overview." + >}} +{{< figure + src="method2.png" + alt="HumanNeRF rendering" + caption="`HumaNeRF` rendering." 
+ >}} + +### Results + +#### Data +{{}} + +#### Comparisons +{{}} + +#### Performance +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/humannerf2/method.jpg b/page/content/papers/humannerf2/method.jpg new file mode 100644 index 0000000..1400415 Binary files /dev/null and b/page/content/papers/humannerf2/method.jpg differ diff --git a/page/content/papers/humannerf2/method2.png b/page/content/papers/humannerf2/method2.png new file mode 100644 index 0000000..6544d91 Binary files /dev/null and b/page/content/papers/humannerf2/method2.png differ diff --git a/page/content/papers/instantavatar/feature.gif b/page/content/papers/instantavatar/feature.gif new file mode 100644 index 0000000..008cd0d Binary files /dev/null and b/page/content/papers/instantavatar/feature.gif differ diff --git a/page/content/papers/instantavatar/index.md b/page/content/papers/instantavatar/index.md new file mode 100644 index 0000000..ed332a3 --- /dev/null +++ b/page/content/papers/instantavatar/index.md @@ -0,0 +1,70 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: InstantAvatar +categories: ["papers"] +tags: ["nerf", "smpl", "monocular", "cvpr23"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +# showPagination: true +# showHero: true +# layoutBackgroundBlur: true +# heroStyle: thumbAndBackground +description: "InstantAvatar: Learning Avatars from Monocular Video in 60 Seconds" +summary: TODO +keywords: # +type: '2023' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2023"] +series_order: 1 +--- + +## `InstantAvatar`: Learning Avatars from Monocular Video in 60 Seconds + +> Tianjian Jiang, Xu Chen, Jie Song, Otmar Hilliges + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} Monocular {{< /keyword >}} +{{< keyword icon="email" >}} *CVPR* 2023 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="tijiang13/InstantAvatar" >}} + +### Abstract +{{< lead >}} +In this paper, we take a significant step towards realworld applicability of monocular neural avatar reconstruction by contributing InstantAvatar, a system that can reconstruct human avatars from a monocular video within seconds, and these avatars can be animated and rendered at an interactive rate. To achieve this efficiency we propose a carefully designed and engineered system, that leverages emerging acceleration structures for neural fields, in combination with an efficient empty space-skipping strategy for dynamic scenes. We also contribute an efficient implementation that we will make available for research purposes. Compared to existing methods, InstantAvatar converges 130× faster and can be trained in minutes instead of hours. It achieves comparable or even better reconstruction quality and novel pose synthesis results. When given the same time budget, our method significantly outperforms SoTA methods. InstantAvatar can yield acceptable visual quality in as little as 10 seconds training time. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2212.10550" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="InstantAvatar overview" + caption="`InstantAvatar` overview." 
+ >}} + +### Results + +#### Data +{{}} + +#### Comparisons +{{}} +{{}} + +#### Performance +{{}} +{{}} +{{}} +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/instantavatar/method.jpg b/page/content/papers/instantavatar/method.jpg new file mode 100644 index 0000000..66361b5 Binary files /dev/null and b/page/content/papers/instantavatar/method.jpg differ diff --git a/page/content/papers/intrinsicngp/feature.jpg b/page/content/papers/intrinsicngp/feature.jpg new file mode 100644 index 0000000..beb909b Binary files /dev/null and b/page/content/papers/intrinsicngp/feature.jpg differ diff --git a/page/content/papers/intrinsicngp/index.md b/page/content/papers/intrinsicngp/index.md new file mode 100644 index 0000000..9af437a --- /dev/null +++ b/page/content/papers/intrinsicngp/index.md @@ -0,0 +1,59 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: IntrinsicNGP +categories: ["papers"] +tags: ["nerf", "smpl", "tvcg23"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "IntrinsicNGP: Intrinsic Coordinate based Hash +Encoding for Human NeRF" +summary: TODO +keywords: # +type: '2023' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2023"] +series_order: 6 +--- + +## `IntrinsicNGP`: Intrinsic Coordinate based Hash Encoding for Human NeRF + +> Bo Peng, Jun Hu, Jingtao Zhou, Xuan Gao, Juyong Zhang + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *TVCG* 2023 {{< /keyword >}} +{{< /keywordList >}} + +### Abstract +{{< lead >}} +Recently, many works have been proposed to use the neural radiance field for novel view synthesis of human performers. However, most of these methods require hours of training, making them difficult for practical use. To address this challenging problem, we propose IntrinsicNGP, which can be trained from scratch and achieve high-fidelity results in a few minutes with videos of a human performer. To achieve this goal, we introduce a continuous and optimizable intrinsic coordinate instead of the original explicit Euclidean coordinate in the hash encoding module of InstantNGP. With this novel intrinsic coordinate, IntrinsicNGP can aggregate interframe information for dynamic objects using proxy geometry shapes. Moreover, the results trained with the given rough geometry shapes can be further refined with an optimizable offset field based on the intrinsic coordinate. Extensive experimental results on several datasets demonstrate the effectiveness and efficiency of IntrinsicNGP. We also illustrate the ability of our approach to edit the shape of reconstructed objects. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2302.14683" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="IntrinsicNGP overview" + caption="`IntrinsicNGP` overview." 
+ >}} + +### Results + +#### Data +{{}} + +#### Comparisons +{{}} +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/intrinsicngp/method.jpg b/page/content/papers/intrinsicngp/method.jpg new file mode 100644 index 0000000..ab88d94 Binary files /dev/null and b/page/content/papers/intrinsicngp/method.jpg differ diff --git a/page/content/papers/keypointnerf/feature.gif b/page/content/papers/keypointnerf/feature.gif new file mode 100644 index 0000000..cb779ab Binary files /dev/null and b/page/content/papers/keypointnerf/feature.gif differ diff --git a/page/content/papers/keypointnerf/index.md b/page/content/papers/keypointnerf/index.md new file mode 100644 index 0000000..6f1d165 --- /dev/null +++ b/page/content/papers/keypointnerf/index.md @@ -0,0 +1,61 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: KeypointNeRF +categories: ["papers"] +tags: ["nerf", "skeleton", "generalized", "eccv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "KeypointNeRF: Generalizing Image-based Volumetric Avatars +using Relative Spatial Encoding of Keypoints" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 14 +--- + +## `KeypointNeRF`: Generalizing Image-based Volumetric Avatars using Relative Spatial Encoding of Keypoints + +> Marko Mihajlovic, Aayush Bansal, Michael Zollhoefer, Siyu Tang, +Shunsuke Saito + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} Skeleton {{< /keyword >}} +{{< keyword icon="tag" >}} Generalized {{< /keyword >}} +{{< keyword icon="email" >}} *ECCV* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="facebookresearch/KeypointNeRF" >}} + +### Abstract +{{< lead >}} +Image-based volumetric humans using pixel-aligned features promise generalization to unseen poses and identities. Prior work leverages global spatial encodings and multi-view geometric consistency to reduce spatial ambiguity. However, global encodings often suffer from overfitting to the distribution of the training data, and it is difficult to learn multi-view consistent reconstruction from sparse views. In this work, we investigate common issues with existing spatial encodings and propose a simple yet highly effective approach to modeling high-fidelity volumetric humans from sparse views. One of the key ideas is to encode relative spatial 3D information via sparse 3D keypoints. This approach is robust to the sparsity of viewpoints and cross-dataset domain gap. Our approach outperforms state-of-the-art methods for head reconstruction. On human body reconstruction for unseen subjects, we also achieve performance comparable to prior work that uses a parametric human body model and temporal feature aggregation. Our experiments show that a majority of errors in prior work stem from an inappropriate choice of spatial encoding and thus we suggest a new direction for high-fidelity image-based human modeling. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2205.04992" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="KeypointNeRF overview" + caption="`KeypointNeRF` overview." 
+ >}} + +### Results + +#### Data +{{}} + +#### Comparisons +{{}} diff --git a/page/content/papers/keypointnerf/overview.png b/page/content/papers/keypointnerf/overview.png new file mode 100644 index 0000000..dbbd627 Binary files /dev/null and b/page/content/papers/keypointnerf/overview.png differ diff --git a/page/content/papers/narf/feature.gif b/page/content/papers/narf/feature.gif new file mode 100644 index 0000000..adee624 Binary files /dev/null and b/page/content/papers/narf/feature.gif differ diff --git a/page/content/papers/narf/index.md b/page/content/papers/narf/index.md new file mode 100644 index 0000000..49255da --- /dev/null +++ b/page/content/papers/narf/index.md @@ -0,0 +1,61 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: NARF +categories: ["papers"] +tags: ["nerf", "skeleton", "iccv21"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: Neural Articulated Radiance Field +summary: TODO +keywords: # +type: '2021' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2021"] +series_order: 5 +--- + +## `NARF`: Neural Articulated Radiance Field + +> Atsuhiro Noguchi, Xiao Sun, Stephen Lin, Tatsuya Harada + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} Skeleton {{< /keyword >}} +{{< keyword icon="email" >}} *ICCV* 2021 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="nogu-atsu/NARF" >}} + +### Abstract +{{< lead >}} +We present Neural Articulated Radiance Field (NARF), a novel deformable 3D representation for articulated objects learned from images. While recent advances in 3D implicit representation have made it possible to learn models of complex objects, learning pose-controllable representations of articulated objects remains a challenge, as current methods require 3D shape supervision and are unable to render appearance. In formulating an implicit representation of 3D articulated objects, our method considers only the rigid transformation of the most relevant object part in solving for the radiance field at each 3D location. In this way, the proposed method represents pose-dependent changes without significantly increasing the computational complexity. NARF is fully differentiable and can be trained from images with pose annotations. Moreover, through the use of an autoencoder, it can learn appearance variations over multiple instances of an object class. Experiments show that the proposed method is efficient and can generalize well to novel poses. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2104.03110" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="NARF overview" + caption="`NARF` overview." 
+ >}} + +### Results + +#### Data +{{}} +{{}} + + +#### Performance +{{}} +{{}} diff --git a/page/content/papers/narf/method.jpg b/page/content/papers/narf/method.jpg new file mode 100644 index 0000000..3d178d4 Binary files /dev/null and b/page/content/papers/narf/method.jpg differ diff --git a/page/content/papers/ndf/feature.gif b/page/content/papers/ndf/feature.gif new file mode 100644 index 0000000..c528f89 Binary files /dev/null and b/page/content/papers/ndf/feature.gif differ diff --git a/page/content/papers/ndf/index.md b/page/content/papers/ndf/index.md new file mode 100644 index 0000000..f390d12 --- /dev/null +++ b/page/content/papers/ndf/index.md @@ -0,0 +1,61 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: NDF +categories: ["papers"] +tags: ["nerf", "smpl", "eccv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "NDF: Neural Deformable Fields for Dynamic +Human Modelling" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 6 +--- + +## `NDF`: Neural Deformable Fields for Dynamic Human Modelling + +> Ruiqi Zhang, Jie Chen + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *ECCV* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="HKBU-VSComputing/2022_ECCV_NDF" >}} + +### Abstract +{{< lead >}} +We propose Neural Deformable Fields (NDF), a new representation for dynamic human digitization from a multi-view video. Recent works proposed to represent a dynamic human body with shared canonical neural radiance fields which links to the observation space with deformation fields estimations. However, the learned canonical representation is static and the current design of the deformation fields is not able to represent large movements or detailed geometry changes. In this paper, we propose to learn a neural deformable field wrapped around a fitted parametric body model to represent the dynamic human. The NDF is spatially aligned by the underlying reference surface. A neural network is then learned to map pose to the dynamics of NDF. The proposed NDF representation can synthesize the digitized performer with novel views and novel poses with a detailed and reasonable dynamic appearance. Experiments show that our method significantly outperforms recent human synthesis methods. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2207.09193" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="NDF overview" + caption="`NDF` overview." 
+ >}} + +### Results + +#### Data +{{}} +{{}} + +#### Comparisons +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/ndf/overview.png b/page/content/papers/ndf/overview.png new file mode 100644 index 0000000..f8802ff Binary files /dev/null and b/page/content/papers/ndf/overview.png differ diff --git a/page/content/papers/nerfcap/feature.png b/page/content/papers/nerfcap/feature.png new file mode 100644 index 0000000..00a4fcb Binary files /dev/null and b/page/content/papers/nerfcap/feature.png differ diff --git a/page/content/papers/nerfcap/index.md b/page/content/papers/nerfcap/index.md new file mode 100644 index 0000000..d3ccf52 --- /dev/null +++ b/page/content/papers/nerfcap/index.md @@ -0,0 +1,68 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: NerfCap +categories: ["papers"] +tags: ["nerf", "smpl", "deformation", "tvcg22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "NerfCap: Human Performance Capture with +Dynamic Neural Radiance Fields" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 8 +--- + +## `NerfCap`: Human Performance Capture with Dynamic Neural Radiance Fields + +> Kangkan Wang, Sida Peng, Xiaowei Zhou, Jian Yang, Guofeng Zhang + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} Deformation {{< /keyword >}} +{{< keyword icon="email" >}} *TVCG* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="wangkangkan/nerfcap" >}} + +### Abstract +{{< lead >}} +This paper addresses the challenge of human performance capture from sparse multi-view or monocular videos. Given a template mesh of the performer, previous methods capture the human motion by non-rigidly registering the template mesh to images with 2D silhouettes or dense photometric alignment. However, the detailed surface deformation cannot be recovered from the silhouettes, while the photometric alignment suffers from instability caused by appearance variation in the videos. To solve these problems, we propose NerfCap, a novel performance capture method based on the dynamic neural radiance field (NeRF) representation of the performer. Specifically, a canonical NeRF is initialized from the template geometry and registered to the video frames by optimizing the deformation field and the appearance model of the canonical NeRF. To capture both large body motion and detailed surface deformation, NerfCap combines linear blend skinning with embedded graph deformation. In contrast to the mesh-based methods that suffer from fixed topology and texture, NerfCap is able to flexibly capture complex geometry and appearance variation across the videos, and synthesize more photo-realistic images. In addition, NerfCap can be pre-trained end to end in a self-supervised manner by matching the synthesized videos with the input videos. Experimental results on various datasets show that NerfCap outperforms prior works in terms of both surface reconstruction accuracy and novel-view synthesis quality. +{{< /lead >}} + +{{< button href="http://www.cad.zju.edu.cn/home/gfzhang/papers/NerfCap/NerfCap_TVCG_2022.pdf" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="NerfCap overview" + caption="`NerfCap` overview." 
+ >}} + +### Results + +#### Data +{{}} +{{}} +{{}} + +#### Comparisons +{{}} +{{}} + +#### Performance +{{}} +{{}} +{{}} diff --git a/page/content/papers/nerfcap/overview.png b/page/content/papers/nerfcap/overview.png new file mode 100644 index 0000000..e438a79 Binary files /dev/null and b/page/content/papers/nerfcap/overview.png differ diff --git a/page/content/papers/neural_capture/details.png b/page/content/papers/neural_capture/details.png new file mode 100644 index 0000000..310e0f2 Binary files /dev/null and b/page/content/papers/neural_capture/details.png differ diff --git a/page/content/papers/neural_capture/feature.gif b/page/content/papers/neural_capture/feature.gif new file mode 100644 index 0000000..4aab4a7 Binary files /dev/null and b/page/content/papers/neural_capture/feature.gif differ diff --git a/page/content/papers/neural_capture/index.md b/page/content/papers/neural_capture/index.md new file mode 100644 index 0000000..be38611 --- /dev/null +++ b/page/content/papers/neural_capture/index.md @@ -0,0 +1,75 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: NeuralCapture +categories: ["papers"] +tags: ["nerf", "smpl", "monocular", "eccv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +# showPagination: true +# showHero: true +# layoutBackgroundBlur: true +# heroStyle: thumbAndBackground +description: Neural Capture of Animatable 3D Human from Monocular Video +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 11 +--- + +## `NeuralCapture`: Neural Capture of Animatable 3D Human from Monocular Video + +> Gusi Te, Xiu Li, Xiao Li, Jinglu Wang, Wei Hu, Yan Lu + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} Monocular {{< /keyword >}} +{{< keyword icon="email" >}} *ECCV* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="tegusi/NeuralCapture" >}} + +### Abstract +{{< lead >}} +We present a novel paradigm of building an animatable 3D human representation from a monocular video input, such that it can be rendered in any unseen poses and views. Our method is based on a dynamic Neural Radiance Field (NeRF) rigged by a mesh-based parametric 3D human model serving as a geometry proxy. Previous methods usually rely on multi-view videos or accurate 3D geometry information as additional inputs; besides, most methods suffer from degraded quality when generalized to unseen poses. We identify that the key to generalization is a good input embedding for querying dynamic NeRF: A good input embedding should define an injective mapping in the full volumetric space, guided by surface mesh deformation under pose variation. Based on this observation, we propose to embed the input query with its relationship to local surface regions spanned by a set of geodesic nearest neighbors on mesh vertices. By including both position and relative distance information, our embedding defines a distance-preserved deformation mapping and generalizes well to unseen poses. To reduce the dependency on additional inputs, we first initialize per-frame 3D meshes using off-the-shelf tools and then propose a pipeline to jointly optimize NeRF and refine the initial mesh. Extensive experiments show our method can synthesize plausible human rendering results under unseen poses and views. 
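
The surface-relative query embedding can be sketched as below. This toy version uses Euclidean nearest vertices instead of the geodesic neighbourhoods described above, and every name is a placeholder rather than the authors' API; it only shows how position and relative-distance information are packed together per query point.

```python
import numpy as np

def surface_relative_embedding(query, vertices, k=8):
    """Toy query embedding relative to nearby mesh vertices.

    query:    (3,) point in posed space.
    vertices: (V, 3) posed proxy-mesh vertices (e.g. a fitted SMPL mesh).
    Returns a flat feature of per-neighbour offsets plus distances.

    Note: plain Euclidean k-NN is used here for brevity; the paper builds
    the neighbourhood from geodesic nearest neighbours on the mesh surface.
    """
    d = np.linalg.norm(vertices - query, axis=1)                  # (V,)
    nn = np.argsort(d)[:k]                                        # k closest vertices
    offsets = query - vertices[nn]                                # (k, 3) relative positions
    dists = d[nn][:, None]                                        # (k, 1) relative distances
    return np.concatenate([offsets, dists], axis=1).reshape(-1)   # (k * 4,)
```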
+{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2208.08728" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="NeuralCapture overview" + caption="`NeuralCapture` overview." + >}} + +{{< figure + src="details.png" + alt="NeuralCapture details" + caption="`NeuralCapture` details." + >}} + +### Results + +#### Data +{{}} +{{}} +{{}} + +#### Comparisons +{{}} +{{}} + +#### Performance +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/neural_capture/overview.png b/page/content/papers/neural_capture/overview.png new file mode 100644 index 0000000..e882e4c Binary files /dev/null and b/page/content/papers/neural_capture/overview.png differ diff --git a/page/content/papers/neural_human_performer/feature.gif b/page/content/papers/neural_human_performer/feature.gif new file mode 100644 index 0000000..c64b2a9 Binary files /dev/null and b/page/content/papers/neural_human_performer/feature.gif differ diff --git a/page/content/papers/neural_human_performer/index.md b/page/content/papers/neural_human_performer/index.md new file mode 100644 index 0000000..e32a56b --- /dev/null +++ b/page/content/papers/neural_human_performer/index.md @@ -0,0 +1,60 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: NeuralHumanPerfomer +categories: ["papers"] +tags: ["nerf", "smpl", "neurips21"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "Neural Human Performer: Learning Generalizable Radiance Fields for Human Performance Rendering" +summary: TODO +keywords: # +type: '2021' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2021"] +series_order: 6 +--- + +## `Neural Human Performer`: Learning Generalizable Radiance Fields for Human Performance Rendering + +> Youngjoong Kwon, Dahun Kim, Duygu Ceylan, Henry Fuchs + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *NeurIPS* 2021 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="YoungJoongUNC/Neural_Human_Performer" >}} + +### Abstract +{{< lead >}} +In this paper, we aim at synthesizing a free-viewpoint video of an arbitrary human +performance using sparse multi-view cameras. Recently, several works have addressed this problem by learning person-specific neural radiance fields (NeRF) to capture the appearance of a particular human. In parallel, some work proposed to use pixel-aligned features to generalize radiance fields to arbitrary new scenes and objects. Adopting such generalization approaches to humans, however, is highly challenging due to the heavy occlusions and dynamic articulations of body parts. To tackle this, we propose Neural Human Performer, a novel approach that learns generalizable neural radiance fields based on a parametric human body model for robust performance capture. Specifically, we first introduce a temporal transformer that aggregates tracked visual features based on the skeletal body motion over time. Moreover, a multi-view transformer is proposed to perform cross-attention between the temporally-fused features and the pixel-aligned features at each time step to integrate observations on the fly from multiple views. Experiments on the ZJU-MoCap and AIST datasets show that our method significantly outperforms recent generalizable NeRF methods on unseen identities and poses. 
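
The multi-view fusion step can be pictured with a minimal, single-head cross-attention sketch. It is not the authors' model: there are no learned projections or multiple heads here, only the bare pattern of a temporally fused feature querying per-view pixel-aligned features.

```python
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def fuse_views(temporal_feat, view_feats):
    """Toy cross-attention between a temporally fused feature (query)
    and per-view pixel-aligned features (keys/values).

    temporal_feat: (C,)   feature aggregated over time for one sample point.
    view_feats:    (V, C) pixel-aligned features from V input views.
    """
    C = temporal_feat.shape[0]
    scores = view_feats @ temporal_feat / np.sqrt(C)   # (V,) attention logits
    attn = softmax(scores)                             # how much to trust each view
    return attn @ view_feats                           # (C,) fused feature
```

The design point this illustrates is that views are weighted on the fly per sample point, so occluded or uninformative cameras contribute less to the fused feature.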
+{{< /lead >}} + +{{< button href="https://proceedings.neurips.cc/paper/2021/file/cf866614b6b18cda13fe699a3a65661b-Paper.pdf" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="Neural Human Performer overview" + caption="`Neural Human Performer` overview." + >}} + +### Results + +#### Data +{{}} +{{}} + +#### Comparisons +{{}} \ No newline at end of file diff --git a/page/content/papers/neural_human_performer/method.jpg b/page/content/papers/neural_human_performer/method.jpg new file mode 100644 index 0000000..817b849 Binary files /dev/null and b/page/content/papers/neural_human_performer/method.jpg differ diff --git a/page/content/papers/neuralactor/feature.png b/page/content/papers/neuralactor/feature.png new file mode 100644 index 0000000..6ab189c Binary files /dev/null and b/page/content/papers/neuralactor/feature.png differ diff --git a/page/content/papers/neuralactor/index.md b/page/content/papers/neuralactor/index.md new file mode 100644 index 0000000..4057651 --- /dev/null +++ b/page/content/papers/neuralactor/index.md @@ -0,0 +1,68 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: NeuralActor +categories: ["papers"] +tags: ["nerf", "smpl", "texture", "siggraph_asia21"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "Neural Actor: Neural Free-view Synthesis of Human Actors with Pose Control" +summary: TODO +keywords: # +type: '2021' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2021"] +series_order: 7 +--- + +## `Neural Actor`: Neural Free-view Synthesis of Human Actors with Pose Control + +> Lingjie Liu, Marc Habermann, Viktor Rudnev, Kripasindhu Sarkar, Jiatao Gu, Christian Theobalt + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} Texture {{< /keyword >}} +{{< keyword icon="email" >}} *SIGGRAPH Asia* 2021 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="lingjie0206/Neural_Actor_Main_Code" >}} + +### Abstract +{{< lead >}} +We propose Neural Actor (NA), a new method for high-quality synthesis of humans from arbitrary viewpoints and under arbitrary controllable poses. Our method is built upon recent neural scene representation and rendering works which learn representations of geometry and appearance from only 2D images. While existing works demonstrated compelling rendering of static scenes and playback of dynamic scenes, photo-realistic reconstruction and rendering of humans with neural implicit methods, in particular under user-controlled novel poses, is still difficult. To address this problem, we utilize a coarse body model as the proxy to unwarp the surrounding 3D space into a canonical pose. A neural radiance field learns pose-dependent geometric deformations and pose- and view-dependent appearance effects in the canonical space from multi-view video input. To synthesize novel views of high fidelity dynamic geometry and appearance, we leverage 2D texture maps defined on the body model as latent variables for predicting residual deformations and the dynamic appearance. Experiments demonstrate that our method achieves better quality than the state-of-the-arts on playback as well as novel pose synthesis, and can even generalize well to new poses that starkly differ from the training poses. Furthermore, our method also supports body shape control of the synthesized results. 
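
A very rough sketch of the unwarping idea follows, assuming a posed/canonical vertex correspondence from the body proxy. The real method also accounts for surface orientation and skinning rotations and drives residuals from the texture-map latents; the function and argument names here are illustrative only.

```python
import numpy as np

def unwarp_to_canonical(query, posed_vertices, canonical_vertices):
    """Toy inverse warp of a query point from posed to canonical space.

    The point is attached to its nearest vertex on the posed body proxy and
    its local offset is replayed on the corresponding canonical vertex.
    """
    d = np.linalg.norm(posed_vertices - query, axis=1)
    i = int(np.argmin(d))
    offset = query - posed_vertices[i]
    return canonical_vertices[i] + offset
```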
+{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2106.02019" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="Neural Actor overview" + caption="`Neural Actor` overview." + >}} + +### Results + +#### Data +{{}} +{{}} +{{}} +{{}} + +#### Comparisons +{{}} + +#### Performance +{{}} +{{}} +{{}} +{{}} diff --git a/page/content/papers/neuralactor/method.jpg b/page/content/papers/neuralactor/method.jpg new file mode 100644 index 0000000..03c9627 Binary files /dev/null and b/page/content/papers/neuralactor/method.jpg differ diff --git a/page/content/papers/neuralam/feature.jpg b/page/content/papers/neuralam/feature.jpg new file mode 100644 index 0000000..79068e3 Binary files /dev/null and b/page/content/papers/neuralam/feature.jpg differ diff --git a/page/content/papers/neuralam/index.md b/page/content/papers/neuralam/index.md new file mode 100644 index 0000000..e639b0f --- /dev/null +++ b/page/content/papers/neuralam/index.md @@ -0,0 +1,73 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: NeuralAM +categories: ["papers"] +tags: ["sdf", "deformation", "siggraph_asia22"] +layout: simple # single # +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "Human Performance Modeling and Rendering via Neural Animated Mesh" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 4 +--- + +## `NeuralAM`: Human Performance Modeling and Rendering via Neural Animated Mesh + +> Fuqiang Zhao, Yuheng Jiang, Kaixin Yao, Jiakai Zhang, Liao Wang, Haizhao Dai, Yuhui Zhong, Yingliang Zhang, Minye Wu, Lan Xu, Jingyi Yu + +{{< keywordList >}} +{{< keyword icon="tag" >}} SDF {{< /keyword >}} +{{< keyword icon="tag" >}} Deformation {{< /keyword >}} +{{< keyword icon="email" >}} *SIGGRAPH Asia* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="zhaofuq/Instant-NSR" >}} + +### Abstract +{{< lead >}} +We have recently seen tremendous progress in the neural advances for photo-real human modeling and rendering. However, it's still challenging to integrate them into an existing mesh-based pipeline for downstream applications. In this paper, we present a comprehensive neural approach for high-quality reconstruction, compression, and rendering of human performances from dense multi-view videos. Our core intuition is to bridge the traditional animated mesh workflow with a new class of highly efficient neural techniques. We first introduce a neural surface reconstructor for high-quality surface generation in minutes. It marries the implicit volumetric rendering of the truncated signed distance field (TSDF) with multi-resolution hash encoding. We further propose a hybrid neural tracker to generate animated meshes, which combines explicit non-rigid tracking with implicit dynamic deformation in a self-supervised framework. The former provides the coarse warping back into the canonical space, while the latter implicit one further predicts the displacements using the 4D hash encoding as in our reconstructor. Then, we discuss the rendering schemes using the obtained animated meshes, ranging from dynamic texturing to lumigraph rendering under various bandwidth settings. To strike an intricate balance between quality and bandwidth, we propose a hierarchical solution by first rendering 6 virtual views covering the performer and then conducting occlusion-aware neural texture blending. 
We demonstrate the efficacy of our approach in a variety of mesh-based applications and photo-realistic free-view experiences on various platforms, i.e., inserting virtual human performances into real environments through mobile AR or immersively watching talent shows with VR headsets. +{{< /lead >}} + +{{< button href="https://dl.acm.org/doi/pdf/10.1145/3550454.3555451" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="NeuralAM overview" + caption="`NeuralAM` overview." + >}} + +{{< figure + src="method1.png" + alt="NeuralAM tracking" + caption="`NeuralAM` tracking." + >}} + +{{< figure + src="method2.png" + alt="NeuralAM rendering" + caption="`NeuralAM` rendering." + >}} + +### Results + +#### Data +{{}} + +#### Performance +{{}} +{{}} +{{}} +{{}} \ No newline at end of file diff --git a/page/content/papers/neuralam/method1.png b/page/content/papers/neuralam/method1.png new file mode 100644 index 0000000..9118cae Binary files /dev/null and b/page/content/papers/neuralam/method1.png differ diff --git a/page/content/papers/neuralam/method2.png b/page/content/papers/neuralam/method2.png new file mode 100644 index 0000000..a8cca54 Binary files /dev/null and b/page/content/papers/neuralam/method2.png differ diff --git a/page/content/papers/neuralam/overview.png b/page/content/papers/neuralam/overview.png new file mode 100644 index 0000000..708ab70 Binary files /dev/null and b/page/content/papers/neuralam/overview.png differ diff --git a/page/content/papers/neuralbody/feature.gif b/page/content/papers/neuralbody/feature.gif new file mode 100644 index 0000000..ca73477 Binary files /dev/null and b/page/content/papers/neuralbody/feature.gif differ diff --git a/page/content/papers/neuralbody/index.md b/page/content/papers/neuralbody/index.md new file mode 100644 index 0000000..b97383f --- /dev/null +++ b/page/content/papers/neuralbody/index.md @@ -0,0 +1,62 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: NeuralBody +categories: ["papers"] +tags: ["nerf", "smpl", "cvpr21", "tpami23"] +layout: simple # single # +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "Neural Body: Implicit Neural Representations with Structured Latent Codes for Novel View Synthesis of Dynamic Humans" +summary: TODO +keywords: # +type: '2021' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2021"] +series_order: 8 +--- + +## `NeuralBody`: Implicit Neural Representations with Structured Latent Codes for Novel View Synthesis of Dynamic Humans + +> Sida Peng, Yuanqing Zhang, Yinghao Xu, Qianqian Wang, Qing Shuai, Hujun Bao, Xiaowei Zhou + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="email" >}} *CVPR* 2021 {{< /keyword >}} +{{< keyword icon="email" >}} *TPAMI* 2023 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="zju3dv/neuralbody" >}} + +### Abstract +{{< lead >}} +This paper addresses the challenge of novel view synthesis for a human performer from a very sparse set of camera views. Some recent works have shown that learning implicit neural representations of 3D scenes achieves remarkable view synthesis quality given dense input views. However, the representation learning will be ill-posed if the views are highly sparse. To solve this ill-posed problem, our key idea is to integrate observations over video frames. 
To this end, we propose Neural Body, a new human body representation which assumes that the learned neural representations at different frames share the same set of latent codes anchored to a deformable mesh, so that the observations across frames can be naturally integrated. The deformable mesh also provides geometric guidance for the network to learn 3D representations more efficiently. To evaluate our approach, we create a multi-view dataset named ZJU-MoCap that captures performers with complex motions. Experiments on ZJU-MoCap show that our approach outperforms prior works by a large margin in terms of novel view synthesis quality. We also demonstrate the capability of our approach to reconstruct a moving person from a monocular video on the People-Snapshot dataset. +{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2012.15838v2" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="method.jpg" + alt="Neural Body overview" + caption="`Neural Body` overview." + >}} + +### Results + +#### Data +{{}} +{{}} + + +#### Performance +{{}} +{{}} diff --git a/page/content/papers/neuralbody/method.jpg b/page/content/papers/neuralbody/method.jpg new file mode 100644 index 0000000..19dbf38 Binary files /dev/null and b/page/content/papers/neuralbody/method.jpg differ diff --git a/page/content/papers/surface-aligned-nerf/feature.gif b/page/content/papers/surface-aligned-nerf/feature.gif new file mode 100644 index 0000000..f0b316a Binary files /dev/null and b/page/content/papers/surface-aligned-nerf/feature.gif differ diff --git a/page/content/papers/surface-aligned-nerf/index.md b/page/content/papers/surface-aligned-nerf/index.md new file mode 100644 index 0000000..866bfd8 --- /dev/null +++ b/page/content/papers/surface-aligned-nerf/index.md @@ -0,0 +1,62 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: Surface-Aligned-NeRF +categories: ["papers"] +tags: ["nerf", "smpl", "sdf", "cvpr22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "Surface-Aligned Neural Radiance Fields for Controllable 3D Human Synthesis" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 16 +--- + +## Surface-Aligned Neural Radiance Fields for Controllable 3D Human Synthesis + +> Tianhan Xu, Yasuhiro Fujita, Eiichi Matsumoto + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} SMPL {{< /keyword >}} +{{< keyword icon="tag" >}} SDF {{< /keyword >}} +{{< keyword icon="email" >}} *CVPR* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="pfnet-research/surface-aligned-nerf" >}} + +### Abstract +{{< lead >}} +We propose a new method for reconstructing controllable implicit 3D human models from sparse multi-view RGB videos. Our method defines the neural scene representation on the mesh surface points and signed distances from the surface of a human body mesh. We identify an indistinguishability issue that arises when a point in 3D space is mapped to its nearest surface point on a mesh for learning surface-aligned neural scene representation. To address this issue, we propose projecting a point onto a mesh surface using a barycentric interpolation with modified vertex normals. Experiments with the ZJU-MoCap and Human3.6M datasets show that our approach achieves a higher quality in a novel-view and novel-pose synthesis than existing methods. 
We also demonstrate that our method easily supports the control of body shape and clothes +{{< /lead >}} + +{{< button href="https://openaccess.thecvf.com/content/CVPR2022/papers/Xu_Surface-Aligned_Neural_Radiance_Fields_for_Controllable_3D_Human_Synthesis_CVPR_2022_paper.pdf" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.png" + alt="Surface Aligned NeRF overview" + caption="`Surface Aligned NeRF` overview." + >}} + +### Results + +#### Data +{{}} +{{}} + +#### Comparisons +{{}} +{{}} +{{}} diff --git a/page/content/papers/surface-aligned-nerf/overview.png b/page/content/papers/surface-aligned-nerf/overview.png new file mode 100644 index 0000000..9b6995b Binary files /dev/null and b/page/content/papers/surface-aligned-nerf/overview.png differ diff --git a/page/content/papers/tava/feature.gif b/page/content/papers/tava/feature.gif new file mode 100644 index 0000000..9fb628a Binary files /dev/null and b/page/content/papers/tava/feature.gif differ diff --git a/page/content/papers/tava/index.md b/page/content/papers/tava/index.md new file mode 100644 index 0000000..9628cc2 --- /dev/null +++ b/page/content/papers/tava/index.md @@ -0,0 +1,62 @@ +--- +date: 2024-10-12T04:14:54-08:00 +draft: false +params: + author: Nikolaos Zioulis +title: TAVA +categories: ["papers"] +tags: ["nerf", "skeleton", "eccv22"] +layout: simple +menu: # +robots: all +# sharingLinks: # +weight: 10 +showHero: true +description: "TAVA: Template-free Animatable Volumetric Actors" +summary: TODO +keywords: # +type: '2022' # we use year as a type to list papers in the list view +series: ["Papers Published @ 2022"] +series_order: 5 +--- + +## `TAVA`: Template-free Animatable Volumetric Actors + +> Ruilong Li, Julian Tanke, Minh Vo, Michael Zollhoefer, Jürgen Gall, Angjoo Kanazawa, Christoph Lassner + +{{< keywordList >}} +{{< keyword icon="tag" >}} NeRF {{< /keyword >}} +{{< keyword icon="tag" >}} Skeleton {{< /keyword >}} +{{< keyword icon="email" >}} *ECCV* 2022 {{< /keyword >}} +{{< /keywordList >}} + +{{< github repo="facebookresearch/tava" >}} + +### Abstract +{{< lead >}} +Coordinate-based volumetric representations have the potential to generate photo-realistic virtual avatars from images. However, virtual avatars also need to be controllable even to a novel pose that may not have been observed. Traditional techniques, such as LBS, provide such a function; yet it usually requires a hand-designed body template, 3D scan data, and limited appearance models. On the other hand, neural representation has been shown to be powerful in representing visual details, but are under explored on deforming dynamic articulated actors. In this paper, we propose TAVA, a method to create Template-free Animatable Volumetric Actors, based on neural representations. We rely solely on multi-view data and a tracked skeleton to create a volumetric model of an actor, which can be animated at the test time given novel pose. Since TAVA does not require a body template, it is applicable to humans as well as other creatures such as animals. Furthermore, TAVA is designed such that it can recover accurate dense correspondences, making it amenable to content-creation and editing tasks. Through extensive experiments, we demonstrate that the proposed method generalizes well to novel poses as well as unseen views and showcase basic editing capabilities. 
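
The test-time animation step comes down to a linear-blend-skinning style warp driven by the tracked skeleton. Below is a minimal NumPy sketch of such a warp; the skinning weights and bone transforms are supplied as plain arrays with placeholder names, and in TAVA the weights are learned rather than taken from a hand-designed template.

```python
import numpy as np

def lbs_warp(x_canonical, weights, joint_transforms):
    """Toy linear blend skinning of canonical points to a posed skeleton.

    x_canonical:      (N, 3) points in canonical space.
    weights:          (N, J) skinning weights per point (rows sum to 1).
    joint_transforms: (J, 4, 4) rigid bone transforms for the target pose.
    """
    N = x_canonical.shape[0]
    x_h = np.concatenate([x_canonical, np.ones((N, 1))], axis=1)   # (N, 4) homogeneous
    per_joint = np.einsum('jab,nb->nja', joint_transforms, x_h)    # (N, J, 4) per-bone warps
    blended = np.einsum('nj,nja->na', weights, per_joint)          # (N, 4) weighted blend
    return blended[:, :3]
```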
+{{< /lead >}} + +{{< button href="https://arxiv.org/pdf/2206.08929" target="_blank" >}} +Paper +{{< /button >}} + +### Approach + +{{< figure + src="overview.jpg" + alt="TAVA overview" + caption="`TAVA` overview." + >}} + +### Results + +#### Data +{{}} +{{}} + +#### Comparisons +{{}} +{{}} +{{}} +{{}} diff --git a/page/content/papers/tava/overview.jpg b/page/content/papers/tava/overview.jpg new file mode 100644 index 0000000..2eb11fc Binary files /dev/null and b/page/content/papers/tava/overview.jpg differ diff --git a/page/content/tags/3dv22/_index.md b/page/content/tags/3dv22/_index.md new file mode 100644 index 0000000..1b5f170 --- /dev/null +++ b/page/content/tags/3dv22/_index.md @@ -0,0 +1,8 @@ +--- +title: 3DV 2022 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/arxiv21/_index.md b/page/content/tags/arxiv21/_index.md new file mode 100644 index 0000000..a7a1925 --- /dev/null +++ b/page/content/tags/arxiv21/_index.md @@ -0,0 +1,8 @@ +--- +title: arXiv 2021 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/cvpr21/_index.md b/page/content/tags/cvpr21/_index.md new file mode 100644 index 0000000..65eda54 --- /dev/null +++ b/page/content/tags/cvpr21/_index.md @@ -0,0 +1,8 @@ +--- +title: CVPR 2021 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/cvpr22/_index.md b/page/content/tags/cvpr22/_index.md new file mode 100644 index 0000000..4a0ce61 --- /dev/null +++ b/page/content/tags/cvpr22/_index.md @@ -0,0 +1,8 @@ +--- +title: CVPR 2022 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/cvpr23/_index.md b/page/content/tags/cvpr23/_index.md new file mode 100644 index 0000000..4699ce4 --- /dev/null +++ b/page/content/tags/cvpr23/_index.md @@ -0,0 +1,8 @@ +--- +title: CVPR 2023 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/deformation/_index.md b/page/content/tags/deformation/_index.md new file mode 100644 index 0000000..251c916 --- /dev/null +++ b/page/content/tags/deformation/_index.md @@ -0,0 +1,8 @@ +--- +title: Deformation +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/eccv22/_index.md b/page/content/tags/eccv22/_index.md new file mode 100644 index 0000000..b69da6f --- /dev/null +++ b/page/content/tags/eccv22/_index.md @@ -0,0 +1,8 @@ +--- +title: ECCV 2022 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/generalized/_index.md b/page/content/tags/generalized/_index.md new file mode 100644 index 0000000..aeed2b6 --- /dev/null +++ b/page/content/tags/generalized/_index.md @@ -0,0 +1,8 @@ +--- +title: Generalized +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/ghum/_index.md b/page/content/tags/ghum/_index.md new file mode 100644 index 0000000..6c84452 --- /dev/null +++ b/page/content/tags/ghum/_index.md @@ -0,0 +1,8 @@ +--- +title: GHUM +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/iccv21/_index.md b/page/content/tags/iccv21/_index.md new file mode 100644 index 0000000..e3756b4 --- /dev/null +++ b/page/content/tags/iccv21/_index.md @@ -0,0 +1,8 @@ 
+--- +title: ICCV 2021 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/multiperson/_index.md b/page/content/tags/multiperson/_index.md new file mode 100644 index 0000000..22a5150 --- /dev/null +++ b/page/content/tags/multiperson/_index.md @@ -0,0 +1,8 @@ +--- +title: Multiperson +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/nerf/_index.md b/page/content/tags/nerf/_index.md new file mode 100644 index 0000000..dd7d3ea --- /dev/null +++ b/page/content/tags/nerf/_index.md @@ -0,0 +1,8 @@ +--- +title: NeRF +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/neurips21/_index.md b/page/content/tags/neurips21/_index.md new file mode 100644 index 0000000..a4bdaf9 --- /dev/null +++ b/page/content/tags/neurips21/_index.md @@ -0,0 +1,8 @@ +--- +title: NeurIPS 2021 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/sdf/_index.md b/page/content/tags/sdf/_index.md new file mode 100644 index 0000000..cd74938 --- /dev/null +++ b/page/content/tags/sdf/_index.md @@ -0,0 +1,8 @@ +--- +title: SDF +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/siggraph22/_index.md b/page/content/tags/siggraph22/_index.md new file mode 100644 index 0000000..66a2bc2 --- /dev/null +++ b/page/content/tags/siggraph22/_index.md @@ -0,0 +1,8 @@ +--- +title: SIGGRAPH 2022 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/siggraph_asia21/_index.md b/page/content/tags/siggraph_asia21/_index.md new file mode 100644 index 0000000..ab7c59a --- /dev/null +++ b/page/content/tags/siggraph_asia21/_index.md @@ -0,0 +1,8 @@ +--- +title: SIGGRAPH Asia 2021 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/siggraph_asia22/_index.md b/page/content/tags/siggraph_asia22/_index.md new file mode 100644 index 0000000..490bb80 --- /dev/null +++ b/page/content/tags/siggraph_asia22/_index.md @@ -0,0 +1,8 @@ +--- +title: SIGGRAPH Asia 2022 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/smpl/_index.md b/page/content/tags/smpl/_index.md new file mode 100644 index 0000000..138c09a --- /dev/null +++ b/page/content/tags/smpl/_index.md @@ -0,0 +1,8 @@ +--- +title: SMPL +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/texture/_index.md b/page/content/tags/texture/_index.md new file mode 100644 index 0000000..231f39a --- /dev/null +++ b/page/content/tags/texture/_index.md @@ -0,0 +1,8 @@ +--- +title: Texture +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/tpami23/_index.md b/page/content/tags/tpami23/_index.md new file mode 100644 index 0000000..e7e8e33 --- /dev/null +++ b/page/content/tags/tpami23/_index.md @@ -0,0 +1,8 @@ +--- +title: TPAMI 2023 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/tpami24/_index.md b/page/content/tags/tpami24/_index.md new file mode 100644 index 0000000..978b8e5 --- /dev/null +++ b/page/content/tags/tpami24/_index.md @@ -0,0 +1,8 @@ +--- +title: TPAMI 2024 
+date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/content/tags/tvcg23/_index.md b/page/content/tags/tvcg23/_index.md new file mode 100644 index 0000000..97f8a07 --- /dev/null +++ b/page/content/tags/tvcg23/_index.md @@ -0,0 +1,8 @@ +--- +title: TVCG 2023 +date: +draft: false +logo_path: +website: +description: +--- \ No newline at end of file diff --git a/page/data/authors/nick.json b/page/data/authors/nick.json new file mode 100644 index 0000000..be73d24 --- /dev/null +++ b/page/data/authors/nick.json @@ -0,0 +1,11 @@ +{ + "name": "Nikolaos Zioulis", + "image" : "img/avatar.png", + "headline": "mocap🔹volcap 🔹AI", + "bio": "mocap🔹volcap 🔹AI | CTO @ Moverse", + "social": [ + { "linkedin": "https://www.linkedin.com/in/nikolaos-zioulis/" }, + { "github": "https://www.github.com/zokin" }, + { "link": "https://zokin.github.io" } + ] +} diff --git a/page/go.mod b/page/go.mod new file mode 100644 index 0000000..11d373d --- /dev/null +++ b/page/go.mod @@ -0,0 +1,5 @@ +module github.com/moverseai/snap + +go 1.20 + +require github.com/nunocoracao/blowfish/v2 v2.78.0 // indirect diff --git a/page/go.sum b/page/go.sum new file mode 100644 index 0000000..ba5b118 --- /dev/null +++ b/page/go.sum @@ -0,0 +1,2 @@ +github.com/nunocoracao/blowfish/v2 v2.78.0 h1:rPNg+RGMpLwlOAhdlYUAGdUMxTVwhkZH8VrDnt+WGvs= +github.com/nunocoracao/blowfish/v2 v2.78.0/go.mod h1:4SkMc+Ht8gpQCwArqiHMBDP3soxi2OWuAhVney+cuyk= diff --git a/page/hugo.toml b/page/hugo.toml new file mode 100644 index 0000000..f60fcb4 --- /dev/null +++ b/page/hugo.toml @@ -0,0 +1,3 @@ +baseURL = 'https://moverseai.github.io/snap/' +languageCode = 'en-us' +title = 'Solving Neural Articulated Performances' # SNAP diff --git a/page/layouts/_default/article.html b/page/layouts/_default/article.html new file mode 100644 index 0000000..ae8a0bd --- /dev/null +++ b/page/layouts/_default/article.html @@ -0,0 +1,95 @@ +{{ define "main" }} +
+
+ {{ if .Params.showBreadcrumbs | default (.Site.Params.article.showBreadcrumbs | default false) }} + {{ partial "breadcrumbs.html" . }} + {{ end }} +

+ {{ .Title | emojify }} +

+ + {{ $authorsData := .Site.Data.authors }} + {{ $taxonomies := .Site.Taxonomies.authors }} + {{ $baseURL := .Site.BaseURL }} + {{ $taxonomyLink := 0 }} + {{ $showAuthor := 0 }} + + {{ if not (strings.HasSuffix $baseURL "/") }} + {{ $baseURL = delimit (slice $baseURL "/") "" }} + {{ end }} + + {{ if not (.Params.showAuthorBottom | default ( .Site.Params.article.showAuthorBottom | default false)) }} + + {{ if .Params.showAuthor | default (.Site.Params.article.showAuthor | default true) }} + {{ $showAuthor = 1 }} + {{ partial "author.html" . }} + {{ end }} + + {{ range $author := .Page.Params.authors }} + {{ $authorData := index $authorsData $author }} + {{- if $authorData -}} + {{ range $taxonomyname, $taxonomy := $taxonomies }} + {{ if (eq $taxonomyname $author) }} + {{ $taxonomyLink = delimit (slice $baseURL "authors/" $author "/") "" }} + {{ end }} + {{ end }} + {{ partial "author-extra.html" (dict "context" . "data" $authorData "link" $taxonomyLink) }} + {{- end -}} + {{ end }} + + {{ if or $taxonomyLink $showAuthor }} +
+ {{ end }} + + {{ end }} +
+
+ + + + + {{ .Content }} + + {{ if (.Params.showAuthorBottom | default ( .Site.Params.article.showAuthorBottom | default false)) }} + + {{ if .Params.showAuthor | default (.Site.Params.article.showAuthor | default true) }} + {{ $showAuthor = 1 }} + {{ partial "author.html" . }} + {{ end }} + + {{ range $author := .Page.Params.authors }} + {{ $authorData := index $authorsData $author }} + {{- if $authorData -}} + {{ range $taxonomyname, $taxonomy := $taxonomies }} + {{ if (eq $taxonomyname $author) }} + {{ $taxonomyLink = delimit (slice $baseURL "authors/" $author "/") "" }} + {{ end }} + {{ end }} + {{ partial "author-extra.html" (dict "context" . "data" $authorData "link" $taxonomyLink) }} + {{- end -}} + {{ end }} + + {{ if or $taxonomyLink $showAuthor }} +
+ {{ end }} + + {{ end }} + + + {{ partial "series/series.html" . }} +
+
+ {{ partial "sharing-links.html" . }} +
+
+{{ end }} \ No newline at end of file diff --git a/page/layouts/_default/simple.html b/page/layouts/_default/simple.html new file mode 100644 index 0000000..cda1c45 --- /dev/null +++ b/page/layouts/_default/simple.html @@ -0,0 +1,19 @@ +{{ define "main" }} +
+
+ {{ if .Params.showBreadcrumbs | default (.Site.Params.article.showBreadcrumbs | default false) }} + {{ partial "breadcrumbs.html" . }} + {{ end }} +

+ {{ .Title | emojify }} +

+
+
+ {{ .Content }} + {{ partial "series/series.html" . }} +
+
+ {{ partial "sharing-links.html" . }} +
+
+{{ end }} \ No newline at end of file diff --git a/page/layouts/shortcodes/badge.html b/page/layouts/shortcodes/badge.html new file mode 100644 index 0000000..f79a830 --- /dev/null +++ b/page/layouts/shortcodes/badge.html @@ -0,0 +1,7 @@ +{{ if .Get "link" }} +<a href="{{ .Get "link" }}"> +{{ end }} +{{ .Get "text" }}{{/* the "text" parameter and bare output are assumptions for the truncated line */}} +{{ if .Get "link" }} +</a> +{{ end }} \ No newline at end of file