diff --git a/404.html b/404.html index fdee95fbd..e5331b51b 100644 --- a/404.html +++ b/404.html @@ -5,16 +5,18 @@ 404 | 学习 wgpu - + + - - + + - + + -
Skip to content

404

PAGE NOT FOUND

But if you don't change your direction, and if you keep looking, you may end up where you are heading.
- +
Skip to content

404

PAGE NOT FOUND

But if you don't change your direction, and if you keep looking, you may end up where you are heading.
+ \ No newline at end of file diff --git a/GLOSSARY_OF_TERMS.html b/GLOSSARY_OF_TERMS.html index f5804ea52..fac675ea5 100644 --- a/GLOSSARY_OF_TERMS.html +++ b/GLOSSARY_OF_TERMS.html @@ -5,19 +5,21 @@ 术语中英对照表 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

术语中英对照表

译者在翻译时使用了一些尚未统一的中文术语译词,统一罗列如下:

Rust 术语

术语中文翻译
Build构建
crate包,项目(需基于上下文来区分)
feature特性
lifetime生命周期
panic恐慌
Range<_>范围
struct结构体
Vec动态数组
workspace⼯作空间(指在⼀个根包(crate)下包含了多个⼦包(crate)的项目)

WebGPU 术语

术语中文翻译
Buffer缓冲区
BindGroup绑定组
Device逻辑设备(区别于物理设备)
Fragment片元
Frame
Frame Buffer帧缓冲区
Instance Buffer实例缓冲区
wgpu::InstanceGPU 实例(简称为:实例)
PresentMode呈现模式
Queue命令队列
Sample采样
Sampler采样器
Slot插槽
Staging Buffer中继缓冲区
Surface展示平面(区别于 Canvas 画布,服务于帧画面的呈现)
Texture纹理
TextureView纹理视图
Texel纹素
Uniform Buffer统一缓冲区
Vec2, Vec3, Vec4向量
Vertex顶点

其它术语

术语中文翻译
Camera虚拟摄像机(简称为:摄像机,相机)
Controller控制器
Instancing实例化绘制
Mapping映射
Matrice矩阵
Material材质
Mesh网格
Normal法向量(又称为:法线)
Object绘制对象,对象
Quaternion四元数
View视图
VSync垂直同步
- +
Skip to content

术语中英对照表

译者在翻译时使用了一些尚未统一的中文术语译词,统一罗列如下:

Rust 术语

术语中文翻译
Build构建
crate包,项目(需基于上下文来区分)
feature特性
lifetime生命周期
panic恐慌
Range<_>范围
struct结构体
Vec动态数组
workspace⼯作空间(指在⼀个根包(crate)下包含了多个⼦包(crate)的项目)

WebGPU 术语

术语中文翻译
Buffer缓冲区
BindGroup绑定组
Device逻辑设备(区别于物理设备)
Fragment片元
Frame
Frame Buffer帧缓冲区
Instance Buffer实例缓冲区
wgpu::InstanceGPU 实例(简称为:实例)
PresentMode呈现模式
Queue命令队列
Sample采样
Sampler采样器
Slot插槽
Staging Buffer中继缓冲区
Surface展示平面(区别于 Canvas 画布,服务于帧画面的呈现)
Texture纹理
TextureView纹理视图
Texel纹素
Uniform Buffer统一缓冲区
Vec2, Vec3, Vec4向量
Vertex顶点

其它术语

术语中文翻译
Camera虚拟摄像机(简称为:摄像机,相机)
Controller控制器
Instancing实例化绘制
Mapping映射
Matrice矩阵
Material材质
Mesh网格
Normal法向量(又称为:法线)
Object绘制对象,对象
Quaternion四元数
View视图
VSync垂直同步
+ \ No newline at end of file diff --git a/assets/GLOSSARY_OF_TERMS.md.239b8b10.js b/assets/GLOSSARY_OF_TERMS.md.UkTnpW7D.js similarity index 94% rename from assets/GLOSSARY_OF_TERMS.md.239b8b10.js rename to assets/GLOSSARY_OF_TERMS.md.UkTnpW7D.js index e024c95cb..99788ec54 100644 --- a/assets/GLOSSARY_OF_TERMS.md.239b8b10.js +++ b/assets/GLOSSARY_OF_TERMS.md.UkTnpW7D.js @@ -1 +1 @@ -import{_ as t,o as d,c as r,S as e}from"./chunks/framework.adbf3c9e.js";const b=JSON.parse('{"title":"术语中英对照表","description":"","frontmatter":{},"headers":[],"relativePath":"GLOSSARY_OF_TERMS.md","filePath":"GLOSSARY_OF_TERMS.md","lastUpdated":1701933923000}'),a={name:"GLOSSARY_OF_TERMS.md"},o=e('

术语中英对照表

译者在翻译时使用了一些尚未统一的中文术语译词,统一罗列如下:

Rust 术语

术语中文翻译
Build构建
crate包,项目(需基于上下文来区分)
feature特性
lifetime生命周期
panic恐慌
Range<_>范围
struct结构体
Vec动态数组
workspace⼯作空间(指在⼀个根包(crate)下包含了多个⼦包(crate)的项目)

WebGPU 术语

术语中文翻译
Buffer缓冲区
BindGroup绑定组
Device逻辑设备(区别于物理设备)
Fragment片元
Frame
Frame Buffer帧缓冲区
Instance Buffer实例缓冲区
wgpu::InstanceGPU 实例(简称为:实例)
PresentMode呈现模式
Queue命令队列
Sample采样
Sampler采样器
Slot插槽
Staging Buffer中继缓冲区
Surface展示平面(区别于 Canvas 画布,服务于帧画面的呈现)
Texture纹理
TextureView纹理视图
Texel纹素
Uniform Buffer统一缓冲区
Vec2, Vec3, Vec4向量
Vertex顶点

其它术语

术语中文翻译
Camera虚拟摄像机(简称为:摄像机,相机)
Controller控制器
Instancing实例化绘制
Mapping映射
Matrice矩阵
Material材质
Mesh网格
Normal法向量(又称为:法线)
Object绘制对象,对象
Quaternion四元数
View视图
VSync垂直同步
',8),n=[o];function c(h,i,l,s,_,u){return d(),r("div",null,n)}const p=t(a,[["render",c]]);export{b as __pageData,p as default}; +import{_ as t,o as d,c as r,R as e}from"./chunks/framework.bMtwhlie.js";const b=JSON.parse('{"title":"术语中英对照表","description":"","frontmatter":{},"headers":[],"relativePath":"GLOSSARY_OF_TERMS.md","filePath":"GLOSSARY_OF_TERMS.md","lastUpdated":1703303099000}'),a={name:"GLOSSARY_OF_TERMS.md"},o=e('

术语中英对照表

译者在翻译时使用了一些尚未统一的中文术语译词,统一罗列如下:

Rust 术语

术语中文翻译
Build构建
crate包,项目(需基于上下文来区分)
feature特性
lifetime生命周期
panic恐慌
Range<_>范围
struct结构体
Vec动态数组
workspace⼯作空间(指在⼀个根包(crate)下包含了多个⼦包(crate)的项目)

WebGPU 术语

术语中文翻译
Buffer缓冲区
BindGroup绑定组
Device逻辑设备(区别于物理设备)
Fragment片元
Frame
Frame Buffer帧缓冲区
Instance Buffer实例缓冲区
wgpu::InstanceGPU 实例(简称为:实例)
PresentMode呈现模式
Queue命令队列
Sample采样
Sampler采样器
Slot插槽
Staging Buffer中继缓冲区
Surface展示平面(区别于 Canvas 画布,服务于帧画面的呈现)
Texture纹理
TextureView纹理视图
Texel纹素
Uniform Buffer统一缓冲区
Vec2, Vec3, Vec4向量
Vertex顶点

其它术语

术语中文翻译
Camera虚拟摄像机(简称为:摄像机,相机)
Controller控制器
Instancing实例化绘制
Mapping映射
Matrice矩阵
Material材质
Mesh网格
Normal法向量(又称为:法线)
Object绘制对象,对象
Quaternion四元数
View视图
VSync垂直同步
',8),n=[o];function c(h,i,l,s,_,u){return d(),r("div",null,n)}const p=t(a,[["render",c]]);export{b as __pageData,p as default}; diff --git a/assets/GLOSSARY_OF_TERMS.md.239b8b10.lean.js b/assets/GLOSSARY_OF_TERMS.md.UkTnpW7D.lean.js similarity index 56% rename from assets/GLOSSARY_OF_TERMS.md.239b8b10.lean.js rename to assets/GLOSSARY_OF_TERMS.md.UkTnpW7D.lean.js index 0cdca4ab3..e6882c28a 100644 --- a/assets/GLOSSARY_OF_TERMS.md.239b8b10.lean.js +++ b/assets/GLOSSARY_OF_TERMS.md.UkTnpW7D.lean.js @@ -1 +1 @@ -import{_ as t,o as d,c as r,S as e}from"./chunks/framework.adbf3c9e.js";const b=JSON.parse('{"title":"术语中英对照表","description":"","frontmatter":{},"headers":[],"relativePath":"GLOSSARY_OF_TERMS.md","filePath":"GLOSSARY_OF_TERMS.md","lastUpdated":1701933923000}'),a={name:"GLOSSARY_OF_TERMS.md"},o=e("",8),n=[o];function c(h,i,l,s,_,u){return d(),r("div",null,n)}const p=t(a,[["render",c]]);export{b as __pageData,p as default}; +import{_ as t,o as d,c as r,R as e}from"./chunks/framework.bMtwhlie.js";const b=JSON.parse('{"title":"术语中英对照表","description":"","frontmatter":{},"headers":[],"relativePath":"GLOSSARY_OF_TERMS.md","filePath":"GLOSSARY_OF_TERMS.md","lastUpdated":1703303099000}'),a={name:"GLOSSARY_OF_TERMS.md"},o=e("",8),n=[o];function c(h,i,l,s,_,u){return d(),r("div",null,n)}const p=t(a,[["render",c]]);export{b as __pageData,p as default}; diff --git a/assets/GMEM_load.3c3ed86f.jpg b/assets/GMEM_load.KFQaOUnP.jpg similarity index 100% rename from assets/GMEM_load.3c3ed86f.jpg rename to assets/GMEM_load.KFQaOUnP.jpg diff --git a/assets/GMEM_store.6f15c0e3.jpg b/assets/GMEM_store.gbnpZX9D.jpg similarity index 100% rename from assets/GMEM_store.6f15c0e3.jpg rename to assets/GMEM_store.gbnpZX9D.jpg diff --git a/assets/address_mode.a4746540.png b/assets/address_mode.h6IYUOwy.png similarity index 100% rename from assets/address_mode.a4746540.png rename to assets/address_mode.h6IYUOwy.png diff --git a/assets/ambient_diffuse_lighting.14acf617.png 
b/assets/ambient_diffuse_lighting.Aqd8YxwU.png similarity index 100% rename from assets/ambient_diffuse_lighting.14acf617.png rename to assets/ambient_diffuse_lighting.Aqd8YxwU.png diff --git a/assets/ambient_diffuse_specular_lighting.4ab44d3e.png b/assets/ambient_diffuse_specular_lighting.5nUogza3.png similarity index 100% rename from assets/ambient_diffuse_specular_lighting.4ab44d3e.png rename to assets/ambient_diffuse_specular_lighting.5nUogza3.png diff --git a/assets/ambient_diffuse_wrong.a2837930.png b/assets/ambient_diffuse_wrong.a6ZMIR1H.png similarity index 100% rename from assets/ambient_diffuse_wrong.a2837930.png rename to assets/ambient_diffuse_wrong.a6ZMIR1H.png diff --git a/assets/ambient_lighting.d9ea7212.png b/assets/ambient_lighting.Yz5DfKXU.png similarity index 100% rename from assets/ambient_lighting.d9ea7212.png rename to assets/ambient_lighting.Yz5DfKXU.png diff --git a/assets/app.CxEJfTfm.js b/assets/app.CxEJfTfm.js new file mode 100644 index 000000000..76b81f1b2 --- /dev/null +++ b/assets/app.CxEJfTfm.js @@ -0,0 +1,7 @@ +import{u as _,_ as u,o as n,c as o,k as c,a5 as v,a6 as h,t as p,e as l,R as w,a as x,v as m,a7 as y,a8 as E,a9 as b,aa as $,ab as A,ac as C,ad as P,ae as z,af as S,ag as L,d as W,j as k,z as T,ah as G,ai as R,aj as N,ak as F}from"./chunks/framework.bMtwhlie.js";import{t as I}from"./chunks/theme.fe7zvf8B.js";const U={name:"AutoGithubLink",props:{customCodePath:""},data(){return{codePath:""}},mounted(){const e="https://github.com/jinleili/learn-wgpu-zh/tree/master/code/",{page:t}=_();!this.customCodePath||this.customCodePath==""?this.codePath=e+t.value.relativePath.replace("index.md","").replace(".md",""):this.codePath=e+this.customCodePath}},j={class:"auto-github-link"},O=["href"];function B(e,t,a,i,s,r){return n(),o("div",j,[c("a",{href:s.codePath,target:"_blank",rel:"noopener noreferrer"}," 查看源码! 
",8,O)])}const V=u(U,[["render",B]]),D={name:"JoinWeiChatGroup"},M={src:v,style:{width:"312px","margin-top":"24px"}};function H(e,t,a,i,s,r){return n(),o("img",M)}const J=u(D,[["render",H]]);function q(e){return e.replace(/\w\S*/g,function(t){return t.charAt(0).toUpperCase()+t.substr(1).toLowerCase()})}const K={name:"WasmExample",props:{example:"",autoLoad:!1},data(){return{error:"",loading:!1,exampleStarted:!1}},computed:{exampleName(){return q(this.example)}},methods:{async loadExample(){this.loading=!0,this.exampleStarted=!0;try{(await h(()=>import(`./wasm/${this.example}.js`.replace("_","-")),__vite__mapDeps([]))).default().then(t=>{this.loading=!1,this.exampleStarted=!0},t=>{`${t}`.includes("don't mind me. This isn't actually an error!")?(this.exampleStarted=!0,this.loading=!1):this.showErr(t)})}catch(e){this.showErr(e)}},showErr(e){this.error=`An error occurred loading "${this.example}": ${e}`,console.error(e),this.exampleStarted=!1,this.loading=!1}},async mounted(){await this.$nextTick(),this.autoLoad&&await this.loadExample()}},Q={id:"wasm-example"},X={key:0,class:"error"},Y={key:1,class:"loading"},Z=["disabled"];function ee(e,t,a,i,s,r){return n(),o("div",Q,[s.error?(n(),o("div",X,p(s.error),1)):l("",!0),s.loading?(n(),o("div",Y," 正在加载 WASM 模块 ... 
")):l("",!0),s.exampleStarted?l("",!0):(n(),o("button",{key:2,onClick:t[0]||(t[0]=g=>r.loadExample()),disabled:s.loading},"点击运行 "+p(r.exampleName),9,Z))])}const te=u(K,[["render",ee]]),ae={name:"WasmFullScreen",props:{wasmName:""},data(){return{timeOutFunctionId:0,missedResizeCount:0,can_resize_canvas:!0}},methods:{canvas_resize_completed(){this.can_resize_canvas=!0},dispatch_resize_event(){this.can_resize_canvas=!1;let e=document.getElementById("simuverse_container");e!=null&&e.dispatchEvent(new Event("canvas_size_need_change"))},window_resized(){let e=document.getElementsByClassName("VPLocalNav");var t=64;e[0]&&(t+=e[0].clientHeight);let a=document.getElementById("simuverse_container");a.style.top=t+"px",clearTimeout(this.timeOutFunctionId),this.can_resize_canvas||this.missedResizeCount>10?(this.missedResizeCount=0,this.timeOutFunctionId=setTimeout(this.dispatch_resize_event,100)):(this.missedResizeCount++,this.timeOutFunctionId=setTimeout(this.window_resized,100))},showAlert(){this.hideLoading();let e=document.getElementById("alert");e!=null&&(e.style.display="block")},hideLoading(){let e=document.getElementById("loading");e!=null&&(e.style.display="none")},async loadSimuverse(){h(()=>import(`https://jinleili.github.io/simuverse/${this.wasmName}.js`),__vite__mapDeps([])).then(e=>{this.hideLoading(),e.default()}).catch(e=>{})}},async mounted(){window.onresize=this.window_resized,window.dispatch_resize_event=this.dispatch_resize_event,window.canvas_resize_completed=this.canvas_resize_completed,"navigator"in window&&"gpu"in navigator?navigator.gpu.requestAdapter().then(e=>{this.loadSimuverse()}).catch(e=>{this.showAlert()}):this.showAlert()},beforeDestroy(){delete window.onresize,delete window.dispatch_resize_event,delete window.canvas_resize_completed}},se={id:"simuverse_container"},ie=w('
WASM 加载中...
',2),ne=[ie];function oe(e,t,a,i,s,r){return n(),o("div",se,ne)}const re=u(ae,[["render",oe]]);function le(e){return e.replace(/\w\S*/g,function(t){return t.charAt(0).toUpperCase()+t.substr(1).toLowerCase()})}const de={name:"WebGPUExample",props:{example:"",autoLoad:!0},data(){return{error:"",loading:!1,exampleStarted:!1,showAlert:!1}},computed:{exampleName(){return le(this.example)}},methods:{detectWebGPUThenLoad(){"navigator"in window&&"gpu"in navigator?navigator.gpu.requestAdapter().then(e=>{this.loadExample()}).catch(e=>{this.showAlert=!0}):this.showAlert=!0},async loadExample(){this.loading=!0,this.exampleStarted=!0;try{(await h(()=>import(`./wasm/${this.example}.js`.replace("_","-")),__vite__mapDeps([]))).default().then(t=>{this.loading=!1,this.exampleStarted=!0},t=>{`${t}`.includes("don't mind me. This isn't actually an error!")?(this.exampleStarted=!0,this.loading=!1):this.showErr(t)})}catch(e){this.showErr(e)}},showErr(e){this.error=`An error occurred loading "${this.example}": ${e}`,console.error(e),this.exampleStarted=!1,this.loading=!1,this.showAlert=!0}},async mounted(){this.autoLoad&&this.detectWebGPUThenLoad()}},ce=["id"],ue={key:0,style:{color:"#353535","margin-top":"20px"}},he=c("div",{style:{"line-height":"40px"}},"此浏览器版本不支持 WebGPU",-1),pe=c("div",{style:{"font-size":"16px",color:"#999999"}},[x("请使用 Chrome/Microsoft Edge 113 及以上版本,或者 Chrome/Edge Canary, FireFox Nightly 并 "),c("span",null,[c("a",{href:"https://jinleili.github.io/learn-wgpu-zh/#如何开启浏览器-webgpu-试验功能",class:"a"},"开启 WebGPU 实验功能")])],-1),me=[he,pe],_e={key:1},fe=["disabled"];function ge(e,t,a,i,s,r){return n(),o("div",{id:a.example},[s.showAlert?(n(),o("div",ue,me)):l("",!0),s.loading?(n(),o("div",_e," 正在加载 WASM 模块 ... 
")):l("",!0),s.exampleStarted?l("",!0):(n(),o("button",{key:2,class:"webgpu_example_button",onClick:t[0]||(t[0]=g=>r.detectWebGPUThenLoad()),disabled:s.loading},"点击运行 "+p(r.exampleName),9,fe))],8,ce)}const ve=u(de,[["render",ge]]),we={...I,enhanceApp({app:e}){e.component("AutoGithubLink",V),e.component("JoinWeiChatGroup",J),e.component("WasmExample",te),e.component("WebGPUExample",ve),e.component("WasmFullScreen",re)}};function f(e){if(e.extends){const t=f(e.extends);return{...t,...e,async enhanceApp(a){t.enhanceApp&&await t.enhanceApp(a),e.enhanceApp&&await e.enhanceApp(a)}}}return e}const d=f(we),xe=W({name:"VitePressApp",setup(){const{site:e}=_();return k(()=>{T(()=>{document.documentElement.lang=e.value.lang,document.documentElement.dir=e.value.dir})}),e.value.router.prefetchLinks&&G(),R(),N(),d.setup&&d.setup(),()=>F(d.Layout)}});async function ye(){const e=be(),t=Ee();t.provide(E,e);const a=b(e.route);return t.provide($,a),t.component("Content",A),t.component("ClientOnly",C),Object.defineProperties(t.config.globalProperties,{$frontmatter:{get(){return a.frontmatter.value}},$params:{get(){return a.page.value.params}}}),d.enhanceApp&&await d.enhanceApp({app:t,router:e,siteData:P}),{app:t,router:e,data:a}}function Ee(){return z(xe)}function be(){let e=m,t;return S(a=>{let i=L(a),s=null;return i&&(e&&(t=i),(e||t===i)&&(i=i.replace(/\.js$/,".lean.js")),s=h(()=>import(i),__vite__mapDeps([]))),m&&(e=!1),s},d.NotFound)}m&&ye().then(({app:e,router:t,data:a})=>{t.go().then(()=>{y(t.route,a.site),e.mount("#app")})});export{ye as createApp}; +function __vite__mapDeps(indexes) { + if (!__vite__mapDeps.viteFileDeps) { + __vite__mapDeps.viteFileDeps = [] + } + return indexes.map((i) => __vite__mapDeps.viteFileDeps[i]) +} \ No newline at end of file diff --git a/assets/app.fd1c48b9.js b/assets/app.fd1c48b9.js deleted file mode 100644 index 5f421b24f..000000000 --- a/assets/app.fd1c48b9.js +++ /dev/null @@ -1 +0,0 @@ -import{u as _,_ as u,o as i,c as o,k as c,a0 as p,t as h,e 
as l,S as v,a as w,A as m,a1 as x,a2 as y,a3 as E,a4 as b,a5 as A,a6 as $,a7 as C,a8 as P,a9 as S,aa as z,d as W,j as L,x as T,ab as k,ac as G,ad as F,ae as N}from"./chunks/framework.adbf3c9e.js";import{t as R}from"./chunks/theme.366df15a.js";const I={name:"AutoGithubLink",props:{customCodePath:""},data(){return{codePath:""}},mounted(){const e="https://github.com/jinleili/learn-wgpu-zh/tree/master/code/",{page:t}=_();!this.customCodePath||this.customCodePath==""?this.codePath=e+t.value.relativePath.replace("index.md","").replace(".md",""):this.codePath=e+this.customCodePath}};const U={class:"auto-github-link"},j=["href"];function O(e,t,a,n,s,r){return i(),o("div",U,[c("a",{href:s.codePath,target:"_blank",rel:"noopener noreferrer"}," 查看源码! ",8,j)])}const B=u(I,[["render",O]]),V="/learn-wgpu-zh/res/wx.jpg",D={name:"JoinWeiChatGroup"},M={src:V,style:{width:"312px","margin-top":"24px"}};function H(e,t,a,n,s,r){return i(),o("img",M)}const J=u(D,[["render",H]]);function q(e){return e.replace(/\w\S*/g,function(t){return t.charAt(0).toUpperCase()+t.substr(1).toLowerCase()})}const K={name:"WasmExample",props:{example:"",autoLoad:!1},data(){return{error:"",loading:!1,exampleStarted:!1}},computed:{exampleName(){return q(this.example)}},methods:{async loadExample(){this.loading=!0,this.exampleStarted=!0;try{(await p(()=>import(`./wasm/${this.example}.js`.replace("_","-")),[])).default().then(t=>{this.loading=!1,this.exampleStarted=!0},t=>{`${t}`.includes("don't mind me. 
This isn't actually an error!")?(this.exampleStarted=!0,this.loading=!1):this.showErr(t)})}catch(e){this.showErr(e)}},showErr(e){this.error=`An error occurred loading "${this.example}": ${e}`,console.error(e),this.exampleStarted=!1,this.loading=!1}},async mounted(){await this.$nextTick(),this.autoLoad&&await this.loadExample()}},Q={id:"wasm-example"},X={key:0,class:"error"},Y={key:1,class:"loading"},Z=["disabled"];function ee(e,t,a,n,s,r){return i(),o("div",Q,[s.error?(i(),o("div",X,h(s.error),1)):l("",!0),s.loading?(i(),o("div",Y," 正在加载 WASM 模块 ... ")):l("",!0),s.exampleStarted?l("",!0):(i(),o("button",{key:2,onClick:t[0]||(t[0]=g=>r.loadExample()),disabled:s.loading},"点击运行 "+h(r.exampleName),9,Z))])}const te=u(K,[["render",ee]]);const ae={name:"WasmFullScreen",props:{wasmName:""},data(){return{timeOutFunctionId:0,missedResizeCount:0,can_resize_canvas:!0}},methods:{canvas_resize_completed(){this.can_resize_canvas=!0},dispatch_resize_event(){this.can_resize_canvas=!1;let e=document.getElementById("simuverse_container");e!=null&&e.dispatchEvent(new Event("canvas_size_need_change"))},window_resized(){let e=document.getElementsByClassName("VPLocalNav");var t=64;e[0]&&(t+=e[0].clientHeight);let a=document.getElementById("simuverse_container");a.style.top=t+"px",clearTimeout(this.timeOutFunctionId),this.can_resize_canvas||this.missedResizeCount>10?(this.missedResizeCount=0,this.timeOutFunctionId=setTimeout(this.dispatch_resize_event,100)):(this.missedResizeCount++,this.timeOutFunctionId=setTimeout(this.window_resized,100))},showAlert(){this.hideLoading();let e=document.getElementById("alert");e!=null&&(e.style.display="block")},hideLoading(){let e=document.getElementById("loading");e!=null&&(e.style.display="none")},async loadSimuverse(){p(()=>import(`https://jinleili.github.io/simuverse/${this.wasmName}.js`),[]).then(e=>{this.hideLoading(),e.default()}).catch(e=>{})}},async 
mounted(){window.onresize=this.window_resized,window.dispatch_resize_event=this.dispatch_resize_event,window.canvas_resize_completed=this.canvas_resize_completed,"navigator"in window&&"gpu"in navigator?navigator.gpu.requestAdapter().then(e=>{this.loadSimuverse()}).catch(e=>{this.showAlert()}):this.showAlert()},beforeDestroy(){delete window.onresize,delete window.dispatch_resize_event,delete window.canvas_resize_completed}},se={id:"simuverse_container"},ne=v('
WASM 加载中...
',2),ie=[ne];function oe(e,t,a,n,s,r){return i(),o("div",se,ie)}const re=u(ae,[["render",oe]]);function le(e){return e.replace(/\w\S*/g,function(t){return t.charAt(0).toUpperCase()+t.substr(1).toLowerCase()})}const de={name:"WebGPUExample",props:{example:"",autoLoad:!0},data(){return{error:"",loading:!1,exampleStarted:!1,showAlert:!1}},computed:{exampleName(){return le(this.example)}},methods:{detectWebGPUThenLoad(){"navigator"in window&&"gpu"in navigator?navigator.gpu.requestAdapter().then(e=>{this.loadExample()}).catch(e=>{this.showAlert=!0}):this.showAlert=!0},async loadExample(){this.loading=!0,this.exampleStarted=!0;try{(await p(()=>import(`./wasm/${this.example}.js`.replace("_","-")),[])).default().then(t=>{this.loading=!1,this.exampleStarted=!0},t=>{`${t}`.includes("don't mind me. This isn't actually an error!")?(this.exampleStarted=!0,this.loading=!1):this.showErr(t)})}catch(e){this.showErr(e)}},showErr(e){this.error=`An error occurred loading "${this.example}": ${e}`,console.error(e),this.exampleStarted=!1,this.loading=!1,this.showAlert=!0}},async mounted(){this.autoLoad&&this.detectWebGPUThenLoad()}},ce=["id"],ue={key:0,style:{color:"#353535","margin-top":"20px"}},pe=c("div",{style:{"line-height":"40px"}},"此浏览器版本不支持 WebGPU",-1),he=c("div",{style:{"font-size":"16px",color:"#999999"}},[w("请使用 Chrome/Microsoft Edge 113 及以上版本,或者 Chrome/Edge Canary, FireFox Nightly 并 "),c("span",null,[c("a",{href:"https://jinleili.github.io/learn-wgpu-zh/#如何开启浏览器-webgpu-试验功能",class:"a"},"开启 WebGPU 实验功能")])],-1),me=[pe,he],_e={key:1},fe=["disabled"];function ge(e,t,a,n,s,r){return i(),o("div",{id:a.example},[s.showAlert?(i(),o("div",ue,me)):l("",!0),s.loading?(i(),o("div",_e," 正在加载 WASM 模块 ... 
")):l("",!0),s.exampleStarted?l("",!0):(i(),o("button",{key:2,class:"webgpu_example_button",onClick:t[0]||(t[0]=g=>r.detectWebGPUThenLoad()),disabled:s.loading},"点击运行 "+h(r.exampleName),9,fe))],8,ce)}const ve=u(de,[["render",ge]]),we={...R,enhanceApp({app:e}){e.component("AutoGithubLink",B),e.component("JoinWeiChatGroup",J),e.component("WasmExample",te),e.component("WebGPUExample",ve),e.component("WasmFullScreen",re)}};function f(e){if(e.extends){const t=f(e.extends);return{...t,...e,async enhanceApp(a){t.enhanceApp&&await t.enhanceApp(a),e.enhanceApp&&await e.enhanceApp(a)}}}return e}const d=f(we),xe=W({name:"VitePressApp",setup(){const{site:e}=_();return L(()=>{T(()=>{document.documentElement.lang=e.value.lang,document.documentElement.dir=e.value.dir})}),k(),G(),F(),d.setup&&d.setup(),()=>N(d.Layout)}});async function ye(){const e=be(),t=Ee();t.provide(y,e);const a=E(e.route);return t.provide(b,a),t.component("Content",A),t.component("ClientOnly",$),Object.defineProperties(t.config.globalProperties,{$frontmatter:{get(){return a.frontmatter.value}},$params:{get(){return a.page.value.params}}}),d.enhanceApp&&await d.enhanceApp({app:t,router:e,siteData:C}),{app:t,router:e,data:a}}function Ee(){return P(xe)}function be(){let e=m,t;return S(a=>{let n=z(a);return n?(e&&(t=n),(e||t===n)&&(n=n.replace(/\.js$/,".lean.js")),m&&(e=!1),p(()=>import(n),[])):null},d.NotFound)}m&&ye().then(({app:e,router:t,data:a})=>{t.go().then(()=>{x(t.route,a.site),e.mount("#app")})});export{ye as createApp}; diff --git a/assets/beginner_tutorial1-window.md.16634366.js b/assets/beginner_tutorial1-window.md.16634366.js deleted file mode 100644 index 5754f4a71..000000000 --- a/assets/beginner_tutorial1-window.md.16634366.js +++ /dev/null @@ -1,117 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const 
d=JSON.parse('{"title":"依赖与窗口","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial1-window.md","filePath":"beginner/tutorial1-window.md","lastUpdated":1701933923000}'),t={name:"beginner/tutorial1-window.md"},c=r(`

依赖与窗口

部分读者可能已经熟悉如何在 Rust 中打开窗口程序,且有自己偏好的窗口管理库。但本教程是为所有人设计的,所以不免要涉及这部分的内容。所幸你可以跳过这部分,但有一点值得了解,即无论使用什么样的窗口解决方案,都需要实现 raw-window-handle 里定义的 raw_window_handle()raw_display_handle() 两个抽象接口。如果有兴趣自己动手来为 wgpu 实现一个基础的窗口,可以参考 wgpu-in-app与 Android App 集成这一章节也有详情的介绍。

我们要使用哪些包?

我们将尽量保持基础部分的简单性。后续我们会逐渐添加依赖,先列出相关的 Cargo.toml 依赖项如下:

toml
[dependencies]
-winit = "0.28.7"
-env_logger = "0.10"
-log = "0.4"
-wgpu = "0.17"

使用 Rust 的新版解析器

自 0.10 版本起,wgpu 需要使用 cargo 的 新版特性解析器,这在 Rust 的 2021 edition(即任何基于 Rust 1.56.0 或更新版本的新项目)中是默认启用的。但如果你仍在使用 2018 edition,那么就需要在单项目 Cargo.toml[package] 配置中,或者在⼯作空间的根级 Cargo.toml[workspace] 配置中添加 resolver = "2" 项。

关于 env_logger

通过 env_logger::init() 来启用日志是非常重要的。当 wgpu 遇到各类错误时,它都会用一条通用性的消息抛出 panic,并通过日志来记录实际的错误信息。 也就是说,如果不添加 env_logger::init(),wgpu 将静默地退出,从而令你非常困惑!
(下面的代码中已经启用)

创建一个新项目

运行 cargo new xxx,xxx 是指你的项目名称。
(下面的例子中我使用了 tutorial1_window)

示例代码

这一部分没有什么特别之处,所以直接贴出完整的代码。只需将其粘贴到你的 main.rs 中即可:

rust
use winit::{
-    event::*,
-    event_loop::{ControlFlow, EventLoop},
-    window::WindowBuilder,
-};
-
-pub fn run() {
-    env_logger::init();
-    let event_loop = EventLoop::new();
-    let window = WindowBuilder::new().build(&event_loop).unwrap();
-
-    event_loop.run(move |event, _, control_flow| match event {
-        Event::WindowEvent {
-            ref event,
-            window_id,
-        } if window_id == window.id() => match event {
-            WindowEvent::CloseRequested
-            | WindowEvent::KeyboardInput {
-                input:
-                    KeyboardInput {
-                        state: ElementState::Pressed,
-                        virtual_keycode: Some(VirtualKeyCode::Escape),
-                        ..
-                    },
-                ..
-            } => *control_flow = ControlFlow::Exit,
-            _ => {}
-        },
-        _ => {}
-    });
-}

上述代码所做的全部工作就是创建了一个窗口,并在用户关闭或按下 escape 键前使其保持打开。接下来,我们需要在入口函数中运行这些代码。很简单,只需在 main() 函数中调用 run(),然后运行!

rust
fn main() {
-    run();
-}

(其中 tutorial1_window 是你之前用 cargo 创建的项目的名称)

当你只打算支持桌面环境时,上边这些就是全部所要做的!在下一个教程中,我们将真正开始使用 wgpu!

添加对 web 的支持

如果讲完了这个关于 WebGPU 的教程,却不提如何在 web 上使用它,那么这个教程就是不完整的。幸运的是,让一个 wgpu 程序在浏览器中运行并不难。

让我们从修改 Cargo.toml 开始:

toml
[lib]
-crate-type = ["cdylib", "rlib"]

这几行告诉 cargo 允许项目构建(build)一个本地的 Rust 静态库(rlib)和一个 C/C++ 兼容库(cdylib)。 我们需要 rlib 来在桌面环境中运行 wgpu,需要 cdylib 来构建在浏览器中运行的 Web Assembly。

仅在需要将项目做为其他 Rust 项目的(crate)提供时,[lib] 项的配置才是必须的。所以我们的示例程序可以省略上面这一步。

添加上述 [lib] 内容依赖于像原作者那样将主要代码写入一个 lib.rs 文件,而如果想要通过下文的 wasm-pack 方法构建,则需要进行上述步骤。

Web Assembly

Web Assembly 即 WASM,是大多数现代浏览器支持的二进制格式,它令 Rust 等底层语言能在网页上运行。这允许我们用 Rust 编写应用程序,并使用几行 Javascript 来加载它到 Web 浏览器中运行。

现在,我们仅需添加一些专门用于在 WASM 中运行的依赖项:

toml
[dependencies]
-cfg-if = "1"
-# 其他常规依赖...
-
-[target.'cfg(target_arch = "wasm32")'.dependencies]
-console_error_panic_hook = "0.1.7"
-console_log = "1.0"
-wasm-bindgen = "0.2.87"
-wasm-bindgen-futures = "0.4.34"
-web-sys = { version = "0.3.64", features = [
-    "Document",
-    "Window",
-    "Element",
-]}

cfg-if提供了一个宏,使得更加容易管理特定平台的代码。

[target.'cfg(target_arch = "wasm32")'.dependencies] 行告诉 cargo,如果我们的目标是 wasm32 架构,则只包括这些依赖项。接下来的几个依赖项只是让我们与 javascript 的交互更容易。

更多示例代码

首先, 我们需要在 main.rs 内引入 wasm-bindgen :

rust
#[cfg(target_arch="wasm32")]
-use wasm_bindgen::prelude::*;

接下来,需要告诉 wasm-bindgen 在 WASM 被加载后执行我们的 run() 函数。

rust
#[cfg_attr(target_arch="wasm32", wasm_bindgen(start))]
-pub async fn run() {
-    // 省略的代码...
-}

然后需要根据是否在 WASM 环境来切换我们正在使用的日志。在 run() 函数内添加以下代码替换 env_logger::init() 行。

rust
cfg_if::cfg_if! {
-    if #[cfg(target_arch = "wasm32")] {
-        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
-        console_log::init_with_level(log::Level::Warn).expect("无法初始化日志库");
-    } else {
-        env_logger::init();
-    }
-}

上边的代码判断了构建目标,在 web 构建中设置 console_logconsole_error_panic_hook。这很重要,因为 env_logger 目前不支持 Web Assembly。

另一种实现

在第 3~8 章,run() 函数及遍历 event_loop 的代码被统一封装到了 framework.rs 中, 还定义了 Action trait 来抽象每一章中不同的 State 。 然后通过调用 wasm_bindgen_futures 包的 spawn_local 函数来创建 State 实例并处理 JS 异常。

第 1 ~ 2 章的代码通过 cargo run-wasm --bin xxx 运行时,在浏览器的控制台中会看到的 ...Using exceptions for control flow, don't mind me. This isn't actually an error! 错误现在被消除了:

rust
#[cfg(target_arch = "wasm32")]
-pub fn run<A: Action + 'static>() {
-    // ...
-    wasm_bindgen_futures::spawn_local(async move {
-        let (event_loop, instance) = create_action_instance::<A>().await;
-        let run_closure = Closure::once_into_js(move || start_event_loop::<A>(event_loop, instance));
-
-        // 处理运行过程中抛出的 JS 异常。
-        // 否则 wasm_bindgen_futures 队列将中断,且不再处理任何任务。
-        if let Err(error) = call_catch(&run_closure) {
-            // ...
-        }
-    }
-}

接下来,在创建了事件循环与窗口之后,我们需要在应用程序所在的 HTML 网页中添加一个画布(canvas):

rust
#[cfg(target_arch = "wasm32")]
-{
-    // Winit 不允许用 CSS 调整大小,所以在 web 环境里我们必须手动设置大小。
-    use winit::dpi::PhysicalSize;
-    window.set_inner_size(PhysicalSize::new(450, 400));
-
-    use winit::platform::web::WindowExtWebSys;
-    web_sys::window()
-        .and_then(|win| win.document())
-        .and_then(|doc| {
-            let dst = doc.get_element_by_id("wasm-example")?;
-            let canvas = web_sys::Element::from(window.canvas());
-            dst.append_child(&canvas).ok()?;
-            Some(())
-        })
-        .expect("无法将画布添加到网页上");
-}

"wasm-example" 这个 ID 是针对我的项目(也就是本教程)的。你可以你在 HTML 中使用任何 ID 来代替,或者,你也可以直接将画布添加到 <body> 中,就像 wgpu 源码仓库中所做的那样,这部分最终由你决定。

上边这些就是我们现在需要的所有 web 专用代码。接下来要做的就是构建 Web Assembly 本身。

译者注:以下关于 wasm-pack 的内容来自原文。但是由于它和 WebGPU 接口都尚未稳定,译者暂时不推荐用它构建此教程中的项目。参考本教程和原作者的仓库,这里给出一个使用 cargo build 的简易构建过程,如有疏漏请 PR 指正。

  1. 如果要支持 WebGL,那么在 Cargo.toml 中加入以下描述来启用 cargo 的 --features 参数,参考 wgpu 的运行指南
toml
[features]
-default = []
-webgl = ["wgpu/webgl"]
  1. 运行 cargo build --target wasm32-unknown-unknown --features webgl
  2. 安装 wasm-bindgen 并运行:
shell
cargo install -f wasm-bindgen-cli --version 0.2.84
-wasm-bindgen --no-typescript --out-dir {你的输出目录,例如 ./tutorial1_window_output} --web {wasm 所在的目录,例如 .\\target\\wasm32-unknown-unknown\\release\\tutorial1_window.wasm}
  1. 此时会得到一个包含 .wasm 和 .js 文件的文件夹。可以用下文的 html 引入该 .js 文件。如果直接在浏览器打开该 html 文件,可能遇到 CORS 问题;如果正常运行,则可能出现一个警告 Using exceptions for control flow, don't mind me. This isn't actually an error!,忽略即可。

Wasm Pack

你可以只用 wasm-bindgen 来构建一个 wgpu 应用程序,但我在这样做的时候遇到了一些问题。首先,你需要在电脑上安装 wasm-bindgen,并将其作为一个依赖项。作为依赖关系的版本需要与你安装的版本完全一致,否则构建将会失败。

为了克服这个缺点,并使阅读这篇教程的人更容易上手,我选择在组合中加入 wasm-pack。wasm-pack 可以为你安装正确的 wasm-bindgen 版本,而且它还支持为不同类型的 web 目标进行构建:浏览器、NodeJS 和 webpack 等打包工具。

使用 wasm-pack 前,你需要先安装

完成安装后,就可以用它来构建我们的项目了。当你的项目是一个独立的(crate)时,可以直接使用 wasm-pack build。如果是工作区(workspace),就必须指定你要构建的包。想象一下是一个名为 game 的目录,你就会使用:

bash
wasm-pack build game

译者注wasm-pack build 需要如之前所说的那样加入 [lib] 等来构建静态库。

一旦 wasm-pack 完成构建,在你的目录下就会有一个 pkg 目录,运行 WASM 代码所需的所有 javascript 代码都在这里。然后在 javascript 中导入 WASM 模块:

js
const init = await import("./pkg/game.js");
-init().then(() => console.log("WASM Loaded"));

这个网站使用了 VitePress,并且是在 Vue 组件中加载 WASM。如果想看看我是怎么做的,可以查看这里

如果打算在一个普通的 HTML 网站中使用你的 WASM 模块,只需告诉 wasm-pack 以 web 为构建目标:

bash
wasm-pack build --target web

然后就可以在一个 ES6 模块中运行 WASM 代码:

html
<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="UTF-8" />
-    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <title>Pong with WASM</title>
-  </head>
-
-  <body id="wasm-example">
-    <script type="module">
-      import init from "./pkg/pong.js";
-      init().then(() => {
-        console.log("WASM Loaded");
-      });
-    </script>
-    <style>
-      canvas {
-        background-color: black;
-      }
-    </style>
-  </body>
-</html>

点击下面的按钮查看示例代码运行!

`,56);function D(F,y,i,C,A,b){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[c,n(a,{example:"tutorial1_window"}),n(l)])}const m=p(t,[["render",D]]);export{d as __pageData,m as default}; diff --git a/assets/beginner_tutorial1-window.md.16634366.lean.js b/assets/beginner_tutorial1-window.md.16634366.lean.js deleted file mode 100644 index 64ff4d09c..000000000 --- a/assets/beginner_tutorial1-window.md.16634366.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const d=JSON.parse('{"title":"依赖与窗口","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial1-window.md","filePath":"beginner/tutorial1-window.md","lastUpdated":1701933923000}'),t={name:"beginner/tutorial1-window.md"},c=r("",56);function D(F,y,i,C,A,b){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[c,n(a,{example:"tutorial1_window"}),n(l)])}const m=p(t,[["render",D]]);export{d as __pageData,m as default}; diff --git a/assets/beginner_tutorial1-window.md.Phs9gXri.js b/assets/beginner_tutorial1-window.md.Phs9gXri.js new file mode 100644 index 000000000..970d7da45 --- /dev/null +++ b/assets/beginner_tutorial1-window.md.Phs9gXri.js @@ -0,0 +1,117 @@ +import{_ as l,D as s,o as p,c as e,I as i,R as h}from"./chunks/framework.bMtwhlie.js";const b=JSON.parse('{"title":"依赖与窗口","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial1-window.md","filePath":"beginner/tutorial1-window.md","lastUpdated":1703303099000}'),t={name:"beginner/tutorial1-window.md"},k=h(`

依赖与窗口

部分读者可能已经熟悉如何在 Rust 中打开窗口程序,且有自己偏好的窗口管理库。但本教程是为所有人设计的,所以不免要涉及这部分的内容。所幸你可以跳过这部分,但有一点值得了解,即无论使用什么样的窗口解决方案,都需要实现 raw-window-handle 里定义的 raw_window_handle()raw_display_handle() 两个抽象接口。如果有兴趣自己动手来为 wgpu 实现一个基础的窗口,可以参考 wgpu-in-app与 Android App 集成这一章节也有详情的介绍。

我们要使用哪些包?

我们将尽量保持基础部分的简单性。后续我们会逐渐添加依赖,先列出相关的 Cargo.toml 依赖项如下:

toml
[dependencies]
+winit = "0.28.7"
+env_logger = "0.10"
+log = "0.4"
+wgpu = "0.17"

使用 Rust 的新版解析器

自 0.10 版本起,wgpu 需要使用 cargo 的 新版特性解析器,这在 Rust 的 2021 edition(即任何基于 Rust 1.56.0 或更新版本的新项目)中是默认启用的。但如果你仍在使用 2018 edition,那么就需要在单项目 Cargo.toml[package] 配置中,或者在⼯作空间的根级 Cargo.toml[workspace] 配置中添加 resolver = "2" 项。

关于 env_logger

通过 env_logger::init() 来启用日志是非常重要的。当 wgpu 遇到各类错误时,它都会用一条通用性的消息抛出 panic,并通过日志来记录实际的错误信息。 也就是说,如果不添加 env_logger::init(),wgpu 将静默地退出,从而令你非常困惑!
(下面的代码中已经启用)

创建一个新项目

运行 cargo new xxx,xxx 是指你的项目名称。
(下面的例子中我使用了 tutorial1_window)

示例代码

这一部分没有什么特别之处,所以直接贴出完整的代码。只需将其粘贴到你的 main.rs 中即可:

rust
use winit::{
+    event::*,
+    event_loop::{ControlFlow, EventLoop},
+    window::WindowBuilder,
+};
+
+pub fn run() {
+    env_logger::init();
+    let event_loop = EventLoop::new();
+    let window = WindowBuilder::new().build(&event_loop).unwrap();
+
+    event_loop.run(move |event, _, control_flow| match event {
+        Event::WindowEvent {
+            ref event,
+            window_id,
+        } if window_id == window.id() => match event {
+            WindowEvent::CloseRequested
+            | WindowEvent::KeyboardInput {
+                input:
+                    KeyboardInput {
+                        state: ElementState::Pressed,
+                        virtual_keycode: Some(VirtualKeyCode::Escape),
+                        ..
+                    },
+                ..
+            } => *control_flow = ControlFlow::Exit,
+            _ => {}
+        },
+        _ => {}
+    });
+}

上述代码所做的全部工作就是创建了一个窗口,并在用户关闭或按下 escape 键前使其保持打开。接下来,我们需要在入口函数中运行这些代码。很简单,只需在 main() 函数中调用 run(),然后运行!

rust
fn main() {
+    run();
+}

(其中 tutorial1_window 是你之前用 cargo 创建的项目的名称)

当你只打算支持桌面环境时,上边这些就是全部所要做的!在下一个教程中,我们将真正开始使用 wgpu!

添加对 web 的支持

如果讲完了这个关于 WebGPU 的教程,却不提如何在 web 上使用它,那么这个教程就是不完整的。幸运的是,让一个 wgpu 程序在浏览器中运行并不难。

让我们从修改 Cargo.toml 开始:

toml
[lib]
+crate-type = ["cdylib", "rlib"]

这几行告诉 cargo 允许项目构建(build)一个本地的 Rust 静态库(rlib)和一个 C/C++ 兼容库(cdylib)。 我们需要 rlib 来在桌面环境中运行 wgpu,需要 cdylib 来构建在浏览器中运行的 Web Assembly。

仅在需要将项目做为其他 Rust 项目的(crate)提供时,[lib] 项的配置才是必须的。所以我们的示例程序可以省略上面这一步。

添加上述 [lib] 内容依赖于像原作者那样将主要代码写入一个 lib.rs 文件,而如果想要通过下文的 wasm-pack 方法构建,则需要进行上述步骤。

Web Assembly

Web Assembly 即 WASM,是大多数现代浏览器支持的二进制格式,它令 Rust 等底层语言能在网页上运行。这允许我们用 Rust 编写应用程序,并使用几行 Javascript 来加载它到 Web 浏览器中运行。

现在,我们仅需添加一些专门用于在 WASM 中运行的依赖项:

toml
[dependencies]
+cfg-if = "1"
+# 其他常规依赖...
+
+[target.'cfg(target_arch = "wasm32")'.dependencies]
+console_error_panic_hook = "0.1.7"
+console_log = "1.0"
+wasm-bindgen = "0.2.87"
+wasm-bindgen-futures = "0.4.34"
+web-sys = { version = "0.3.64", features = [
+    "Document",
+    "Window",
+    "Element",
+]}

cfg-if提供了一个宏,使得更加容易管理特定平台的代码。

[target.'cfg(target_arch = "wasm32")'.dependencies] 行告诉 cargo,如果我们的目标是 wasm32 架构,则只包括这些依赖项。接下来的几个依赖项只是让我们与 javascript 的交互更容易。

更多示例代码

首先, 我们需要在 main.rs 内引入 wasm-bindgen :

rust
#[cfg(target_arch="wasm32")]
+use wasm_bindgen::prelude::*;

接下来,需要告诉 wasm-bindgen 在 WASM 被加载后执行我们的 run() 函数。

rust
#[cfg_attr(target_arch="wasm32", wasm_bindgen(start))]
+pub async fn run() {
+    // 省略的代码...
+}

然后需要根据是否在 WASM 环境来切换我们正在使用的日志。在 run() 函数内添加以下代码替换 env_logger::init() 行。

rust
cfg_if::cfg_if! {
+    if #[cfg(target_arch = "wasm32")] {
+        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
+        console_log::init_with_level(log::Level::Warn).expect("无法初始化日志库");
+    } else {
+        env_logger::init();
+    }
+}

上边的代码判断了构建目标,在 web 构建中设置 console_logconsole_error_panic_hook。这很重要,因为 env_logger 目前不支持 Web Assembly。

另一种实现

在第 3~8 章,run() 函数及遍历 event_loop 的代码被统一封装到了 framework.rs 中, 还定义了 Action trait 来抽象每一章中不同的 State 。 然后通过调用 wasm_bindgen_futures 包的 spawn_local 函数来创建 State 实例并处理 JS 异常。

第 1 ~ 2 章的代码通过 cargo run-wasm --bin xxx 运行时,在浏览器的控制台中会看到的 ...Using exceptions for control flow, don't mind me. This isn't actually an error! 错误现在被消除了:

rust
#[cfg(target_arch = "wasm32")]
+pub fn run<A: Action + 'static>() {
+    // ...
+    wasm_bindgen_futures::spawn_local(async move {
+        let (event_loop, instance) = create_action_instance::<A>().await;
+        let run_closure = Closure::once_into_js(move || start_event_loop::<A>(event_loop, instance));
+
+        // 处理运行过程中抛出的 JS 异常。
+        // 否则 wasm_bindgen_futures 队列将中断,且不再处理任何任务。
+        if let Err(error) = call_catch(&run_closure) {
+            // ...
+        }
+    }
+}

接下来,在创建了事件循环与窗口之后,我们需要在应用程序所在的 HTML 网页中添加一个画布(canvas):

rust
#[cfg(target_arch = "wasm32")]
+{
+    // Winit 不允许用 CSS 调整大小,所以在 web 环境里我们必须手动设置大小。
+    use winit::dpi::PhysicalSize;
+    window.set_inner_size(PhysicalSize::new(450, 400));
+
+    use winit::platform::web::WindowExtWebSys;
+    web_sys::window()
+        .and_then(|win| win.document())
+        .and_then(|doc| {
+            let dst = doc.get_element_by_id("wasm-example")?;
+            let canvas = web_sys::Element::from(window.canvas());
+            dst.append_child(&canvas).ok()?;
+            Some(())
+        })
+        .expect("无法将画布添加到网页上");
+}

"wasm-example" 这个 ID 是针对我的项目(也就是本教程)的。你可以你在 HTML 中使用任何 ID 来代替,或者,你也可以直接将画布添加到 <body> 中,就像 wgpu 源码仓库中所做的那样,这部分最终由你决定。

上边这些就是我们现在需要的所有 web 专用代码。接下来要做的就是构建 Web Assembly 本身。

译者注:以下关于 wasm-pack 的内容来自原文。但是由于它和 WebGPU 接口都尚未稳定,译者暂时不推荐用它构建此教程中的项目。参考本教程和原作者的仓库,这里给出一个使用 cargo build 的简易构建过程,如有疏漏请 PR 指正。

  1. 如果要支持 WebGL,那么在 Cargo.toml 中加入以下描述来启用 cargo 的 --features 参数,参考 wgpu 的运行指南
toml
[features]
+default = []
+webgl = ["wgpu/webgl"]
  1. 运行 cargo build --target wasm32-unknown-unknown --features webgl
  2. 安装 wasm-bindgen 并运行:
shell
cargo install -f wasm-bindgen-cli --version 0.2.84
+wasm-bindgen --no-typescript --out-dir {你的输出目录,例如 ./tutorial1_window_output} --web {wasm 所在的目录,例如 .\\target\\wasm32-unknown-unknown\\release\\tutorial1_window.wasm}
  1. 此时会得到一个包含 .wasm 和 .js 文件的文件夹。可以用下文的 html 引入该 .js 文件。如果直接在浏览器打开该 html 文件,可能遇到 CORS 问题;如果正常运行,则可能出现一个警告 Using exceptions for control flow, don't mind me. This isn't actually an error!,忽略即可。

Wasm Pack

你可以只用 wasm-bindgen 来构建一个 wgpu 应用程序,但我在这样做的时候遇到了一些问题。首先,你需要在电脑上安装 wasm-bindgen,并将其作为一个依赖项。作为依赖关系的版本需要与你安装的版本完全一致,否则构建将会失败。

为了克服这个缺点,并使阅读这篇教程的人更容易上手,我选择在组合中加入 wasm-pack。wasm-pack 可以为你安装正确的 wasm-bindgen 版本,而且它还支持为不同类型的 web 目标进行构建:浏览器、NodeJS 和 webpack 等打包工具。

使用 wasm-pack 前,你需要先安装

完成安装后,就可以用它来构建我们的项目了。当你的项目是一个独立的(crate)时,可以直接使用 wasm-pack build。如果是工作区(workspace),就必须指定你要构建的包。想象一下是一个名为 game 的目录,你就会使用:

bash
wasm-pack build game

译者注wasm-pack build 需要如之前所说的那样加入 [lib] 等来构建静态库。

一旦 wasm-pack 完成构建,在你的目录下就会有一个 pkg 目录,运行 WASM 代码所需的所有 javascript 代码都在这里。然后在 javascript 中导入 WASM 模块:

js
const init = await import("./pkg/game.js");
+init().then(() => console.log("WASM Loaded"));

这个网站使用了 VitePress,并且是在 Vue 组件中加载 WASM。如果想看看我是怎么做的,可以查看这里

如果打算在一个普通的 HTML 网站中使用你的 WASM 模块,只需告诉 wasm-pack 以 web 为构建目标:

bash
wasm-pack build --target web

然后就可以在一个 ES6 模块中运行 WASM 代码:

html
<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Pong with WASM</title>
+  </head>
+
+  <body id="wasm-example">
+    <script type="module">
+      import init from "./pkg/pong.js";
+      init().then(() => {
+        console.log("WASM Loaded");
+      });
+    </script>
+    <style>
+      canvas {
+        background-color: black;
+      }
+    </style>
+  </body>
+</html>

点击下面的按钮查看示例代码运行!

`,56);function r(d,E,g,o,c,y){const a=s("WasmExample"),n=s("AutoGithubLink");return p(),e("div",null,[k,i(a,{example:"tutorial1_window"}),i(n)])}const u=l(t,[["render",r]]);export{b as __pageData,u as default}; diff --git a/assets/beginner_tutorial1-window.md.Phs9gXri.lean.js b/assets/beginner_tutorial1-window.md.Phs9gXri.lean.js new file mode 100644 index 000000000..d3824905a --- /dev/null +++ b/assets/beginner_tutorial1-window.md.Phs9gXri.lean.js @@ -0,0 +1 @@ +import{_ as l,D as s,o as p,c as e,I as i,R as h}from"./chunks/framework.bMtwhlie.js";const b=JSON.parse('{"title":"依赖与窗口","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial1-window.md","filePath":"beginner/tutorial1-window.md","lastUpdated":1703303099000}'),t={name:"beginner/tutorial1-window.md"},k=h("",56);function r(d,E,g,o,c,y){const a=s("WasmExample"),n=s("AutoGithubLink");return p(),e("div",null,[k,i(a,{example:"tutorial1_window"}),i(n)])}const u=l(t,[["render",r]]);export{b as __pageData,u as default}; diff --git a/assets/beginner_tutorial2-surface_index.md.74c14ab6.js b/assets/beginner_tutorial2-surface_index.md.74c14ab6.js deleted file mode 100644 index 2f13601e7..000000000 --- a/assets/beginner_tutorial2-surface_index.md.74c14ab6.js +++ /dev/null @@ -1,253 +0,0 @@ -import{_ as e,E as s,o as r,c,J as n,S as t,k as a,a as D}from"./chunks/framework.adbf3c9e.js";const F="/learn-wgpu-zh/assets/cleared-window.f143363a.png",y="/learn-wgpu-zh/assets/no-clear.304e22f0.png",w=JSON.parse('{"title":"展示平面 (Surface)","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial2-surface/index.md","filePath":"beginner/tutorial2-surface/index.md","lastUpdated":1701933923000}'),C={name:"beginner/tutorial2-surface/index.md"},i=t(`

展示平面 (Surface)

封装 State

为方便起见,我们将所有字段封装在一个结构体内,并在其上添加一些函数:

rust
// lib.rs
-use winit::window::Window;
-
-struct State {
-    surface: wgpu::Surface,
-    device: wgpu::Device,
-    queue: wgpu::Queue,
-    config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
-}
-
-impl State {
-    // 创建某些 wgpu 类型需要使用异步代码
-    async fn new(window: &Window) -> Self {
-        todo!()
-    }
-
-    fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-        todo!()
-    }
-
-    fn input(&mut self, event: &WindowEvent) -> bool {
-        todo!()
-    }
-
-    fn update(&mut self) {
-        todo!()
-    }
-
-    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
-        todo!()
-    }
-}

此处省略了 State 的字段概述,在后续章节中解释这些函数背后的代码时,它们才会变得更有意义。

surfacedevicequeueconfig 等对象是每个 wgpu 程序都需要的,且它们的创建过程涉及到很多模板代码,所以,从第 3 章开始,我将它们统一封装到了 AppSurface 对象中。

State 中的这些函数在所有章节示例中都有用到,所以,在第 3 ~ 8 章,我将其抽象为了 Action trait:

rust
pub trait Action {
-    fn new(app: app_surface::AppSurface) -> Self;
-    fn get_adapter_info(&self) -> wgpu::AdapterInfo;
-    fn current_window_id(&self) -> WindowId;
-    fn resize(&mut self);
-    fn request_redraw(&mut self);
-    fn input(&mut self, _event: &WindowEvent) -> bool {
-        false
-    }
-    fn update(&mut self) {}
-    fn render(&mut self) -> Result<(), wgpu::SurfaceError>;
-}

实例化 State

这段代码很简单,但还是值得好好讲讲:

rust
impl State {
-    // ...
-    async fn new(window: &Window) -> Self {
-        let size = window.inner_size();
-
-        // instance 变量是 GPU 实例
-        // Backends::all 对应 Vulkan、Metal、DX12、WebGL 等所有后端图形驱动
-        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
-            backends: wgpu::Backends::all(),
-            ..Default::default()
-        });
-        let surface = unsafe { instance.create_surface(window).unwrap() };
-        let adapter = instance
-            .request_adapter(&wgpu::RequestAdapterOptions {
-                compatible_surface: Some(&surface),
-                ..Default::default()
-            }).await.unwrap();

GPU 实例与适配器

GPU 实例(Instance)是使用 wgpu 时所需创建的第一个对象,其主要用途是创建适配器(Adapter)和展示平面(Surface)。

适配器(Adapter)是指向 WebGPU API 实现的实例,一个系统上往往存在多个 WebGPU API 实现实例。也就是说,适配器是固定在特定图形后端的。假如你使用的是 Windows 且有 2 个显卡(集成显卡 + 独立显卡),则至少有 4 个适配器可供使用,分别有 2 个固定在 Vulkan 和 DirectX 后端。我们可以用它获取关联显卡的信息,例如显卡名称与其所适配到的后端图形驱动等。稍后我们会用它来创建逻辑设备命令队列。现在先讨论一下 RequestAdapterOptions 所涉及的字段。

此处传递给 request_adapter 的参数不能保证对所有设备都有效,但是应该对大多数设备都有效。当 wgpu 找不到符合要求的适配器,request_adapter 将返回 None。如果你想获取某个特定图形后端的所有适配器,可以使用 enumerate_adapters 函数,它会返回一个迭代器,你可以遍历检查其中是否有满足需求的适配器。

rust
let adapter = instance
-    .enumerate_adapters(wgpu::Backends::all())
-    .filter(|adapter| {
-        // 检查该适配器是否兼容我们的展示平面
-        adapter.is_surface_supported(&surface)
-    })
-    .next()
-    .unwrap();

更多可用于优化适配器搜索的函数,请查看文档

展示平面

展示平面(Surface)是我们绘制到窗口的部分,需要它来将绘制结果展示(或者说,呈现)到屏幕上。窗口程序需要实现 raw-window-handle HasRawWindowHandle trait 来创建展示平面。所幸 winit 的 Window 符合这个要求。我们还需要展示平面来请求适配器

逻辑设备与命令队列

让我们使用适配器来创建逻辑设备 (Device) 和命令队列 (Queue)。

rust
let (device, queue) = adapter.request_device(
-    &wgpu::DeviceDescriptor {
-        features: wgpu::Features::empty(),
-        // WebGL 后端并不支持 wgpu 的所有功能,
-        // 所以如果要以 web 为构建目标,就必须禁用一些功能。
-        limits: if cfg!(target_arch = "wasm32") {
-            wgpu::Limits::downlevel_webgl2_defaults()
-        } else {
-            wgpu::Limits::default()
-        },
-        label: None,
-    },
-    None, // 追踪 API 调用路径
-).await.unwrap();

DeviceDescriptor上的 features 字段允许我们指定想要的扩展功能。对于这个简单的例子,我决定不使用任何额外的功能。

显卡会限制可用的扩展功能,所以如果想使用某些功能,你可能需要限制支持的设备或提供变通函数。

可以使用 adapter.features()device.features() 获取设备支持的扩展功能列表。

如果有需要,请查看完整的扩展功能列表

limits 字段描述了创建某些类型的资源的限制。我们在本教程中使用默认值,所以可以支持大多数设备。你可以在这里查看限制列表。

rust
let caps = surface.get_capabilities(&adapter);
-let config = wgpu::SurfaceConfiguration {
-    usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
-    format: caps.formats[0],
-    width: size.width,
-    height: size.height,
-    present_mode: wgpu::PresentMode::Fifo,
-    alpha_mode: caps.alpha_modes[0],
-    view_formats: vec![],
-};
-surface.configure(&device, &config);

这里我们为展示平面定义了一个配置。它将定义展示平面如何创建其底层的 SurfaceTexture。讲 render 函数时我们再具体讨论 SurfaceTexture,现在先谈谈此配置的字段。

usage 字段描述了 SurfaceTexture 如何被使用。RENDER_ATTACHMENT 指定将被用来渲染到屏幕的纹理(我们将在后面讨论更多的 TextureUsages 枚举值)。

format 定义了 SurfaceTexture 在 GPU 内存上如何被存储。不同的显示设备偏好不同的纹理格式。我们使用surface.get_capabilities(&adapter).formats 来获取当前显示设备的最佳格式。

widthheight 指定 SurfaceTexture 的宽度和高度(物理像素,等于逻辑像素乘以屏幕缩放因子)。这通常就是窗口的宽和高。

需要确保 SurfaceTexture 的宽高不能为 0,这会导致你的应用程序崩溃。

present_mode 指定的 wgpu::PresentMode 枚举值决定了展示平面如何与显示设备同步。我们选择的PresentMode::Fifo 指定了显示设备的刷新率做为渲染的帧速率,这本质上就是垂直同步(VSync),所有平台都得支持这种呈现模式(PresentMode)。你可以在文档中查看所有的模式。

当你想让用户来选择他们使用的呈现模式时,可以使用 surface.get_capabilities(&adapter) 获取展示平面支持的所有呈现模式的列表:

rust
let modes = surface.get_capabilities(&adapter).present_modes;

PresentMode::Fifo 模式无论如何都是被支持的,PresentMode::AutoVsyncPresentMode::AutoNoVsync 支持回退,因此也能工作在所有平台上。

现在已经正确地配置了展示平面,我们在函数的末尾添加上这些新字段:

rust
Self {
-            surface,
-            device,
-            queue,
-            config,
-            size,
-        }
-    }
-    // ...
-}

由于 State::new() 函数是异步的,因此需要把 run() 也改成异步的,以便可以在函数调用处等待它。

rust
pub async fn run() {
-    // 窗口设置...
-
-    let mut state = State::new(&window).await;
-
-    // 事件遍历...
-}

现在 run() 是异步的了,main() 需要某种方式来等待它执行完成。我们可以使用 tokioasync-std 等异步,但我打算使用更轻量级的 pollster。在 "Cargo.toml" 中添加以下依赖:

toml
[dependencies]
-# 其他依赖...
-pollster = "0.3"

然后我们使用 pollster 提供的 block_on 函数来等待异步任务执行完成:

rust
fn main() {
-    pollster::block_on(run());
-}

WASM 环境中不能在异步函数里使用 block_onFuture(异步函数的返回对象)必须使用浏览器的执行器来运行。如果你试图使用自己的执行器,一旦遇到没有立即执行的 Future 时代码就会崩溃。

如果现在尝试构建 WASM 将会失败,因为 wasm-bindgen 不支持使用异步函数作为“开始”函数。你可以改成在 javascript 中手动调用 run,但为了简单起见,我们将把 wasm-bindgen-futures 添加到 WASM 依赖项中,因为这不需要改变任何代码。你的依赖项应该是这样的:

toml
[dependencies]
-cfg-if = "1"
-winit = "0.28.7"
-env_logger = "0.10"
-log = "0.4"
-wgpu = "0.17"
-pollster = "0.3"
-
-[target.'cfg(target_arch = "wasm32")'.dependencies]
-console_error_panic_hook = "0.1.7"
-console_log = "1.0"
-wasm-bindgen = "0.2.87"
-wasm-bindgen-futures = "0.4.34"
-web-sys = { version = "0.3.64", features = [
-    "Document",
-    "Window",
-    "Element",
-]}

调整展示平面的宽高

如果要在应用程序中支持调整展示平面的宽高,将需要在每次窗口的大小改变时重新配置 surface。这就是我们存储物理 size 和用于配置 surfaceconfig 的原因。有了这些,实现 resize 函数就非常简单了。

rust
// impl State
-pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-    if new_size.width > 0 && new_size.height > 0 {
-        self.size = new_size;
-        self.config.width = new_size.width;
-        self.config.height = new_size.height;
-        self.surface.configure(&self.device, &self.config);
-    }
-}

这里和最初的 surface 配置没什么不同,所以就不再赘述。

run() 函数的事件循环中,我们在以下事件中调用 resize() 函数。

rust
match event {
-    // ...
-
-    } if window_id == window.id() => if !state.input(event) {
-        match event {
-            // ...
-
-            WindowEvent::Resized(physical_size) => {
-                state.resize(*physical_size);
-            }
-            WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
-                // new_inner_size 是 &&mut 类型,因此需要解引用两次
-                state.resize(**new_inner_size);
-            }
-            // ...
-}

事件输入

input() 函数返回一个 bool(布尔值),表示一个事件是否已经被处理。如果该函数返回 true,主循环就不再继续处理该事件。

我们现在没有任何想要捕获的事件,只需要返回 false。

rust
// impl State
-fn input(&mut self, event: &WindowEvent) -> bool {
-    false
-}

还需要在事件循环中多做一点工作,我们希望 Staterun() 函数内的事件处理中拥有第一优先级。修改后(加上之前的修改)的代码看起来像是这样的:

rust
// run()
-event_loop.run(move |event, _, control_flow| {
-    match event {
-        Event::WindowEvent {
-            ref event,
-            window_id,
-        } if window_id == window.id() => if !state.input(event) { // 更新!
-            match event {
-                WindowEvent::CloseRequested
-                | WindowEvent::KeyboardInput {
-                    input:
-                        KeyboardInput {
-                            state: ElementState::Pressed,
-                            virtual_keycode: Some(VirtualKeyCode::Escape),
-                            ..
-                        },
-                    ..
-                } => *control_flow = ControlFlow::Exit,
-                WindowEvent::Resized(physical_size) => {
-                    state.resize(*physical_size);
-                }
-                WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
-                    state.resize(**new_inner_size);
-                }
-                _ => {}
-            }
-        }
-        _ => {}
-    }
-});

更新

目前还没有任何东西需要更新,所以令这个函数为空。

rust
fn update(&mut self) {
-    // remove \`todo!()\`
-}

我们稍后将在这里添加一些代码,以便让绘制对象动起来。

渲染

这里就是奇迹发生的地方。首先,我们需要获取一个(Frame)对象以供渲染:

rust
// impl State
-
-fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
-    let output = self.surface.get_current_texture()?;

get_current_texture 函数会等待 surface 提供一个新的 SurfaceTexture。我们将它存储在 output 变量中以便后续使用。

rust
let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());

这一行创建了一个默认设置的纹理视图(TextureView),渲染代码需要利用纹理视图来与纹理交互。

我们还需要创建一个命令编码器(CommandEncoder)来记录实际的命令发送给 GPU。大多数现代图形框架希望命令在被发送到 GPU 之前存储在一个命令缓冲区中。命令编码器创建了一个命令缓冲区,然后我们可以将其发送给 GPU。

rust
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-    label: Some("Render Encoder"),
-});

现在可以开始执行期盼已久的清屏(用统一的颜色填充指定渲染区域)了。我们需要使用 encoder 来创建渲染通道RenderPass)。渲染通道编码所有实际绘制的命令。创建渲染通道的代码嵌套层级有点深,所以在谈论它之前,我先把代码全部复制到这里:

rust
{
-        let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-            label: Some("Render Pass"),
-            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
-                view: &view,
-                resolve_target: None,
-                ops: wgpu::Operations {
-                    load: wgpu::LoadOp::Clear(wgpu::Color {
-                        r: 0.1,
-                        g: 0.2,
-                        b: 0.3,
-                        a: 1.0,
-                    }),
-                    store: wgpu::StoreOp::Store
-                },
-            })],
-            ..Default::default()
-        });
-    }
-
-    // submit 命令能接受任何实现了 IntoIter trait 的参数
-    self.queue.submit(std::iter::once(encoder.finish()));
-    output.present();
-
-    Ok(())
-}

首先,我们来谈谈 encoder.begin_render_pass(...) 周围用 {} 开辟出来的块空间。begin_render_pass() 以可变方式借用了encoder(又称 &mut self),在释放这个可变借用之前,我们不能调用 encoder.finish()。这个块空间告诉 rust,当代码离开这个范围时,丢弃其中的任何变量,从而释放 encoder 上的可变借用,并允许我们 finish() 它。如果你不喜欢 {},也可以使用 drop(render_pass) 来达到同样的效果。

代码的最后几行告诉 wgpu 完成命令缓冲区,并将其提交给 gpu 的渲染队列

我们需再次更新事件循环以调用 render() 函数,还会在它之前先调用 update()

rust
// run()
-event_loop.run(move |event, _, control_flow| {
-    match event {
-        // ...
-        Event::RedrawRequested(window_id) if window_id == window.id() => {
-            state.update();
-            match state.render() {
-                Ok(_) => {}
-                // 当展示平面的上下文丢失,就需重新配置
-                Err(wgpu::SurfaceError::Lost) => state.resize(state.size),
-                // 系统内存不足时,程序应该退出。
-                Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
-                // 所有其他错误(过期、超时等)应在下一帧解决
-                Err(e) => eprintln!("{:?}", e),
-            }
-        }
-        Event::MainEventsCleared => {
-            // 除非我们手动请求,RedrawRequested 将只会触发一次。
-            window.request_redraw();
-        }
-        // ...
-    }
-});

基于以上这些,你就能获得如下渲染效果:

蓝色背景的窗口

关于渲染通道描述符

部分读者可能光看代码就能理解,但如果我不把它介绍一遍,那就是失职。让我们再看一下代码:

rust
&wgpu::RenderPassDescriptor {
-    label: Some("Render Pass"),
-    color_attachments: &[
-        // ...
-    ],
-    ..Default::default()
-}

渲染通道描述符RenderPassDescriptor)只有三个字段: label, color_attachmentsdepth_stencil_attachmentcolor_attachments 描述了要将颜色绘制到哪里。我们使用之前创建的纹理视图来确保渲染到屏幕上。

color_attachments 字段是一个稀疏数组。这允许你使用有多个渲染目标的管线,并且最终只绘制到你所关心的某个渲染目标。

我们后面会使用到 depth_stencil_attachment,现在先将它设置为 None

rust
Some(wgpu::RenderPassColorAttachment {
-    view: &view,
-    resolve_target: None,
-    ops: wgpu::Operations {
-        load: wgpu::LoadOp::Clear(wgpu::Color {
-            r: 0.1,
-            g: 0.2,
-            b: 0.3,
-            a: 1.0,
-        }),
-        store: wgpu::StoreOp::Store
-    },
-})

RenderPassColorAttachment 有一个 view 字段,用于通知 wgpu 将颜色保存到什么纹理。这里我们指定使用 surface.get_current_texture() 创建的 view,这意味着向此附件(Attachment)上绘制的任何颜色都会被绘制到屏幕上。

resolve_target 是接收多重采样解析输出的纹理。除非启用了多重采样, 否则不需要设置它,保留为 None 即可。

ops 字段需要一个 wpgu::Operations 对象。它告诉 wgpu 如何处理屏幕上的颜色(由 view 指定)。load 字段告诉 wgpu 如何处理存储在前一帧的颜色。目前,我们正在用蓝色清屏store 字段告诉 wgpu 是否要将渲染的结果存储到纹理视图后面的纹理上(在这个例子中是 SurfaceTexture )。我们希望存储渲染结果,所以设置为 true

当屏幕被场景对象完全遮挡,那么不清屏是很常见的。但如果你的场景没有覆盖整个屏幕,就会出现类似下边的情况:

./no-clear.png

验证错误?

如果你的机器上运行的是 Vulkan SDK 的旧版本, wgpu 在你的机器上使用 Vulkan 后端时可能会遇到验证错误。至少需要使用 1.2.182 及以上版本,因为旧版本可能会产生一些误报。如果错误持续存在,那可能是遇到了 wgpu 的错误。你可以在 https://github.com/gfx-rs/wgpu 上提交此问题。

挑战

修改 input() 函数以捕获鼠标事件,并使用该函数来更新清屏的颜色。提示:你可能需要用到 WindowEvent::CursorMoved

',88),A=a("h2",{id:"加入-wgpu-微信学习交流群",tabindex:"-1"},[D("加入 wgpu 微信学习交流群 "),a("a",{class:"header-anchor",href:"#加入-wgpu-微信学习交流群","aria-label":'Permalink to "加入 wgpu 微信学习交流群"'},"​")],-1);function u(d,b,m,g,B,f){const l=s("WasmExample"),p=s("AutoGithubLink"),o=s("JoinWeiChatGroup");return r(),c("div",null,[i,n(l,{example:"tutorial2_surface"}),n(p),A,n(o)])}const v=e(C,[["render",u]]);export{w as __pageData,v as default}; diff --git a/assets/beginner_tutorial2-surface_index.md.74c14ab6.lean.js b/assets/beginner_tutorial2-surface_index.md.74c14ab6.lean.js deleted file mode 100644 index 1cc9b4f65..000000000 --- a/assets/beginner_tutorial2-surface_index.md.74c14ab6.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as e,E as s,o as r,c,J as n,S as t,k as a,a as D}from"./chunks/framework.adbf3c9e.js";const F="/learn-wgpu-zh/assets/cleared-window.f143363a.png",y="/learn-wgpu-zh/assets/no-clear.304e22f0.png",w=JSON.parse('{"title":"展示平面 (Surface)","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial2-surface/index.md","filePath":"beginner/tutorial2-surface/index.md","lastUpdated":1701933923000}'),C={name:"beginner/tutorial2-surface/index.md"},i=t("",88),A=a("h2",{id:"加入-wgpu-微信学习交流群",tabindex:"-1"},[D("加入 wgpu 微信学习交流群 "),a("a",{class:"header-anchor",href:"#加入-wgpu-微信学习交流群","aria-label":'Permalink to "加入 wgpu 微信学习交流群"'},"​")],-1);function u(d,b,m,g,B,f){const l=s("WasmExample"),p=s("AutoGithubLink"),o=s("JoinWeiChatGroup");return r(),c("div",null,[i,n(l,{example:"tutorial2_surface"}),n(p),A,n(o)])}const v=e(C,[["render",u]]);export{w as __pageData,v as default}; diff --git a/assets/beginner_tutorial2-surface_index.md.YCJw9OGa.js b/assets/beginner_tutorial2-surface_index.md.YCJw9OGa.js new file mode 100644 index 000000000..ee408664d --- /dev/null +++ b/assets/beginner_tutorial2-surface_index.md.YCJw9OGa.js @@ -0,0 +1,253 @@ +import{_ as h,D as s,o as e,c as k,I as i,R as t,k as a,a as r}from"./chunks/framework.bMtwhlie.js";const 
d="/learn-wgpu-zh/assets/cleared-window.NdJYunVt.png",E="/learn-wgpu-zh/assets/no-clear.CuVpOO_N.png",B=JSON.parse('{"title":"展示平面 (Surface)","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial2-surface/index.md","filePath":"beginner/tutorial2-surface/index.md","lastUpdated":1703303099000}'),g={name:"beginner/tutorial2-surface/index.md"},c=t(`

展示平面 (Surface)

封装 State

为方便起见,我们将所有字段封装在一个结构体内,并在其上添加一些函数:

rust
// lib.rs
+use winit::window::Window;
+
+struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+}
+
+impl State {
+    // 创建某些 wgpu 类型需要使用异步代码
+    async fn new(window: &Window) -> Self {
+        todo!()
+    }
+
+    fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+        todo!()
+    }
+
+    fn input(&mut self, event: &WindowEvent) -> bool {
+        todo!()
+    }
+
+    fn update(&mut self) {
+        todo!()
+    }
+
+    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
+        todo!()
+    }
+}

此处省略了 State 的字段概述,在后续章节中解释这些函数背后的代码时,它们才会变得更有意义。

surfacedevicequeueconfig 等对象是每个 wgpu 程序都需要的,且它们的创建过程涉及到很多模板代码,所以,从第 3 章开始,我将它们统一封装到了 AppSurface 对象中。

State 中的这些函数在所有章节示例中都有用到,所以,在第 3 ~ 8 章,我将其抽象为了 Action trait:

rust
pub trait Action {
+    fn new(app: app_surface::AppSurface) -> Self;
+    fn get_adapter_info(&self) -> wgpu::AdapterInfo;
+    fn current_window_id(&self) -> WindowId;
+    fn resize(&mut self);
+    fn request_redraw(&mut self);
+    fn input(&mut self, _event: &WindowEvent) -> bool {
+        false
+    }
+    fn update(&mut self) {}
+    fn render(&mut self) -> Result<(), wgpu::SurfaceError>;
+}

实例化 State

这段代码很简单,但还是值得好好讲讲:

rust
impl State {
+    // ...
+    async fn new(window: &Window) -> Self {
+        let size = window.inner_size();
+
+        // instance 变量是 GPU 实例
+        // Backends::all 对应 Vulkan、Metal、DX12、WebGL 等所有后端图形驱动
+        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
+            backends: wgpu::Backends::all(),
+            ..Default::default()
+        });
+        let surface = unsafe { instance.create_surface(window).unwrap() };
+        let adapter = instance
+            .request_adapter(&wgpu::RequestAdapterOptions {
+                compatible_surface: Some(&surface),
+                ..Default::default()
+            }).await.unwrap();

GPU 实例与适配器

GPU 实例(Instance)是使用 wgpu 时所需创建的第一个对象,其主要用途是创建适配器(Adapter)和展示平面(Surface)。

适配器(Adapter)是指向 WebGPU API 实现的实例,一个系统上往往存在多个 WebGPU API 实现实例。也就是说,适配器是固定在特定图形后端的。假如你使用的是 Windows 且有 2 个显卡(集成显卡 + 独立显卡),则至少有 4 个适配器可供使用,分别有 2 个固定在 Vulkan 和 DirectX 后端。我们可以用它获取关联显卡的信息,例如显卡名称与其所适配到的后端图形驱动等。稍后我们会用它来创建逻辑设备命令队列。现在先讨论一下 RequestAdapterOptions 所涉及的字段。

此处传递给 request_adapter 的参数不能保证对所有设备都有效,但是应该对大多数设备都有效。当 wgpu 找不到符合要求的适配器,request_adapter 将返回 None。如果你想获取某个特定图形后端的所有适配器,可以使用 enumerate_adapters 函数,它会返回一个迭代器,你可以遍历检查其中是否有满足需求的适配器。

rust
let adapter = instance
+    .enumerate_adapters(wgpu::Backends::all())
+    .filter(|adapter| {
+        // 检查该适配器是否兼容我们的展示平面
+        adapter.is_surface_supported(&surface)
+    })
+    .next()
+    .unwrap();

更多可用于优化适配器搜索的函数,请查看文档

展示平面

展示平面(Surface)是我们绘制到窗口的部分,需要它来将绘制结果展示(或者说,呈现)到屏幕上。窗口程序需要实现 raw-window-handle HasRawWindowHandle trait 来创建展示平面。所幸 winit 的 Window 符合这个要求。我们还需要展示平面来请求适配器

逻辑设备与命令队列

让我们使用适配器来创建逻辑设备 (Device) 和命令队列 (Queue)。

rust
let (device, queue) = adapter.request_device(
+    &wgpu::DeviceDescriptor {
+        features: wgpu::Features::empty(),
+        // WebGL 后端并不支持 wgpu 的所有功能,
+        // 所以如果要以 web 为构建目标,就必须禁用一些功能。
+        limits: if cfg!(target_arch = "wasm32") {
+            wgpu::Limits::downlevel_webgl2_defaults()
+        } else {
+            wgpu::Limits::default()
+        },
+        label: None,
+    },
+    None, // 追踪 API 调用路径
+).await.unwrap();

DeviceDescriptor上的 features 字段允许我们指定想要的扩展功能。对于这个简单的例子,我决定不使用任何额外的功能。

显卡会限制可用的扩展功能,所以如果想使用某些功能,你可能需要限制支持的设备或提供变通函数。

可以使用 adapter.features()device.features() 获取设备支持的扩展功能列表。

如果有需要,请查看完整的扩展功能列表

limits 字段描述了创建某些类型的资源的限制。我们在本教程中使用默认值,所以可以支持大多数设备。你可以在这里查看限制列表。

rust
let caps = surface.get_capabilities(&adapter);
+let config = wgpu::SurfaceConfiguration {
+    usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
+    format: caps.formats[0],
+    width: size.width,
+    height: size.height,
+    present_mode: wgpu::PresentMode::Fifo,
+    alpha_mode: caps.alpha_modes[0],
+    view_formats: vec![],
+};
+surface.configure(&device, &config);

这里我们为展示平面定义了一个配置。它将定义展示平面如何创建其底层的 SurfaceTexture。讲 render 函数时我们再具体讨论 SurfaceTexture,现在先谈谈此配置的字段。

usage 字段描述了 SurfaceTexture 如何被使用。RENDER_ATTACHMENT 指定将被用来渲染到屏幕的纹理(我们将在后面讨论更多的 TextureUsages 枚举值)。

format 定义了 SurfaceTexture 在 GPU 内存上如何被存储。不同的显示设备偏好不同的纹理格式。我们使用surface.get_capabilities(&adapter).formats 来获取当前显示设备的最佳格式。

widthheight 指定 SurfaceTexture 的宽度和高度(物理像素,等于逻辑像素乘以屏幕缩放因子)。这通常就是窗口的宽和高。

需要确保 SurfaceTexture 的宽高不能为 0,这会导致你的应用程序崩溃。

present_mode 指定的 wgpu::PresentMode 枚举值决定了展示平面如何与显示设备同步。我们选择的PresentMode::Fifo 指定了显示设备的刷新率做为渲染的帧速率,这本质上就是垂直同步(VSync),所有平台都得支持这种呈现模式(PresentMode)。你可以在文档中查看所有的模式。

当你想让用户来选择他们使用的呈现模式时,可以使用 surface.get_capabilities(&adapter) 获取展示平面支持的所有呈现模式的列表:

rust
let modes = surface.get_capabilities(&adapter).present_modes;

PresentMode::Fifo 模式无论如何都是被支持的,PresentMode::AutoVsyncPresentMode::AutoNoVsync 支持回退,因此也能工作在所有平台上。

现在已经正确地配置了展示平面,我们在函数的末尾添加上这些新字段:

rust
        Self {
+            surface,
+            device,
+            queue,
+            config,
+            size,
+        }
+    }
+    // ...
+}

由于 State::new() 函数是异步的,因此需要把 run() 也改成异步的,以便可以在函数调用处等待它。

rust
pub async fn run() {
+    // 窗口设置...
+
+    let mut state = State::new(&window).await;
+
+    // 事件遍历...
+}

现在 run() 是异步的了,main() 需要某种方式来等待它执行完成。我们可以使用 tokioasync-std 等异步,但我打算使用更轻量级的 pollster。在 "Cargo.toml" 中添加以下依赖:

toml
[dependencies]
+# 其他依赖...
+pollster = "0.3"

然后我们使用 pollster 提供的 block_on 函数来等待异步任务执行完成:

rust
fn main() {
+    pollster::block_on(run());
+}

WASM 环境中不能在异步函数里使用 block_onFuture(异步函数的返回对象)必须使用浏览器的执行器来运行。如果你试图使用自己的执行器,一旦遇到没有立即执行的 Future 时代码就会崩溃。

如果现在尝试构建 WASM 将会失败,因为 wasm-bindgen 不支持使用异步函数作为“开始”函数。你可以改成在 javascript 中手动调用 run,但为了简单起见,我们将把 wasm-bindgen-futures 添加到 WASM 依赖项中,因为这不需要改变任何代码。你的依赖项应该是这样的:

toml
[dependencies]
+cfg-if = "1"
+winit = "0.28.7"
+env_logger = "0.10"
+log = "0.4"
+wgpu = "0.17"
+pollster = "0.3"
+
+[target.'cfg(target_arch = "wasm32")'.dependencies]
+console_error_panic_hook = "0.1.7"
+console_log = "1.0"
+wasm-bindgen = "0.2.87"
+wasm-bindgen-futures = "0.4.34"
+web-sys = { version = "0.3.64", features = [
+    "Document",
+    "Window",
+    "Element",
+]}

调整展示平面的宽高

如果要在应用程序中支持调整展示平面的宽高,将需要在每次窗口的大小改变时重新配置 surface。这就是我们存储物理 size 和用于配置 surfaceconfig 的原因。有了这些,实现 resize 函数就非常简单了。

rust
// impl State
+pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+    if new_size.width > 0 && new_size.height > 0 {
+        self.size = new_size;
+        self.config.width = new_size.width;
+        self.config.height = new_size.height;
+        self.surface.configure(&self.device, &self.config);
+    }
+}

这里和最初的 surface 配置没什么不同,所以就不再赘述。

run() 函数的事件循环中,我们在以下事件中调用 resize() 函数。

rust
match event {
+    // ...
+
+    } if window_id == window.id() => if !state.input(event) {
+        match event {
+            // ...
+
+            WindowEvent::Resized(physical_size) => {
+                state.resize(*physical_size);
+            }
+            WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
+                // new_inner_size 是 &&mut 类型,因此需要解引用两次
+                state.resize(**new_inner_size);
+            }
+            // ...
+}

事件输入

input() 函数返回一个 bool(布尔值),表示一个事件是否已经被处理。如果该函数返回 true,主循环就不再继续处理该事件。

我们现在没有任何想要捕获的事件,只需要返回 false。

rust
// impl State
+fn input(&mut self, event: &WindowEvent) -> bool {
+    false
+}

还需要在事件循环中多做一点工作,我们希望 Staterun() 函数内的事件处理中拥有第一优先级。修改后(加上之前的修改)的代码看起来像是这样的:

rust
// run()
+event_loop.run(move |event, _, control_flow| {
+    match event {
+        Event::WindowEvent {
+            ref event,
+            window_id,
+        } if window_id == window.id() => if !state.input(event) { // 更新!
+            match event {
+                WindowEvent::CloseRequested
+                | WindowEvent::KeyboardInput {
+                    input:
+                        KeyboardInput {
+                            state: ElementState::Pressed,
+                            virtual_keycode: Some(VirtualKeyCode::Escape),
+                            ..
+                        },
+                    ..
+                } => *control_flow = ControlFlow::Exit,
+                WindowEvent::Resized(physical_size) => {
+                    state.resize(*physical_size);
+                }
+                WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
+                    state.resize(**new_inner_size);
+                }
+                _ => {}
+            }
+        }
+        _ => {}
+    }
+});

更新

目前还没有任何东西需要更新,所以令这个函数为空。

rust
fn update(&mut self) {
+    // remove \`todo!()\`
+}

我们稍后将在这里添加一些代码,以便让绘制对象动起来。

渲染

这里就是奇迹发生的地方。首先,我们需要获取一个(Frame)对象以供渲染:

rust
// impl State
+
+fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
+    let output = self.surface.get_current_texture()?;

get_current_texture 函数会等待 surface 提供一个新的 SurfaceTexture。我们将它存储在 output 变量中以便后续使用。

rust
let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());

这一行创建了一个默认设置的纹理视图(TextureView),渲染代码需要利用纹理视图来与纹理交互。

我们还需要创建一个命令编码器(CommandEncoder)来记录实际的命令发送给 GPU。大多数现代图形框架希望命令在被发送到 GPU 之前存储在一个命令缓冲区中。命令编码器创建了一个命令缓冲区,然后我们可以将其发送给 GPU。

rust
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+    label: Some("Render Encoder"),
+});

现在可以开始执行期盼已久的清屏(用统一的颜色填充指定渲染区域)了。我们需要使用 encoder 来创建渲染通道RenderPass)。渲染通道编码所有实际绘制的命令。创建渲染通道的代码嵌套层级有点深,所以在谈论它之前,我先把代码全部复制到这里:

rust
    {
+        let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+            label: Some("Render Pass"),
+            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
+                view: &view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(wgpu::Color {
+                        r: 0.1,
+                        g: 0.2,
+                        b: 0.3,
+                        a: 1.0,
+                    }),
+                    store: wgpu::StoreOp::Store
+                },
+            })],
+            ..Default::default()
+        });
+    }
+
+    // submit 命令能接受任何实现了 IntoIter trait 的参数
+    self.queue.submit(std::iter::once(encoder.finish()));
+    output.present();
+
+    Ok(())
+}

首先,我们来谈谈 encoder.begin_render_pass(...) 周围用 {} 开辟出来的块空间。begin_render_pass() 以可变方式借用了encoder(又称 &mut self),在释放这个可变借用之前,我们不能调用 encoder.finish()。这个块空间告诉 rust,当代码离开这个范围时,丢弃其中的任何变量,从而释放 encoder 上的可变借用,并允许我们 finish() 它。如果你不喜欢 {},也可以使用 drop(render_pass) 来达到同样的效果。

代码的最后几行告诉 wgpu 完成命令缓冲区,并将其提交给 gpu 的渲染队列

我们需再次更新事件循环以调用 render() 函数,还会在它之前先调用 update()

rust
// run()
+event_loop.run(move |event, _, control_flow| {
+    match event {
+        // ...
+        Event::RedrawRequested(window_id) if window_id == window.id() => {
+            state.update();
+            match state.render() {
+                Ok(_) => {}
+                // 当展示平面的上下文丢失,就需重新配置
+                Err(wgpu::SurfaceError::Lost) => state.resize(state.size),
+                // 系统内存不足时,程序应该退出。
+                Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
+                // 所有其他错误(过期、超时等)应在下一帧解决
+                Err(e) => eprintln!("{:?}", e),
+            }
+        }
+        Event::MainEventsCleared => {
+            // 除非我们手动请求,RedrawRequested 将只会触发一次。
+            window.request_redraw();
+        }
+        // ...
+    }
+});

基于以上这些,你就能获得如下渲染效果:

蓝色背景的窗口

关于渲染通道描述符

部分读者可能光看代码就能理解,但如果我不把它介绍一遍,那就是失职。让我们再看一下代码:

rust
&wgpu::RenderPassDescriptor {
+    label: Some("Render Pass"),
+    color_attachments: &[
+        // ...
+    ],
+    ..Default::default()
+}

渲染通道描述符RenderPassDescriptor)只有三个字段: label, color_attachmentsdepth_stencil_attachmentcolor_attachments 描述了要将颜色绘制到哪里。我们使用之前创建的纹理视图来确保渲染到屏幕上。

color_attachments 字段是一个稀疏数组。这允许你使用有多个渲染目标的管线,并且最终只绘制到你所关心的某个渲染目标。

我们后面会使用到 depth_stencil_attachment,现在先将它设置为 None

rust
Some(wgpu::RenderPassColorAttachment {
+    view: &view,
+    resolve_target: None,
+    ops: wgpu::Operations {
+        load: wgpu::LoadOp::Clear(wgpu::Color {
+            r: 0.1,
+            g: 0.2,
+            b: 0.3,
+            a: 1.0,
+        }),
+        store: wgpu::StoreOp::Store
+    },
+})

RenderPassColorAttachment 有一个 view 字段,用于通知 wgpu 将颜色保存到什么纹理。这里我们指定使用 surface.get_current_texture() 创建的 view,这意味着向此附件(Attachment)上绘制的任何颜色都会被绘制到屏幕上。

resolve_target 是接收多重采样解析输出的纹理。除非启用了多重采样, 否则不需要设置它,保留为 None 即可。

ops 字段需要一个 wgpu::Operations 对象。它告诉 wgpu 如何处理屏幕上的颜色(由 view 指定)。load 字段告诉 wgpu 如何处理存储在前一帧的颜色。目前,我们正在用蓝色清屏store 字段告诉 wgpu 是否要将渲染的结果存储到纹理视图后面的纹理上(在这个例子中是 SurfaceTexture )。我们希望存储渲染结果,所以设置为 wgpu::StoreOp::Store

当屏幕被场景对象完全遮挡,那么不清屏是很常见的。但如果你的场景没有覆盖整个屏幕,就会出现类似下边的情况:

./no-clear.png

验证错误?

如果你的机器上运行的是 Vulkan SDK 的旧版本, wgpu 在你的机器上使用 Vulkan 后端时可能会遇到验证错误。至少需要使用 1.2.182 及以上版本,因为旧版本可能会产生一些误报。如果错误持续存在,那可能是遇到了 wgpu 的错误。你可以在 https://github.com/gfx-rs/wgpu 上提交此问题。

挑战

修改 input() 函数以捕获鼠标事件,并使用该函数来更新清屏的颜色。提示:你可能需要用到 WindowEvent::CursorMoved

',88),y=a("h2",{id:"加入-wgpu-微信学习交流群",tabindex:"-1"},[r("加入 wgpu 微信学习交流群 "),a("a",{class:"header-anchor",href:"#加入-wgpu-微信学习交流群","aria-label":'Permalink to "加入 wgpu 微信学习交流群"'},"​")],-1);function F(o,u,b,m,A,D){const n=s("WasmExample"),p=s("AutoGithubLink"),l=s("JoinWeiChatGroup");return e(),k("div",null,[c,i(n,{example:"tutorial2_surface"}),i(p),y,i(l)])}const v=h(g,[["render",F]]);export{B as __pageData,v as default}; diff --git a/assets/beginner_tutorial2-surface_index.md.YCJw9OGa.lean.js b/assets/beginner_tutorial2-surface_index.md.YCJw9OGa.lean.js new file mode 100644 index 000000000..931f9a125 --- /dev/null +++ b/assets/beginner_tutorial2-surface_index.md.YCJw9OGa.lean.js @@ -0,0 +1 @@ +import{_ as h,D as s,o as e,c as k,I as i,R as t,k as a,a as r}from"./chunks/framework.bMtwhlie.js";const d="/learn-wgpu-zh/assets/cleared-window.NdJYunVt.png",E="/learn-wgpu-zh/assets/no-clear.CuVpOO_N.png",B=JSON.parse('{"title":"展示平面 (Surface)","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial2-surface/index.md","filePath":"beginner/tutorial2-surface/index.md","lastUpdated":1703303099000}'),g={name:"beginner/tutorial2-surface/index.md"},c=t("",88),y=a("h2",{id:"加入-wgpu-微信学习交流群",tabindex:"-1"},[r("加入 wgpu 微信学习交流群 "),a("a",{class:"header-anchor",href:"#加入-wgpu-微信学习交流群","aria-label":'Permalink to "加入 wgpu 微信学习交流群"'},"​")],-1);function F(o,u,b,m,A,D){const n=s("WasmExample"),p=s("AutoGithubLink"),l=s("JoinWeiChatGroup");return e(),k("div",null,[c,i(n,{example:"tutorial2_surface"}),i(p),y,i(l)])}const v=h(g,[["render",F]]);export{B as __pageData,v as default}; diff --git a/assets/beginner_tutorial3-pipeline_index.md.QN_XsHwE.js b/assets/beginner_tutorial3-pipeline_index.md.QN_XsHwE.js new file mode 100644 index 000000000..b0a3e553d --- /dev/null +++ b/assets/beginner_tutorial3-pipeline_index.md.QN_XsHwE.js @@ -0,0 +1,120 @@ +import{_ as p,D as s,o as l,c as e,I as i,R as h}from"./chunks/framework.bMtwhlie.js";const 
k="/learn-wgpu-zh/assets/tutorial3-pipeline-vertices.potCe9z0.png",t="/learn-wgpu-zh/assets/tutorial3-pipeline-triangle.OIWoZQoT.png",m=JSON.parse('{"title":"管线 (Pipeline)","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial3-pipeline/index.md","filePath":"beginner/tutorial3-pipeline/index.md","lastUpdated":1703303099000}'),r={name:"beginner/tutorial3-pipeline/index.md"},d=h('

管线 (Pipeline)

什么是管线?

管线ComputePipelineRenderPipeline)由一系列资源绑定、可编程阶段(着色器)设置及固定功能状态组成。它代表了由 GPU 硬件、驱动程序和用户代理组合而成的完整功能对象,描述了 GPU 将对一组数据执行的所有操作。在本节中,我们将具体创建一个渲染管线RenderPipeline)。

什么是着色器?

着色器(Shader)是你发送给 GPU 的微型程序,用于对数据进行操作。有三种主要类型的着色器:顶点(Vertex)、片元(Fragment)和计算(Compute)着色器。另外还有其他的如几何着色器,但它们属于进阶话题。现在,我们只需要使用顶点和片元着色器。

什么是顶点和片元?

顶点(Vertex)就是三维(或二维)空间中的一个点。这些顶点会两个一组以构成线段集合,或者三个一组以构成三角形集合。

Vertices Graphic

从简单的立方体到复杂的人体结构,大多数现代渲染系统都使用三角形来建模所有图形。这些三角形被存储为构成三角形角的顶点。

我们使用顶点着色器来操作顶点,以便按我们想要的样子做图形的变换。

然后顶点经过光栅化(rasterization)后流转到片元着色阶段,片元着色器决定了片元的颜色。渲染结果图像中的每个像素至少对应一个片元,每个片元可输出一个颜色,该颜色会被存储到其相应的像素上(准确的说,片元的输出是存储到 Color Attachment 的纹素上)。

WebGPU 着色器语言: WGSL

WGSL (WebGPU Shading Language) 是 WebGPU 的着色器语言。 WGSL 的开发重点是让它轻松转换为与后端对应的着色器语言;例如,Vulkan 的 SPIR-V、Metal 的 MSL、DX12 的 HLSL 和 OpenGL 的 GLSL。 这种转换是在内部完成的,我们不需要关心这些细节。 就 wgpu 而言,这种转换是由名为 naga 的库完成的。

WGSL 着色器语言 一章中,有对 WGSL 的由来及语法的更详细介绍。

WGSL 规范及其在 WGPU 中的应用仍在开发中。如果在使用中遇到问题,你或许希望 https://app.element.io/#/room/#wgpu:matrix.org 社区的人帮忙看一下你的代码。

编写着色器

main.rs 所在的目录中创建一个 shader.wgsl 文件。在其中写入以下代码:

rust
// 顶点着色器
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+};
+
+@vertex
+fn vs_main(
+    @builtin(vertex_index) in_vertex_index: u32,
+) -> VertexOutput {
+    var out: VertexOutput;
+    let x = f32(1 - i32(in_vertex_index)) * 0.5;
+    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
+    out.clip_position = vec4f(x, y, 0.0, 1.0);
+    return out;
+}

首先,声明一个 struct 来存储顶点着色器的输出。目前只有一个字段,即 clip_position@builtin(position) 属性标记了此字段将作为顶点在裁剪坐标系中的位置来使用。这类似于 GLSL 的 gl_Position 变量。

形如 vec4 的向量类型是泛型。目前你必须指定向量将包含的值的类型。因此一个使用 32 位浮点数的 3 维向量写做 vec3f

着色器代码的下一部分是 vs_main 函数。@vertex 属性标记了这个函数是顶点着色器的有效入口。我们预期有一个 u32 类型的变量 in_vertex_index,它的值来自 @builtin(vertex_index)

然后使用 VertexOutput 结构体声明一个名为 out 的变量。我们为顶点的裁剪空间坐标创建另外两个 x y 变量。

f32()i32() 表示类型强制转换,将括号里的值转换为此类型。

现在我们可以把 clip_position 保存到 out。然后只需返回 out 就完成了顶点着色器的工作!

我们也可以不使用 struct,直接按以下代码来实现:

rust
@vertex
+fn vs_main(
+    @builtin(vertex_index) in_vertex_index: u32
+) -> @builtin(position) vec4f {
+    // 顶点着色器 code...
+}

接下来是片元着色器。还是在 shader.wgsl 中添加以下代码:

rust
// 片元着色器
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return vec4f(0.3, 0.2, 0.1, 1.0);
+}

这将当前片元的颜色设置为棕色。

注意,顶点和片元着色器的入口点分别被命名为 vs_mainfs_main。在 wgpu 的早期版本中,这两个函数有相同的名字是可以的,但较新版本的 WGSL spec 要求这些名字必须不同。因此在整个教程中都使用(从 wgpu demo 中采用)上述命名方案。

@location(0) 属性标记了该函数返回的 vec4 值将存储在第一个颜色附件(Color Attachment)中。

使用着色器

终于要用到本章节标题提到的概念 管线(Pipeline)了。首先,我们来修改 State 以包括以下代码。

rust
// lib.rs
+struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+    // 新添加!
+    render_pipeline: wgpu::RenderPipeline,
+}

现在,开始在 new() 函数内创建管线。我们需要载入先前写的,渲染管线所需要的着色器。

rust
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
+    label: Some("Shader"),
+    source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
+});

也可以使用 include_wgsl! 宏作为创建 ShaderModuleDescriptor 的快捷方式。

rust
let shader = device.create_shader_module(include_wgsl!("shader.wgsl"));

还需要创建一个 PipelineLayout。在讲完缓冲区Buffer)之后,我们会对它有更多地了解。

rust
let render_pipeline_layout =
+    device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+        label: Some("Render Pipeline Layout"),
+        bind_group_layouts: &[],
+        push_constant_ranges: &[],
+    });

最后,我们就获得了创建 render_pipeline 所需的全部资源:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    label: Some("Render Pipeline"),
+    layout: Some(&render_pipeline_layout),
+    vertex: wgpu::VertexState {
+        module: &shader,
+        entry_point: "vs_main", // 1.
+        buffers: &[], // 2.
+    },
+    fragment: Some(wgpu::FragmentState { // 3.
+        module: &shader,
+        entry_point: "fs_main",
+        targets: &[Some(wgpu::ColorTargetState { // 4.
+            format: config.format,
+            blend: Some(wgpu::BlendState::REPLACE),
+            write_mask: wgpu::ColorWrites::ALL,
+        })],
+    }),
+    // ...

有几点需要注意:

  1. 可以在这里指定着色器中的哪个函数应该是入口点( entry_point)。那是我们用 @vertex@fragment 标记的函数。
  2. buffers 字段告诉 wgpu 要把什么类型的顶点数据传递给顶点着色器。我们会在顶点着色器中指定顶点,所以这里先留空。下一个教程中会在此加入一些数据。
  3. fragment 字段是 Option 类型,所以必须用 Some() 来包装 FragmentState 实例。如果想把颜色数据存储到 surface 就需要用到它 。
  4. targets 字段告诉 wgpu 应该设置哪些颜色输出目标。目前只需设置一个输出目标。格式指定为使用 surface 的格式,并且指定混合模式为仅用新的像素数据替换旧的。我们还告诉 wgpu 可写入全部 4 个颜色通道:红、蓝、绿和透明度。在讨论纹理时会更多地介绍 color_state
rust
primitive: wgpu::PrimitiveState {
+    topology: wgpu::PrimitiveTopology::TriangleList, // 1.
+    strip_index_format: None,
+    front_face: wgpu::FrontFace::Ccw, // 2.
+    cull_mode: Some(wgpu::Face::Back),
+    // 将此设置为 Fill 以外的任何值都需要开启 Feature::NON_FILL_POLYGON_MODE
+    polygon_mode: wgpu::PolygonMode::Fill,
+    // 需要开启 Features::DEPTH_CLIP_CONTROL
+    unclipped_depth: false,
+    // 需要开启 Features::CONSERVATIVE_RASTERIZATION
+    conservative: false,
+},
+// continued ...

图元(primitive)字段描述了将如何解释顶点来转换为三角形。

  1. PrimitiveTopology::TriangleList 意味着每三个顶点组成一个三角形。
  2. front_face 字段告诉 wgpu 如何确定三角形的朝向。FrontFace::Ccw 指定顶点的帧缓冲区坐标(framebuffer coordinates)按逆时针顺序给出的三角形为朝前(面向屏幕外)。
  3. cull_mode 字段告诉 wgpu 如何做三角形剔除。CullMode::Back 指定朝后(面向屏幕内)的三角形会被剔除(不被渲染)。我们会在讨论缓冲区(Buffer)时详细介绍剔除问题。
rust
    depth_stencil: None, // 1.
+    multisample: wgpu::MultisampleState {
+        count: 1, // 2.
+        mask: !0, // 3.
+        alpha_to_coverage_enabled: false, // 4.
+    },
+    multiview: None, // 5.
+});

该函数的其余部分非常简单:

  1. 我们目前没有使用深度/模板缓冲区,因此将 depth_stencil 保留为 None以后会用到
  2. count 确定管线将使用多少个采样。多重采样是一个复杂的主题,因此不会在这里展开讨论。
  3. mask 指定哪些采样应处于活动状态。目前我们使用全部采样。
  4. alpha_to_coverage_enabled 与抗锯齿有关。在这里不介绍抗锯齿,因此将其保留为 false。
  5. multiview 表示渲染附件可以有多少数组层。我们不会渲染到数组纹理,因此将其设置为 None

现在我们要做的就是把 render_pipeline 添加到 State,然后就可以使用它了!

rust
// new()
+Self {
+    surface,
+    device,
+    queue,
+    config,
+    size,
+    // 新添加!
+    render_pipeline,
+}

使用管线

如果现在运行程序,它会花更多的时间来启动,但仍然只会显示我们在上一节得到的蓝屏。因为虽然我们创建了 render_pipeline,但还需要修改 render() 函数中的代码来实际使用它:

rust
// render()
+
+// ...
+{
+    // 1.
+    let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+        label: Some("Render Pass"),
+        color_attachments: &[
+            // 这就是片元着色器中 @location(0) 标记指向的颜色附件
+            Some(wgpu::RenderPassColorAttachment {
+                view: &view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(
+                        wgpu::Color {
+                            r: 0.1,
+                            g: 0.2,
+                            b: 0.3,
+                            a: 1.0,
+                        }
+                    ),
+                    store: wgpu::StoreOp::Store
+                }
+            })
+        ],
+        ..Default::default()
+    });
+
+    // 新添加!
+    render_pass.set_pipeline(&self.render_pipeline); // 2.
+    render_pass.draw(0..3, 0..1); // 3.
+}
+// ...

上面代码所做的少量修改:

  1. _render_pass 声明为可变变量并重命名为 render_pass
  2. render_pass 上设置刚刚创建的管线
  3. 告诉 wgpu 用 3 个顶点和 1 个实例(顶点的索引就是 @builtin(vertex_index) 的由来)来进行绘制。

修改完代码后,运行程序应该就能看到一个可爱的棕色三角形:

可爱的棕色三角形

挑战

创建第二个管线,使用三角形顶点的位置数据来创建一个颜色并发送给片元着色器。当你按下空格键时让应用程序交替使用两个管线。提示:你需要修改 VertexOutput

',59);function E(g,c,y,o,F,u){const a=s("WasmExample"),n=s("AutoGithubLink");return l(),e("div",null,[d,i(a,{example:"tutorial3_pipeline"}),i(n)])}const A=p(r,[["render",E]]);export{m as __pageData,A as default}; diff --git a/assets/beginner_tutorial3-pipeline_index.md.QN_XsHwE.lean.js b/assets/beginner_tutorial3-pipeline_index.md.QN_XsHwE.lean.js new file mode 100644 index 000000000..bf6f6b023 --- /dev/null +++ b/assets/beginner_tutorial3-pipeline_index.md.QN_XsHwE.lean.js @@ -0,0 +1 @@ +import{_ as p,D as s,o as l,c as e,I as i,R as h}from"./chunks/framework.bMtwhlie.js";const k="/learn-wgpu-zh/assets/tutorial3-pipeline-vertices.potCe9z0.png",t="/learn-wgpu-zh/assets/tutorial3-pipeline-triangle.OIWoZQoT.png",m=JSON.parse('{"title":"管线 (Pipeline)","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial3-pipeline/index.md","filePath":"beginner/tutorial3-pipeline/index.md","lastUpdated":1703303099000}'),r={name:"beginner/tutorial3-pipeline/index.md"},d=h("",59);function E(g,c,y,o,F,u){const a=s("WasmExample"),n=s("AutoGithubLink");return l(),e("div",null,[d,i(a,{example:"tutorial3_pipeline"}),i(n)])}const A=p(r,[["render",E]]);export{m as __pageData,A as default}; diff --git a/assets/beginner_tutorial3-pipeline_index.md.de2ce2ed.js b/assets/beginner_tutorial3-pipeline_index.md.de2ce2ed.js deleted file mode 100644 index 6a76f40b4..000000000 --- a/assets/beginner_tutorial3-pipeline_index.md.de2ce2ed.js +++ /dev/null @@ -1,120 +0,0 @@ -import{_ as p,E as s,o as e,c as o,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/tutorial3-pipeline-vertices.eaa25f33.png",t="/learn-wgpu-zh/assets/tutorial3-pipeline-triangle.d560b6f2.png",g=JSON.parse('{"title":"管线 (Pipeline)","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial3-pipeline/index.md","filePath":"beginner/tutorial3-pipeline/index.md","lastUpdated":1701933923000}'),F={name:"beginner/tutorial3-pipeline/index.md"},D=r('

管线 (Pipeline)

什么是管线?

管线ComputePipelineRenderPipeline)由一系列资源绑定、可编程阶段(着色器)设置及固定功能状态组成。它代表了由 GPU 硬件、驱动程序和用户代理组合而成的完整功能对象,描述了 GPU 将对一组数据执行的所有操作。在本节中,我们将具体创建一个渲染管线RenderPipeline)。

什么是着色器?

着色器(Shader)是你发送给 GPU 的微型程序,用于对数据进行操作。有三种主要类型的着色器:顶点(Vertex)、片元(Fragment)和计算(Compute)着色器。另外还有其他的如几何着色器,但它们属于进阶话题。现在,我们只需要使用顶点和片元着色器。

什么是顶点和片元?

顶点(Vertex)就是三维(或二维)空间中的一个点。这些顶点会两个一组以构成线段集合,或者三个一组以构成三角形集合。

Vertices Graphic

从简单的立方体到复杂的人体结构,大多数现代渲染系统都使用三角形来建模所有图形。这些三角形被存储为构成三角形角的顶点。

我们使用顶点着色器来操作顶点,以便按我们想要的样子做图形的变换。

然后顶点经过光栅化(rasterization)后流转到片元着色阶段,片元着色器决定了片元的颜色。渲染结果图像中的每个像素至少对应一个片元,每个片元可输出一个颜色,该颜色会被存储到其相应的像素上(准确的说,片元的输出是存储到 Color Attachment 的纹素上)。

WebGPU 着色器语言: WGSL

WGSL (WebGPU Shading Language) 是 WebGPU 的着色器语言。 WGSL 的开发重点是让它轻松转换为与后端对应的着色器语言;例如,Vulkan 的 SPIR-V、Metal 的 MSL、DX12 的 HLSL 和 OpenGL 的 GLSL。 这种转换是在内部完成的,我们不需要关心这些细节。 就 wgpu 而言,它是由名为 naga完成的。

WGSL 着色器语言 一章中,有对 WGSL 的由来及语法的更详细介绍。

WGSL 规范及其在 WGPU 中的应用仍在开发中。如果在使用中遇到问题,你或许希望 https://app.element.io/#/room/#wgpu:matrix.org 社区的人帮忙看一下你的代码。

编写着色器

main.rs 所在的目录中创建一个 shader.wgsl 文件。在其中写入以下代码:

rust
// 顶点着色器
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-};
-
-@vertex
-fn vs_main(
-    @builtin(vertex_index) in_vertex_index: u32,
-) -> VertexOutput {
-    var out: VertexOutput;
-    let x = f32(1 - i32(in_vertex_index)) * 0.5;
-    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
-    out.clip_position = vec4f(x, y, 0.0, 1.0);
-    return out;
-}

首先,声明一个 struct 来存储顶点着色器的输出。目前只有一个字段,即 clip_position@builtin(position) 属性标记了此字段将作为顶点在裁剪坐标系中的位置来使用。这类似于 GLSL 的 gl_Position 变量。

形如 vec4 的向量类型是泛型。目前你必须指定向量将包含的值的类型。因此一个使用 32 位浮点数的 3 维向量写做 vec3f

着色器代码的下一部分是 vs_main 函数。@vertex 属性标记了这个函数是顶点着色器的有效入口。我们预期有一个 u32 类型的变量 in_vertex_index,它的值来自 @builtin(vertex_index)

然后使用 VertexOutput 结构体声明一个名为 out 的变量。我们为顶点的裁剪空间坐标创建另外两个 x y 变量。

f32()i32() 表示类型强制转换,将括号里的值转换为此类型。

现在我们可以把 clip_position 保存到 out。然后只需返回 out 就完成了顶点着色器的工作!

我们也可以不使用 stuct,直接按以下代码来实现:

rust
@vertex
-fn vs_main(
-    @builtin(vertex_index) in_vertex_index: u32
-) -> @builtin(position) vec4f {
-    // 顶点着色器 code...
-}

接下来是片元着色器。还是在 shader.wgsl 中添加以下代码:

rust
// 片元着色器
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return vec4f(0.3, 0.2, 0.1, 1.0);
-}

这将当前片元的颜色设置为棕色。

注意,顶点和片元着色器的入口点分别被命名为 vs_mainfs_main。在 wgpu 的早期版本中,这两个函数有相同的名字是可以的,但较新版本的 WGSL spec 要求这些名字必须不同。因此在整个教程中都使用(从 wgpu demo 中采用)上述命名方案。

@location(0) 属性标记了该函数返回的 vec4 值将存储在第一个颜色附件(Color Attachment)中。

使用着色器

终于要用到本章节标题提到的概念 管线(Pipeline)了。首先,我们来修改 State 以包括以下代码。

rust
// lib.rs
-struct State {
-    surface: wgpu::Surface,
-    device: wgpu::Device,
-    queue: wgpu::Queue,
-    config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
-    // 新添加!
-    render_pipeline: wgpu::RenderPipeline,
-}

现在,开始在 new() 函数内创建管线。我们需要载入先前写的,渲染管线所需要的着色器。

rust
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
-    label: Some("Shader"),
-    source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
-});

也可以使用 include_wgsl! 宏作为创建 ShaderModuleDescriptor 的快捷方式。

rust
let shader = device.create_shader_module(include_wgsl!("shader.wgsl"));

还需要创建一个 PipelineLayout。在讲完缓冲区Buffer)之后,我们会对它有更多地了解。

rust
let render_pipeline_layout =
-    device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-        label: Some("Render Pipeline Layout"),
-        bind_group_layouts: &[],
-        push_constant_ranges: &[],
-    });

最后,我们就获得了创建 render_pipeline 所需的全部资源:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    label: Some("Render Pipeline"),
-    layout: Some(&render_pipeline_layout),
-    vertex: wgpu::VertexState {
-        module: &shader,
-        entry_point: "vs_main", // 1.
-        buffers: &[], // 2.
-    },
-    fragment: Some(wgpu::FragmentState { // 3.
-        module: &shader,
-        entry_point: "fs_main",
-        targets: &[Some(wgpu::ColorTargetState { // 4.
-            format: config.format,
-            blend: Some(wgpu::BlendState::REPLACE),
-            write_mask: wgpu::ColorWrites::ALL,
-        })],
-    }),
-    // ...

有几点需要注意:

  1. 可以在这里指定着色器中的哪个函数应该是入口点( entry_point)。那是我们用 @vertex@fragment 标记的函数。
  2. buffers 字段告诉 wgpu 要把什么类型的顶点数据传递给顶点着色器。我们会在顶点着色器中指定顶点,所以这里先留空。下一个教程中会在此加入一些数据。
  3. fragment 字段是 Option 类型,所以必须用 Some() 来包装 FragmentState 实例。如果想把颜色数据存储到 surface 就需要用到它 。
  4. targets 字段告诉 wgpu 应该设置哪些颜色输出目标。目前只需设置一个输出目标。格式指定为使用 surface 的格式,并且指定混合模式为仅用新的像素数据替换旧的。我们还告诉 wgpu 可写入全部 4 个颜色通道:红、蓝、绿和透明度。在讨论纹理时会更多地介绍 color_state
rust
primitive: wgpu::PrimitiveState {
-    topology: wgpu::PrimitiveTopology::TriangleList, // 1.
-    strip_index_format: None,
-    front_face: wgpu::FrontFace::Ccw, // 2.
-    cull_mode: Some(wgpu::Face::Back),
-    // 将此设置为 Fill 以外的任何值都要需要开启 Feature::NON_FILL_POLYGON_MODE
-    polygon_mode: wgpu::PolygonMode::Fill,
-    // 需要开启 Features::DEPTH_CLIP_CONTROL
-    unclipped_depth: false,
-    // 需要开启 Features::CONSERVATIVE_RASTERIZATION
-    conservative: false,
-},
-// continued ...

图元(primitive)字段描述了将如何解释顶点来转换为三角形。

  1. PrimitiveTopology::TriangleList 意味着每三个顶点组成一个三角形。
  2. front_face 字段告诉 wgpu 如何确定三角形的朝向。FrontFace::Ccw 指定顶点的帧缓冲区坐标(framebuffer coordinates)按逆时针顺序给出的三角形为朝前(面向屏幕外)。
  3. cull_mode 字段告诉 wgpu 如何做三角形剔除。CullMode::Back 指定朝后(面向屏幕内)的三角形会被剔除(不被渲染)。我们会在讨论缓冲区(Buffer)时详细介绍剔除问题。
rust
depth_stencil: None, // 1.
-    multisample: wgpu::MultisampleState {
-        count: 1, // 2.
-        mask: !0, // 3.
-        alpha_to_coverage_enabled: false, // 4.
-    },
-    multiview: None, // 5.
-});

该函数的其余部分非常简单:

  1. 我们目前没有使用深度/模板缓冲区,因此将 depth_stencil 保留为 None以后会用到
  2. count 确定管线将使用多少个采样。多重采样是一个复杂的主题,因此不会在这里展开讨论。
  3. mask 指定哪些采样应处于活动状态。目前我们使用全部采样。
  4. alpha_to_coverage_enabled 与抗锯齿有关。在这里不介绍抗锯齿,因此将其保留为 false。
  5. multiview 表示渲染附件可以有多少数组层。我们不会渲染到数组纹理,因此将其设置为 None

现在我们要做的就是把 render_pipeline 添加到 State,然后就可以使用它了!

rust
// new()
-Self {
-    surface,
-    device,
-    queue,
-    config,
-    size,
-    // 新添加!
-    render_pipeline,
-}

使用管线

如果现在运行程序,它会花更多的时间来启动,但仍然只会显示我们在上一节得到的蓝屏。因为虽然我们创建了 render_pipeline,但还需要修改 render() 函数中的代码来实际使用它:

rust
// render()
-
-// ...
-{
-    // 1.
-    let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-        label: Some("Render Pass"),
-        color_attachments: &[
-            // 这就是片元着色器中 @location(0) 标记指向的颜色附件
-            Some(wgpu::RenderPassColorAttachment {
-                view: &view,
-                resolve_target: None,
-                ops: wgpu::Operations {
-                    load: wgpu::LoadOp::Clear(
-                        wgpu::Color {
-                            r: 0.1,
-                            g: 0.2,
-                            b: 0.3,
-                            a: 1.0,
-                        }
-                    ),
-                    store: wgpu::StoreOp::Store
-                }
-            })
-        ],
-        ..Default::default()
-    });
-
-    // 新添加!
-    render_pass.set_pipeline(&self.render_pipeline); // 2.
-    render_pass.draw(0..3, 0..1); // 3.
-}
-// ...

上面代码所做的少量修改:

  1. _render_pass 声明为可变变量并重命名为 render_pass
  2. render_pass 上设置刚刚创建的管线
  3. 告诉 wgpu 用 3 个顶点和 1 个实例(实例的索引就是 @builtin(vertex_index) 的由来)来进行绘制。

修改完代码后,运行程序应该就能看到一个可爱的棕色三角形:

可爱的棕色三角形

挑战

创建第二个管线,使用三角形顶点的位置数据来创建一个颜色并发送给片元着色器。当你按下空格键时让应用程序交替使用两个管线。提示:你需要修改 VertexOutput

',59);function i(y,C,A,d,u,b){const a=s("WasmExample"),l=s("AutoGithubLink");return e(),o("div",null,[D,n(a,{example:"tutorial3_pipeline"}),n(l)])}const B=p(F,[["render",i]]);export{g as __pageData,B as default}; diff --git a/assets/beginner_tutorial3-pipeline_index.md.de2ce2ed.lean.js b/assets/beginner_tutorial3-pipeline_index.md.de2ce2ed.lean.js deleted file mode 100644 index 8c048c1a9..000000000 --- a/assets/beginner_tutorial3-pipeline_index.md.de2ce2ed.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o as e,c as o,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/tutorial3-pipeline-vertices.eaa25f33.png",t="/learn-wgpu-zh/assets/tutorial3-pipeline-triangle.d560b6f2.png",g=JSON.parse('{"title":"管线 (Pipeline)","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial3-pipeline/index.md","filePath":"beginner/tutorial3-pipeline/index.md","lastUpdated":1701933923000}'),F={name:"beginner/tutorial3-pipeline/index.md"},D=r("",59);function i(y,C,A,d,u,b){const a=s("WasmExample"),l=s("AutoGithubLink");return e(),o("div",null,[D,n(a,{example:"tutorial3_pipeline"}),n(l)])}const B=p(F,[["render",i]]);export{g as __pageData,B as default}; diff --git a/assets/beginner_tutorial4-buffer_index.md.41a21df0.js b/assets/beginner_tutorial4-buffer_index.md.41a21df0.js deleted file mode 100644 index 001adf2cd..000000000 --- a/assets/beginner_tutorial4-buffer_index.md.41a21df0.js +++ /dev/null @@ -1,225 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const 
c="/learn-wgpu-zh/assets/vb_desc.4c4c981a.png",t="/learn-wgpu-zh/assets/triangle.59fc4a55.png",F="/learn-wgpu-zh/assets/pentagon.23871f8a.png",D="/learn-wgpu-zh/assets/indexed-pentagon.e22309a1.png",g=JSON.parse('{"title":"缓冲区与索引","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial4-buffer/index.md","filePath":"beginner/tutorial4-buffer/index.md","lastUpdated":1701933923000}'),y={name:"beginner/tutorial4-buffer/index.md"},C=r(`

缓冲区与索引

终于要讨论它们了!

你可能已经厌倦了我老说 "我们会在讨论缓冲区的时候再详细介绍" 之类的话。现在终于到了谈论缓冲区的时候了,但首先...

什么是缓冲区?

缓冲区(Buffer)一个可用于 GPU 操作的内存块。缓冲区数据是以线性布局存储的,这意味着分配的每个字节都可以通过其从缓冲区开始的偏移量来寻址,但要根据操作的不同而有对齐限制。

缓冲区常用于存储结构体或数组等简单的数据,但也可以存储更复杂的数据,如树等图结构(只要所有节点都存储在一起,且不引用缓冲区以外的任何数据)。我们会经常用到缓冲区,所以让我们从最重要的两个开始:顶点缓冲区(Vertex Buffer)和索引缓冲区(Index Buffer)。

顶点缓冲区

之前我们是直接在顶点着色器中存储的顶点数据。这在学习的起始阶段很有效,但这不是长远之计,因为需要绘制的对象的类型会有不同的大小,且每当需要更新模型时就得重新编译着色器,这会大大减慢我们的程序。我们将改为使用顶点缓冲区来存储想要绘制的顶点数据。在此之前,需要创建一个新的结构体来描述顶点:

rust
// lib.rs
-#[repr(C)]
-#[derive(Copy, Clone, Debug)]
-struct Vertex {
-    position: [f32; 3],
-    color: [f32; 3],
-}

每个顶点都会有一个位置(position)和颜色(color)字段。位置代表顶点在三维空间中的 x、y 和 z 坐标。颜色是顶点的红、绿、蓝三通道色值。我们需要令 Vertex 支持 Copy trait,这样就可以用它创建一个缓冲区。

接下来,需要构成三角形的实际顶点数据。在 Vertex 下面添加以下代码:

rust
// lib.rs
-const VERTICES: &[Vertex] = &[
-    Vertex { position: [0.0, 0.5, 0.0], color: [1.0, 0.0, 0.0] },
-    Vertex { position: [-0.5, -0.5, 0.0], color: [0.0, 1.0, 0.0] },
-    Vertex { position: [0.5, -0.5, 0.0], color: [0.0, 0.0, 1.0] },
-];

按逆时针顺序排列顶点:上、左下、右下。这样做的部分理由是出于惯例,但主要是因为我们在 render_pipelineprimitive 中指定了三角形的 front_faceCcw(counter-clockwise),这样就可以做背面剔除。这意味着任何面向我们的三角形的顶点都应该是按逆时针顺序排列。

现在有了顶点数据,需要将其存储在一个缓冲区中。让我们给 State 添加再一个 vertex_buffer 字段:

rust
// lib.rs
-struct State {
-    // ...
-    render_pipeline: wgpu::RenderPipeline,
-
-    // 新添加!
-    vertex_buffer: wgpu::Buffer,
-
-    // ...
-}

接着在 new() 函数中创建顶点缓冲区:

rust
// new()
-let vertex_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Vertex Buffer"),
-        contents: bytemuck::cast_slice(VERTICES),
-        usage: wgpu::BufferUsages::VERTEX,
-    }
-);

为了访问 wgpu::Device 上的 create_buffer_init 方法,我们须导入 DeviceExt 扩展 trait。关于扩展 trait 的更多信息,请查看这篇文章

要导入扩展 trait,只需在 lib.rs 的顶部放上这一行:

rust
use wgpu::util::DeviceExt;

你应该注意到我们使用了 bytemuck 来将 VERTICES 转换为 &[u8]create_buffer_init() 函数的参数类型是 &[u8],而 bytemuck::cast_slice 为我们实现了此类型转换。为此需在 Cargo.toml 中添加以下依赖项:

toml
bytemuck = { version = "1.14", features = [ "derive" ] }

我们还需要实现两个 trait 来使 bytemuck 工作。它们是 bytemuck::Podbytemuck::ZeroablePod 表示 Vertex"Plain Old Data" 数据类型,因此可以被解释为 &[u8] 类型。Zeroable 表示可以对其使用 std::mem::zeroed()。下面修改 Vertex 结构体来派生这些 trait:

rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-struct Vertex {
-    position: [f32; 3],
-    color: [f32; 3],
-}

结构体里包含了没有实现 PodZeroable 的类型时,就需要手动实现这些 trait。这些 trait 不需要我们实现任何函数,只需像下面这样来让代码工作:

rust
unsafe impl bytemuck::Pod for Vertex {}
-unsafe impl bytemuck::Zeroable for Vertex {}

最终,我们可以把 vertex_buffer 添加到 State 结构体中了:

rust
Self {
-    surface,
-    device,
-    queue,
-    config,
-    size,
-    render_pipeline,
-    vertex_buffer,
-}

接下来怎么做?

我们需要告诉 render_pipeline 在绘制时使用这个缓冲区,但首先需要告诉它如何读取此缓冲区。顶点缓冲区布局(VertexBufferLayout)对象和 vertex_buffers 字段可以用来完成这件事,我保证在创建 render_pipeline 时会详细讨论这个问题。

顶点缓冲区布局对象定义了缓冲区在内存中的表示方式,render_pipeline 需要它来在着色器中映射缓冲区。下面是填充了顶点的一个缓冲区的布局:

rust
wgpu::VertexBufferLayout {
-    array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress, // 1.
-    step_mode: wgpu::VertexStepMode::Vertex, // 2.
-    attributes: &[ // 3.
-        wgpu::VertexAttribute {
-            offset: 0, // 4.
-            shader_location: 0, // 5.
-            format: wgpu::VertexFormat::Float32x3, // 6.
-        },
-        wgpu::VertexAttribute {
-            offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
-            shader_location: 1,
-            format: wgpu::VertexFormat::Float32x3,
-        }
-    ]
-}
  1. array_stride 定义了一个顶点所占的字节数。当着色器读取下一个顶点时,它将跳过 array_stride 的字节数。在我们的例子中,array_stride 是 24 个字节。
  2. step_mode 告诉管线此缓冲区中的数组数据中的每个元素代表的是每个顶点还是每个实例的数据,如果只想在开始绘制一个新实例时改变顶点,就可以设置为 wgpu::VertexStepMode::Instance。在后面的教程里我们会讲解实例化绘制。
  3. attributes 描述顶点的各个属性(Attribute)的布局。一般来说,这与结构体的字段是 1:1 映射的,在我们的案例中也是如此。
  4. offset 定义了属性在一个顶点元素中的字节偏移量。对于第一个属性,偏移量通常为零。其后属性的偏移量应为在其之前各属性的 size_of 之和。
  5. shader_location 告诉着色器要在什么位置存储这个属性。例如 @location(0) x: vec3f 在顶点着色器中对应于 Vertex 结构体的 position 字段,而 @location(1) x: vec3f 对应 color 字段。
  6. format 告诉着色器该属性的数据格式。Float32x3对应于着色器代码中的 vec3f。我们可以在一个属性中存储的最大值是Float32x4Uint32x4Sint32x4 也可以)。当我们需要存储比 Float32x4 更大的东西时请记住这一点。

对于视觉学习者来说,我们的顶点缓冲区看起来是这样的:

A figure of the VertexBufferLayout

让我们在 Vertex 上创建一个静态函数来返回此布局对象:

rust
// lib.rs
-impl Vertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        wgpu::VertexBufferLayout {
-            array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    shader_location: 0,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
-                    shader_location: 1,
-                    format: wgpu::VertexFormat::Float32x3,
-                }
-            ]
-        }
-    }
-}

像上边那样指定属性是非常冗长的。我们可以使用 wgpu 提供的 vertex_attr_array 宏来清理一下。现在 VertexBufferLayout 变成了这样:

rust
wgpu::VertexBufferLayout {
-    array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
-    step_mode: wgpu::VertexStepMode::Vertex,
-    attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3],
-}

这无疑很棒,但 Rust 认为 vertex_attr_array 的结果是一个临时值,所以需要进行调整才能从一个函数中返回。我们可以将wgpu::VertexBufferLayout 的生命周期改为 'static,或者使其成为 const。示例如下:

rust
impl Vertex {
-    const ATTRIBS: [wgpu::VertexAttribute; 2] =
-        wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3];
-
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<Self>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &Self::ATTRIBS,
-        }
-    }
-}

不管怎么说,我觉得展示数据如何被映射是件好事,所以暂时不会使用这个宏。

现在我们可以在创建 render_pipeline 时使用它了:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    // ...
-    vertex: wgpu::VertexState {
-        // ...
-        buffers: &[
-            Vertex::desc(),
-        ],
-    },
-    // ...
-});

还需要在渲染函数中实际设置顶点缓冲区,否则程序会崩溃。

rust
// render()
-render_pass.set_pipeline(&self.render_pipeline);
-// 新添加!
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-render_pass.draw(0..3, 0..1);

set_vertex_buffer 函数接收两个参数,第一个参数是顶点缓冲区要使用的缓冲槽索引。你可以连续设置多个顶点缓冲区。

第二个参数是要使用的缓冲区的数据片断。你可以在硬件允许的情况下在一个缓冲区中存储尽可能多的对象,所以 slice 允许我们指定使用缓冲区的哪一部分。我们用 .. 来指定整个缓冲区。

在继续之前,我们需要修改 render_pass.draw() 的调用来使用 VERTICES 所指定的顶点数量。在 State 中添加一个num_vertices,令其值等于 VERTICES.len()

rust
// lib.rs
-
-struct State {
-    // ...
-    num_vertices: u32,
-}
-
-impl State {
-    // ...
-    fn new(...) -> Self {
-        // ...
-        let num_vertices = VERTICES.len() as u32;
-
-        Self {
-            surface,
-            device,
-            queue,
-            config,
-            render_pipeline,
-            vertex_buffer,
-            num_vertices,
-            size,
-        }
-    }
-}

然后在绘制命令中使用它:

rust
// render
-render_pass.draw(0..self.num_vertices, 0..1);

在上面的修改生效之前,还需要更新着色器,以便从顶点缓冲区中获取数据。

rust
// 顶点着色器
-
-struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) color: vec3f,
-};
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) color: vec3f,
-};
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-) -> VertexOutput {
-    var out: VertexOutput;
-    out.color = model.color;
-    out.clip_position = vec4f(model.position, 1.0);
-    return out;
-}
-
-// 片元着色器
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return vec4f(in.color, 1.0);
-}

如果做的正确无误,运行程序应该就能看到一个下边这样的三角形:

A colorful triangle

索引缓冲区

从技术的角度来看,目前的示例并不需要索引缓冲区,但它们仍然很有用。当开始使用有大量三角形的模型时,索引缓冲区就会发挥作用。考虑一下下边的五边形:

A pentagon made of 3 triangles

它总共有 5 个顶点和 3 个三角形。现在,如果我们想只用顶点来显示这样的东西,我们就需要以下顶点数据:

rust
const VERTICES: &[Vertex] = &[
-    Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
-    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
-
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
-    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
-
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
-    Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
-    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
-];

你会注意到有些顶点被使用了不止一次。C 和 B 顶点被使用了两次,而 E 顶点被重复使用了 3 次。假设每个浮点数是 4 个字节,那么这意味着在我们用于 VERTICES 的 216 个字节中,有 96 个字节是重复的数据。如果能只把这些顶点列出来一次不是很好吗?我们可以做到这一点!

这,就是索引缓冲区发挥作用的地方。

大体上来说,我们在 VERTICES 中存储所有唯一的顶点,我们创建另一个缓冲区,将索引存储在 VERTICES 中的元素以创建三角形。下面还是以五边形为例:

rust
// lib.rs
-const VERTICES: &[Vertex] = &[
-    Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
-    Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
-    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
-];
-
-const INDICES: &[u16] = &[
-    0, 1, 4,
-    1, 2, 4,
-    2, 3, 4,
-];

现在这种设置下,VERTICES 占用了 120 个字节,而 INDICES 只有 18 个字节,因为 u16 类型是 2 个字节长。在这种情况下,wgpu 会自动增加 2 个字节的填充,以确保缓冲区被对齐到 4 个字节,但它仍然只有 20 个字节。五边形总共是 140 字节,这意味着我们节省了 76 个字节! 这可能看起来不多,但当处理数十万的三角形时,索引可以节省大量的内存。

为了使用索引,有几处我们需要修改。首先需要创建一个缓冲区来存储索引。在 Statenew() 函数中,创建了 vertex_buffer 之后创建 index_buffer。同时将 num_vertices 改为num_indices,令其值等于 INDICES.len()

rust
let vertex_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Vertex Buffer"),
-        contents: bytemuck::cast_slice(VERTICES),
-        usage: wgpu::BufferUsages::VERTEX,
-    }
-);
-// 新添加!
-let index_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Index Buffer"),
-        contents: bytemuck::cast_slice(INDICES),
-        usage: wgpu::BufferUsages::INDEX,
-    }
-);
-let num_indices = INDICES.len() as u32;

我们不需要为索引实现 PodZeroable,因为 bytemuck 已经为 u16 等基本类型实现了它们。只需将 index_buffernum_indices 添加到 State 结构体中。

rust
struct State {
-    surface: wgpu::Surface,
-    device: wgpu::Device,
-    queue: wgpu::Queue,
-    config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
-    render_pipeline: wgpu::RenderPipeline,
-    vertex_buffer: wgpu::Buffer,
-    // 新添加!
-    index_buffer: wgpu::Buffer,
-    num_indices: u32,
-}

然后在构造函数中填充这些字段:

rust
Self {
-    surface,
-    device,
-    queue,
-    config,
-    size,
-    render_pipeline,
-    vertex_buffer,
-    // 新添加!
-    index_buffer,
-    num_indices,
-}

我们现在所要做的就是更新 render() 函数来使用 index_buffer

rust
// render()
-render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); // 1.
-render_pass.draw_indexed(0..self.num_indices, 0, 0..1); // 2.

有几点需要注意:

  1. 命令名称是 set_index_buffer 而不是 set_index_buffers, 一次绘制(draw_XXX())只能设置一个索引缓冲区。但是,你可以在一个渲染通道内调用多次绘制,每次都设置不同的索引缓冲区。
  2. 当使用索引缓冲区时,需使用 draw_indexed 来绘制,draw 命令会忽略索引缓冲区。还需确保你使用的是索引数(num_indices)而非顶点数,否则你的模型要么画错,要么因为没有足够的索引数而导致程序恐慌(panic)。

完成这些后,运行程序应该就能看到窗口里有一个洋红色的五边形了:

Magenta pentagon in window

颜色校正

如果在洋红色五角星上使用取色器,你会得到一个 #BC00BC 的十六进制值。如果把它转换成 RGB 值会得到(188, 0, 188),将这些值除以 255 使其映射进 [0,1] 范围,大致会得到(0.737254902,0,0.737254902)。这与我们赋给顶点颜色的值不同,后者是(0.5, 0.0, 0.5)。其原因与色彩空间(Color Space)有关。

大多数显示器使用的色彩空间被称为 sRGB(事实上,目前市面上的中高端显示器已经支持 DisplayP3 甚至是 BT.2100 等广色域色彩空间,macOS 与 iOS 设备默认使用的就是 DisplayP3 色彩空间)。我们的展示平面(完全取决于从 surface.get_capabilities(&adapter).formats 返回的格式)默认支持 sRGB 纹理格式。sRGB 格式是根据颜色的相对亮度而不是实际亮度来存储的。其原因是人眼对光线的感知不是线性的。我们注意到较深的颜色比较浅的颜色有更多差异。

可以用下面的公式得到一个正确颜色的近似值。srgb_color = (rgb_color / 255) ^ 2.2。在 RGB 值为 (188, 0, 188) 的情况下,我们将得到 (0.511397819, 0.0, 0.511397819)。与我们的(0.5, 0.0, 0.5)有点偏差。虽然你可以通过调整公式来获得所需的数值,但使用纹理可能会节省很多时间,因为它们默认是以 sRGB 方式存储的,所以不会像顶点颜色那样出现颜色不准确的情况。我们会在下一课中介绍纹理。

假如你的显示设备使用的是 DisplayP3 或 BT.2100 等广色域色彩空间,那么当你使用取色器检查屏幕上的渲染结果时,拾取到的色值将与着色器内的返回值不一致。

这是因为目前 WebGPU 仅支持较小色域的 sRGB 色彩空间,而硬件会执行色彩空间转换(color space conversion)将 sRGB 色值映射到更广的色域来显示到屏幕上,因此,使用取色器拾取到的色值是经过转换后的值。

挑战

使用顶点缓冲区和索引缓冲区创建一个比教程里做的更复杂的形状(也就是三个以上的三角形),并用空格键在两者之间切换。

',80);function i(A,u,b,d,m,B){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[C,n(a,{example:"tutorial4_buffer"}),n(l)])}const _=p(y,[["render",i]]);export{g as __pageData,_ as default}; diff --git a/assets/beginner_tutorial4-buffer_index.md.41a21df0.lean.js b/assets/beginner_tutorial4-buffer_index.md.41a21df0.lean.js deleted file mode 100644 index 351f61de8..000000000 --- a/assets/beginner_tutorial4-buffer_index.md.41a21df0.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/vb_desc.4c4c981a.png",t="/learn-wgpu-zh/assets/triangle.59fc4a55.png",F="/learn-wgpu-zh/assets/pentagon.23871f8a.png",D="/learn-wgpu-zh/assets/indexed-pentagon.e22309a1.png",g=JSON.parse('{"title":"缓冲区与索引","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial4-buffer/index.md","filePath":"beginner/tutorial4-buffer/index.md","lastUpdated":1701933923000}'),y={name:"beginner/tutorial4-buffer/index.md"},C=r("",80);function i(A,u,b,d,m,B){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[C,n(a,{example:"tutorial4_buffer"}),n(l)])}const _=p(y,[["render",i]]);export{g as __pageData,_ as default}; diff --git a/assets/beginner_tutorial4-buffer_index.md.Afffgpgg.js b/assets/beginner_tutorial4-buffer_index.md.Afffgpgg.js new file mode 100644 index 000000000..9311f1b95 --- /dev/null +++ b/assets/beginner_tutorial4-buffer_index.md.Afffgpgg.js @@ -0,0 +1,225 @@ +import{_ as h,D as s,o as p,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const 
t="/learn-wgpu-zh/assets/vb_desc.PyYTSl9s.png",e="/learn-wgpu-zh/assets/triangle.o8T8tm7l.png",r="/learn-wgpu-zh/assets/pentagon.GLgl43gd.png",E="/learn-wgpu-zh/assets/indexed-pentagon.hkUQ8suQ.png",B=JSON.parse('{"title":"缓冲区与索引","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial4-buffer/index.md","filePath":"beginner/tutorial4-buffer/index.md","lastUpdated":1703303099000}'),d={name:"beginner/tutorial4-buffer/index.md"},g=k(`

缓冲区与索引

终于要讨论它们了!

你可能已经厌倦了我老说 "我们会在讨论缓冲区的时候再详细介绍" 之类的话。现在终于到了谈论缓冲区的时候了,但首先...

什么是缓冲区?

缓冲区(Buffer)一个可用于 GPU 操作的内存块。缓冲区数据是以线性布局存储的,这意味着分配的每个字节都可以通过其从缓冲区开始的偏移量来寻址,但要根据操作的不同而有对齐限制。

缓冲区常用于存储结构体或数组等简单的数据,但也可以存储更复杂的数据,如树等图结构(只要所有节点都存储在一起,且不引用缓冲区以外的任何数据)。我们会经常用到缓冲区,所以让我们从最重要的两个开始:顶点缓冲区(Vertex Buffer)和索引缓冲区(Index Buffer)。

顶点缓冲区

之前我们是直接在顶点着色器中存储的顶点数据。这在学习的起始阶段很有效,但这不是长远之计,因为需要绘制的对象的类型会有不同的大小,且每当需要更新模型时就得重新编译着色器,这会大大减慢我们的程序。我们将改为使用顶点缓冲区来存储想要绘制的顶点数据。在此之前,需要创建一个新的结构体来描述顶点:

rust
// lib.rs
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+struct Vertex {
+    position: [f32; 3],
+    color: [f32; 3],
+}

每个顶点都会有一个位置(position)和颜色(color)字段。位置代表顶点在三维空间中的 x、y 和 z 坐标。颜色是顶点的红、绿、蓝三通道色值。我们需要令 Vertex 支持 Copy trait,这样就可以用它创建一个缓冲区。

接下来,需要构成三角形的实际顶点数据。在 Vertex 下面添加以下代码:

rust
// lib.rs
+const VERTICES: &[Vertex] = &[
+    Vertex { position: [0.0, 0.5, 0.0], color: [1.0, 0.0, 0.0] },
+    Vertex { position: [-0.5, -0.5, 0.0], color: [0.0, 1.0, 0.0] },
+    Vertex { position: [0.5, -0.5, 0.0], color: [0.0, 0.0, 1.0] },
+];

按逆时针顺序排列顶点:上、左下、右下。这样做的部分理由是出于惯例,但主要是因为我们在 render_pipelineprimitive 中指定了三角形的 front_faceCcw(counter-clockwise),这样就可以做背面剔除。这意味着任何面向我们的三角形的顶点都应该是按逆时针顺序排列。

现在有了顶点数据,需要将其存储在一个缓冲区中。让我们给 State 添加再一个 vertex_buffer 字段:

rust
// lib.rs
+struct State {
+    // ...
+    render_pipeline: wgpu::RenderPipeline,
+
+    // 新添加!
+    vertex_buffer: wgpu::Buffer,
+
+    // ...
+}

接着在 new() 函数中创建顶点缓冲区:

rust
// new()
+let vertex_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Vertex Buffer"),
+        contents: bytemuck::cast_slice(VERTICES),
+        usage: wgpu::BufferUsages::VERTEX,
+    }
+);

为了访问 wgpu::Device 上的 create_buffer_init 方法,我们须导入 DeviceExt 扩展 trait。关于扩展 trait 的更多信息,请查看这篇文章

要导入扩展 trait,只需在 lib.rs 的顶部放上这一行:

rust
use wgpu::util::DeviceExt;

你应该注意到我们使用了 bytemuck 来将 VERTICES 转换为 &[u8]create_buffer_init() 函数的参数类型是 &[u8],而 bytemuck::cast_slice 为我们实现了此类型转换。为此需在 Cargo.toml 中添加以下依赖项:

toml
bytemuck = { version = "1.14", features = [ "derive" ] }

我们还需要实现两个 trait 来使 bytemuck 工作。它们是 bytemuck::Podbytemuck::ZeroablePod 表示 Vertex"Plain Old Data" 数据类型,因此可以被解释为 &[u8] 类型。Zeroable 表示可以对其使用 std::mem::zeroed()。下面修改 Vertex 结构体来派生这些 trait:

rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+struct Vertex {
+    position: [f32; 3],
+    color: [f32; 3],
+}

结构体里包含了没有实现 PodZeroable 的类型时,就需要手动实现这些 trait。这些 trait 不需要我们实现任何函数,只需像下面这样来让代码工作:

rust
unsafe impl bytemuck::Pod for Vertex {}
+unsafe impl bytemuck::Zeroable for Vertex {}

最终,我们可以把 vertex_buffer 添加到 State 结构体中了:

rust
Self {
+    surface,
+    device,
+    queue,
+    config,
+    size,
+    render_pipeline,
+    vertex_buffer,
+}

接下来怎么做?

我们需要告诉 render_pipeline 在绘制时使用这个缓冲区,但首先需要告诉它如何读取此缓冲区。顶点缓冲区布局(VertexBufferLayout)对象和 vertex_buffers 字段可以用来完成这件事,我保证在创建 render_pipeline 时会详细讨论这个问题。

顶点缓冲区布局对象定义了缓冲区在内存中的表示方式,render_pipeline 需要它来在着色器中映射缓冲区。下面是填充了顶点的一个缓冲区的布局:

rust
wgpu::VertexBufferLayout {
+    array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress, // 1.
+    step_mode: wgpu::VertexStepMode::Vertex, // 2.
+    attributes: &[ // 3.
+        wgpu::VertexAttribute {
+            offset: 0, // 4.
+            shader_location: 0, // 5.
+            format: wgpu::VertexFormat::Float32x3, // 6.
+        },
+        wgpu::VertexAttribute {
+            offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+            shader_location: 1,
+            format: wgpu::VertexFormat::Float32x3,
+        }
+    ]
+}
  1. array_stride 定义了一个顶点所占的字节数。当着色器读取下一个顶点时,它将跳过 array_stride 的字节数。在我们的例子中,array_stride 是 24 个字节。
  2. step_mode 告诉管线此缓冲区中的数组数据中的每个元素代表的是每个顶点还是每个实例的数据,如果只想在开始绘制一个新实例时改变顶点,就可以设置为 wgpu::VertexStepMode::Instance。在后面的教程里我们会讲解实例化绘制。
  3. attributes 描述顶点的各个属性(Attribute)的布局。一般来说,这与结构体的字段是 1:1 映射的,在我们的案例中也是如此。
  4. offset 定义了属性在一个顶点元素中的字节偏移量。对于第一个属性,偏移量通常为零。其后属性的偏移量应为在其之前各属性的 size_of 之和。
  5. shader_location 告诉着色器要在什么位置存储这个属性。例如 @location(0) x: vec3f 在顶点着色器中对应于 Vertex 结构体的 position 字段,而 @location(1) x: vec3f 对应 color 字段。
  6. format 告诉着色器该属性的数据格式。Float32x3对应于着色器代码中的 vec3f。我们可以在一个属性中存储的最大值是Float32x4Uint32x4Sint32x4 也可以)。当我们需要存储比 Float32x4 更大的东西时请记住这一点。

对于视觉学习者来说,我们的顶点缓冲区看起来是这样的:

A figure of the VertexBufferLayout

让我们在 Vertex 上创建一个静态函数来返回此布局对象:

rust
// lib.rs
+impl Vertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        wgpu::VertexBufferLayout {
+            array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    shader_location: 0,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+                    shader_location: 1,
+                    format: wgpu::VertexFormat::Float32x3,
+                }
+            ]
+        }
+    }
+}

像上边那样指定属性是非常冗长的。我们可以使用 wgpu 提供的 vertex_attr_array 宏来清理一下。现在 VertexBufferLayout 变成了这样:

rust
wgpu::VertexBufferLayout {
+    array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
+    step_mode: wgpu::VertexStepMode::Vertex,
+    attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3],
+}

这无疑很棒,但 Rust 认为 vertex_attr_array 的结果是一个临时值,所以需要进行调整才能从一个函数中返回。我们可以将wgpu::VertexBufferLayout 的生命周期改为 'static,或者使其成为 const。示例如下:

rust
impl Vertex {
+    const ATTRIBS: [wgpu::VertexAttribute; 2] =
+        wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3];
+
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<Self>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &Self::ATTRIBS,
+        }
+    }
+}

不管怎么说,我觉得展示数据如何被映射是件好事,所以暂时不会使用这个宏。

现在我们可以在创建 render_pipeline 时使用它了:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    // ...
+    vertex: wgpu::VertexState {
+        // ...
+        buffers: &[
+            Vertex::desc(),
+        ],
+    },
+    // ...
+});

还需要在渲染函数中实际设置顶点缓冲区,否则程序会崩溃。

rust
// render()
+render_pass.set_pipeline(&self.render_pipeline);
+// 新添加!
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+render_pass.draw(0..3, 0..1);

set_vertex_buffer 函数接收两个参数,第一个参数是顶点缓冲区要使用的缓冲槽索引。你可以连续设置多个顶点缓冲区。

第二个参数是要使用的缓冲区的数据片断。你可以在硬件允许的情况下在一个缓冲区中存储尽可能多的对象,所以 slice 允许我们指定使用缓冲区的哪一部分。我们用 .. 来指定整个缓冲区。

在继续之前,我们需要修改 render_pass.draw() 的调用来使用 VERTICES 所指定的顶点数量。在 State 中添加一个num_vertices,令其值等于 VERTICES.len()

rust
// lib.rs
+
+struct State {
+    // ...
+    num_vertices: u32,
+}
+
+impl State {
+    // ...
+    fn new(...) -> Self {
+        // ...
+        let num_vertices = VERTICES.len() as u32;
+
+        Self {
+            surface,
+            device,
+            queue,
+            config,
+            render_pipeline,
+            vertex_buffer,
+            num_vertices,
+            size,
+        }
+    }
+}

然后在绘制命令中使用它:

rust
// render
+render_pass.draw(0..self.num_vertices, 0..1);

在上面的修改生效之前,还需要更新着色器,以便从顶点缓冲区中获取数据。

rust
// 顶点着色器
+
+struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) color: vec3f,
+};
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) color: vec3f,
+};
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+) -> VertexOutput {
+    var out: VertexOutput;
+    out.color = model.color;
+    out.clip_position = vec4f(model.position, 1.0);
+    return out;
+}
+
+// 片元着色器
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return vec4f(in.color, 1.0);
+}

如果做的正确无误,运行程序应该就能看到一个下边这样的三角形:

A colorful triangle

索引缓冲区

从技术的角度来看,目前的示例并不需要索引缓冲区,但它们仍然很有用。当开始使用有大量三角形的模型时,索引缓冲区就会发挥作用。考虑一下下边的五边形:

A pentagon made of 3 triangles

它总共有 5 个顶点和 3 个三角形。现在,如果我们想只用顶点来显示这样的东西,我们就需要以下顶点数据:

rust
const VERTICES: &[Vertex] = &[
+    Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
+    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
+
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
+    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
+
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
+    Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
+    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
+];

你会注意到有些顶点被使用了不止一次。C 和 B 顶点被使用了两次,而 E 顶点被重复使用了 3 次。假设每个浮点数是 4 个字节,那么这意味着在我们用于 VERTICES 的 216 个字节中,有 96 个字节是重复的数据。如果能只把这些顶点列出来一次不是很好吗?我们可以做到这一点!

这,就是索引缓冲区发挥作用的地方。

大体上来说,我们在 VERTICES 中存储所有唯一的顶点,我们创建另一个缓冲区,将索引存储在 VERTICES 中的元素以创建三角形。下面还是以五边形为例:

rust
// lib.rs
+const VERTICES: &[Vertex] = &[
+    Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
+    Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
+    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
+];
+
+const INDICES: &[u16] = &[
+    0, 1, 4,
+    1, 2, 4,
+    2, 3, 4,
+];

现在这种设置下,VERTICES 占用了 120 个字节,而 INDICES 只有 18 个字节,因为 u16 类型是 2 个字节长。在这种情况下,wgpu 会自动增加 2 个字节的填充,以确保缓冲区被对齐到 4 个字节,但它仍然只有 20 个字节。五边形总共是 140 字节,这意味着我们节省了 76 个字节! 这可能看起来不多,但当处理数十万的三角形时,索引可以节省大量的内存。

为了使用索引,有几处我们需要修改。首先需要创建一个缓冲区来存储索引。在 Statenew() 函数中,创建了 vertex_buffer 之后创建 index_buffer。同时将 num_vertices 改为num_indices,令其值等于 INDICES.len()

rust
let vertex_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Vertex Buffer"),
+        contents: bytemuck::cast_slice(VERTICES),
+        usage: wgpu::BufferUsages::VERTEX,
+    }
+);
+// 新添加!
+let index_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Index Buffer"),
+        contents: bytemuck::cast_slice(INDICES),
+        usage: wgpu::BufferUsages::INDEX,
+    }
+);
+let num_indices = INDICES.len() as u32;

我们不需要为索引实现 PodZeroable,因为 bytemuck 已经为 u16 等基本类型实现了它们。只需将 index_buffernum_indices 添加到 State 结构体中。

rust
struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+    render_pipeline: wgpu::RenderPipeline,
+    vertex_buffer: wgpu::Buffer,
+    // 新添加!
+    index_buffer: wgpu::Buffer,
+    num_indices: u32,
+}

然后在构造函数中填充这些字段:

rust
Self {
+    surface,
+    device,
+    queue,
+    config,
+    size,
+    render_pipeline,
+    vertex_buffer,
+    // 新添加!
+    index_buffer,
+    num_indices,
+}

我们现在所要做的就是更新 render() 函数来使用 index_buffer

rust
// render()
+render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); // 1.
+render_pass.draw_indexed(0..self.num_indices, 0, 0..1); // 2.

有几点需要注意:

  1. 命令名称是 set_index_buffer 而不是 set_index_buffers, 一次绘制(draw_XXX())只能设置一个索引缓冲区。但是,你可以在一个渲染通道内调用多次绘制,每次都设置不同的索引缓冲区。
  2. 当使用索引缓冲区时,需使用 draw_indexed 来绘制,draw 命令会忽略索引缓冲区。还需确保你使用的是索引数(num_indices)而非顶点数,否则你的模型要么画错,要么因为没有足够的索引数而导致程序恐慌(panic)。

完成这些后,运行程序应该就能看到窗口里有一个洋红色的五边形了:

Magenta pentagon in window

颜色校正

如果在洋红色五角星上使用取色器,你会得到一个 #BC00BC 的十六进制值。如果把它转换成 RGB 值会得到(188, 0, 188),将这些值除以 255 使其映射进 [0,1] 范围,大致会得到(0.737254902,0,0.737254902)。这与我们赋给顶点颜色的值不同,后者是(0.5, 0.0, 0.5)。其原因与色彩空间(Color Space)有关。

大多数显示器使用的色彩空间被称为 sRGB(事实上,目前市面上的中高端显示器已经支持 DisplayP3 甚至是 BT.2100 等广色域色彩空间,macOS 与 iOS 设备默认使用的就是 DisplayP3 色彩空间)。我们的展示平面(完全取决于从 surface.get_capabilities(&adapter).formats 返回的格式)默认支持 sRGB 纹理格式。sRGB 格式是根据颜色的相对亮度而不是实际亮度来存储的。其原因是人眼对光线的感知不是线性的。我们注意到较深的颜色比较浅的颜色有更多差异。

可以用下面的公式得到一个正确颜色的近似值。srgb_color = (rgb_color / 255) ^ 2.2。在 RGB 值为 (188, 0, 188) 的情况下,我们将得到 (0.511397819, 0.0, 0.511397819)。与我们的(0.5, 0.0, 0.5)有点偏差。虽然你可以通过调整公式来获得所需的数值,但使用纹理可能会节省很多时间,因为它们默认是以 sRGB 方式存储的,所以不会像顶点颜色那样出现颜色不准确的情况。我们会在下一课中介绍纹理。

假如你的显示设备使用的是 DisplayP3 或 BT.2100 等广色域色彩空间,那么当你使用取色器检查屏幕上的渲染结果时,拾取到的色值将与着色器内的返回值不一致。

这是因为目前 WebGPU 仅支持较小色域的 sRGB 色彩空间,而硬件会执行色彩空间转换(color space conversion)将 sRGB 色值映射到更广的色域来显示到屏幕上,因此,使用取色器拾取到的色值是经过转换后的值。

挑战

使用顶点缓冲区和索引缓冲区创建一个比教程里做的更复杂的形状(也就是三个以上的三角形),并用空格键在两者之间切换。

',80);function y(F,c,o,u,b,C){const a=s("WasmExample"),n=s("AutoGithubLink");return p(),l("div",null,[g,i(a,{example:"tutorial4_buffer"}),i(n)])}const A=h(d,[["render",y]]);export{B as __pageData,A as default}; diff --git a/assets/beginner_tutorial4-buffer_index.md.Afffgpgg.lean.js b/assets/beginner_tutorial4-buffer_index.md.Afffgpgg.lean.js new file mode 100644 index 000000000..84ff65ca8 --- /dev/null +++ b/assets/beginner_tutorial4-buffer_index.md.Afffgpgg.lean.js @@ -0,0 +1 @@ +import{_ as h,D as s,o as p,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/vb_desc.PyYTSl9s.png",e="/learn-wgpu-zh/assets/triangle.o8T8tm7l.png",r="/learn-wgpu-zh/assets/pentagon.GLgl43gd.png",E="/learn-wgpu-zh/assets/indexed-pentagon.hkUQ8suQ.png",B=JSON.parse('{"title":"缓冲区与索引","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial4-buffer/index.md","filePath":"beginner/tutorial4-buffer/index.md","lastUpdated":1703303099000}'),d={name:"beginner/tutorial4-buffer/index.md"},g=k("",80);function y(F,c,o,u,b,C){const a=s("WasmExample"),n=s("AutoGithubLink");return p(),l("div",null,[g,i(a,{example:"tutorial4_buffer"}),i(n)])}const A=h(d,[["render",y]]);export{B as __pageData,A as default}; diff --git a/assets/beginner_tutorial5-textures_index.md.DkmoMANr.js b/assets/beginner_tutorial5-textures_index.md.DkmoMANr.js new file mode 100644 index 000000000..f4986de08 --- /dev/null +++ b/assets/beginner_tutorial5-textures_index.md.DkmoMANr.js @@ -0,0 +1,351 @@ +import{_ as p,D as s,o as h,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const 
t="/learn-wgpu-zh/assets/happy-tree.tK0WX7Fz.png",e="/learn-wgpu-zh/assets/address_mode.h6IYUOwy.png",r="/learn-wgpu-zh/assets/upside-down.sY39m84B.png",E="/learn-wgpu-zh/assets/happy-tree-uv-coords.3QtDvIcC.png",d="/learn-wgpu-zh/assets/rightside-up.yUas2Hsc.png",A=JSON.parse('{"title":"纹理和绑定组","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial5-textures/index.md","filePath":"beginner/tutorial5-textures/index.md","lastUpdated":1703303099000}'),g={name:"beginner/tutorial5-textures/index.md"},y=k('

纹理和绑定组

目前为止,我们一直在绘制简单的图形。当然可以只用三角形来做游戏,而试图绘制高精度的对象又会极大地限制能运行我们游戏的设备。不过,可以用 纹理 来解决此问题。

纹理(Textures)是叠加在三角形网格(Mesh)上的图像,使其看起来有丰富的细节。有多种类型的纹理,如法线贴图(Normal Maps,也就是法线纹理)、凹凸贴图(Bump Maps)、镜面贴图和漫反射贴图。下边将讨论漫反射贴图,简单来说也就是颜色纹理。

加载图像文件

要把一个图像映射到对象网格上,首先是需要有一个图像文件。就使用下边这棵快乐的小树吧:

一棵快乐的树

我们将使用 image 包 来加载这棵树。先把它添加到依赖项中:

toml
[dependencies.image]
+version = "0.24"
+default-features = false
+features = ["png", "jpeg"]

image 包含的 jpeg 解码器使用 rayon 来加速线程的解码速度。WASM 目前不支持线程,所以我们需要禁用这一特性,这样代码在尝试加载网络上的 jpeg 时就不会崩溃。

在 WASM 中解码 jpeg 性能不高。如果你想在 WASM 中加快图像加载速度,可以选择使用浏览器的内置解码器来替换 wasm-bindgen 构建时使用 的 image。这涉及到在 Rust 中创建一个 <img> 标记来获取图像,然后创建一个 <canvas> 来获取像素数据,我把这留作读者的练习。

Statenew() 函数中,于 surface.configure() 之后添加以下代码:

rust
surface.configure(&device, &config);
+// 新添加!
+
+let diffuse_bytes = include_bytes!("happy-tree.png");
+let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
+let diffuse_rgba = diffuse_image.to_rgba8();
+
+use image::GenericImageView;
+let dimensions = diffuse_image.dimensions();

此处代码从图像文件中读取字节,并将其加载到 image 对象中,然后转换为 rgba 动态数组。我们还保存了图像的尺寸信息以便在创建实际纹理时使用。

现在我们来创建纹理:

rust
let texture_size = wgpu::Extent3d {
+    width: dimensions.0,
+    height: dimensions.1,
+    depth_or_array_layers: 1,
+};
+let diffuse_texture = device.create_texture(
+    &wgpu::TextureDescriptor {
+        // 所有纹理都是以 3D 形式存储的,我们通过设置深度 1 来表示 2D 纹理
+        size: texture_size,
+        mip_level_count: 1, // 后面会详细介绍此字段
+        sample_count: 1,
+        dimension: wgpu::TextureDimension::D2,
+        // 大多数图像都是使用 sRGB 来存储的,我们需要在这里指定。
+        format: wgpu::TextureFormat::Rgba8UnormSrgb,
+        // TEXTURE_BINDING 表示我们要在着色器中使用这个纹理。
+        // COPY_DST 表示我们能将数据复制到这个纹理上。
+        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
+        label: Some("diffuse_texture"),
+        view_formats: &[],
+    }
+);

填充数据到纹理中

Texture 结构体没有函数可以直接与数据交互。但我们可以使用之前创建的命令队列上的 write_texture 命令来填充纹理数据。下边是具体代码:

rust
queue.write_texture(
+    // 告诉 wgpu 从何处复制像素数据
+    wgpu::ImageCopyTexture {
+        texture: &diffuse_texture,
+        mip_level: 0,
+        origin: wgpu::Origin3d::ZERO,
+        aspect: wgpu::TextureAspect::All,
+    },
+    // 实际像素数据
+    &diffuse_rgba,
+    // 纹理的内存布局
+    wgpu::ImageDataLayout {
+        offset: 0,
+        bytes_per_row: Some(4 * dimensions.0),
+        rows_per_image: Some(dimensions.1),
+    },
+    texture_size,
+);

填充纹理数据的经典方式是将像素数据先复制到一个缓冲区,然后再从缓冲区复制到纹理中。使用 write_texture 更有效率,因为它少用了一个缓冲区 -- 不过这里还是介绍一下,以防读者有需要:

rust
let buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Temp Buffer"),
+        contents: &diffuse_rgba,
+        usage: wgpu::BufferUsages::COPY_SRC,
+    }
+);
+
+let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+    label: Some("texture_buffer_copy_encoder"),
+});
+
+encoder.copy_buffer_to_texture(
+    wgpu::ImageCopyBuffer {
+        buffer: &buffer,
+        offset: 0,
+        bytes_per_row: 4 * dimensions.0,
+        rows_per_image: dimensions.1,
+    },
+    wgpu::ImageCopyTexture {
+        texture: &diffuse_texture,
+        mip_level: 0,
+        array_layer: 0,
+        origin: wgpu::Origin3d::ZERO,
+    },
+    size,
+);
+
+queue.submit(std::iter::once(encoder.finish()));

值得注意的是 bytes_per_row 字段,这个值需要是 256 的倍数。查看 gif 教程 以了解更多细节。

纹理视图与采样器

现在纹理中已经有了数据,我们需要一种方法来使用它。这,就是纹理视图TextureView)和采样器Sampler)的用处。

纹理视图描述纹理及其关联的元数据。采样器控制纹理如何被 采样。采样工作类似于 GIMP/Photoshop 中的滴管工具。我们的程序在纹理上提供一个坐标(被称为 纹理坐标 ),然后采样器根据纹理和一些内部参数返回相应的颜色。

现在我们来定义 diffuse_texture_viewdiffuse_sampler

rust
// 我们不需要过多地配置纹理视图,所以使用 wgpu 的默认值。
+let diffuse_texture_view = diffuse_texture.create_view(&wgpu::TextureViewDescriptor::default());
+let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
+    address_mode_u: wgpu::AddressMode::ClampToEdge,
+    address_mode_v: wgpu::AddressMode::ClampToEdge,
+    address_mode_w: wgpu::AddressMode::ClampToEdge,
+    mag_filter: wgpu::FilterMode::Linear,
+    min_filter: wgpu::FilterMode::Nearest,
+    mipmap_filter: wgpu::FilterMode::Nearest,
+    ..Default::default()
+});

address_mode_* 参数指定了如果采样器得到的纹理坐标超出了纹理边界时该如何处理。我们有几个选项可供选择:

address_mode.png

mag_filtermin_filter 字段描述了当采样足迹小于或大于一个纹素(Texel)时该如何处理。当场景中的贴图远离或靠近 camera 时,这两个字段的设置通常会有效果。

有 2 个选项:

Mipmaps 是一个复杂的话题,需要在未来单独写一个章节。现在,我们可以说 mipmap_filter 的功能有点类似于 (mag/min)_filter,因为它告诉采样器如何在 mipmaps 之间混合。

其他字段使用了默认值。如果想了解字段详情,请查看 wgpu 文档

现在,我们需要用到 BindGroupPipelineLayout 来将所有这些不同的资源都接入。

绑定组

绑定组BindGroup)描述了一组资源以及如何通过着色器访问它们。我们先来创建一个绑定组布局BindGroupLayout):

rust
let texture_bind_group_layout =
+            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+                entries: &[
+                    wgpu::BindGroupLayoutEntry {
+                        binding: 0,
+                        visibility: wgpu::ShaderStages::FRAGMENT,
+                        ty: wgpu::BindingType::Texture {
+                            multisampled: false,
+                            view_dimension: wgpu::TextureViewDimension::D2,
+                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
+                        },
+                        count: None,
+                    },
+                    wgpu::BindGroupLayoutEntry {
+                        binding: 1,
+                        visibility: wgpu::ShaderStages::FRAGMENT,
+                        // This should match the filterable field of the
+                        // corresponding Texture entry above.
+                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
+                        count: None,
+                    },
+                ],
+                label: Some("texture_bind_group_layout"),
+            });

texture_bind_group_layout 有两个条目:一个是绑定到 0 资源槽的纹理,另一个是绑定到 1 资源槽的采样器。这两个绑定只对由 visibility 字段指定的片元着色器可见。这个字段的可选值是 NONEVERTEXFRAGMENTCOMPUTE 的任意按位或(|)组合。

现在使用绑定组布局texture_bind_group_layout)来创建绑定组:

rust
let diffuse_bind_group = device.create_bind_group(
+    &wgpu::BindGroupDescriptor {
+        layout: &texture_bind_group_layout,
+        entries: &[
+            wgpu::BindGroupEntry {
+                binding: 0,
+                resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
+            },
+            wgpu::BindGroupEntry {
+                binding: 1,
+                resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
+            }
+        ],
+        label: Some("diffuse_bind_group"),
+    }
+);

看着这个,你可能会有一点似曾相识的感觉! 这是因为绑定组绑定组布局的一个更具体的声明。它们分开的原因是,只要是共享同一个绑定组布局的绑定组,就能在运行时实时切换。创建的每个纹理和采样器都需要添加到一个绑定组中。为了达成目的,我们将为每个纹理创建一个新的绑定组。

让我们把 diffuse_bind_group 添加到 State 结构体中:

rust
struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+    render_pipeline: wgpu::RenderPipeline,
+    vertex_buffer: wgpu::Buffer,
+    index_buffer: wgpu::Buffer,
+    num_indices: u32,
+    diffuse_bind_group: wgpu::BindGroup, // 新添加!
+}

确保我们在 new() 函数中返回这个字段:

rust
impl State {
+    async fn new() -> Self {
+        // ...
+        Self {
+            // ...
+            // 新添加!
+            diffuse_bind_group,
+        }
+    }
+}

现在,我们来在 render() 函数中使用绑定组:

rust
// render()
+// ...
+render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); // NEW!
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
+
+render_pass.draw_indexed(0..self.num_indices, 0, 0..1);

管线布局

还记得在管线章节创建的管线布局PipelineLayout)吗?现在我们终于可以使用它了! 管线布局包含一个管线可以使用的绑定组布局的列表。修改 render_pipeline_layout 以使用 texture_bind_group_layout

rust
async fn new(...) {
+    // ...
+    let render_pipeline_layout = device.create_pipeline_layout(
+        &wgpu::PipelineLayoutDescriptor {
+            label: Some("Render Pipeline Layout"),
+            bind_group_layouts: &[&texture_bind_group_layout], // 新添加!
+            push_constant_ranges: &[],
+        }
+    );
+    // ...
+}

修改 VERTICES 常量

对于 Vertex 的定义有几处需要修改。到目前为止,我们一直在使用 color 字段来设置网格颜色。现在我们要用 tex_coords 代替 color,这些坐标会被传递给采样器以获取纹素(Texel)的颜色。

由于 tex_coords 是二维的,需要修改这个字段的类型为两个浮点数的数组。

先来修改 Vertex 结构体:

rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+struct Vertex {
+    position: [f32; 3],
+    tex_coords: [f32; 2], // 新添加!
+}

然后在 VertexBufferLayout 中反映这些变化:

rust
impl Vertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    shader_location: 0,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+                    shader_location: 1,
+                    format: wgpu::VertexFormat::Float32x2, // NEW!
+                },
+            ]
+        }
+    }
+}

最后,需要修改 VERTICES,用以下代码替换现有的定义:

rust
// Changed
+const VERTICES: &[Vertex] = &[
+    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.99240386], }, // A
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.56958647], }, // B
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.05060294], }, // C
+    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.1526709], }, // D
+    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.7347359], }, // E
+];

修改着色器

有了新的 Vertex 结构体,现在是时候更新着色器了。首先需要将 tex_coords 传递给顶点着色器,然后将它们用于片元着色器,以便从采样器获得最终的颜色。让我们从顶点着色器开始:

rust
// 顶点着色器
+
+struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) tex_coords: vec2f,
+}
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) tex_coords: vec2f,
+}
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+) -> VertexOutput {
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.clip_position = vec4f(model.position, 1.0);
+    return out;
+}

现在顶点着色器输出了 tex_coords,我们需要改变片元着色器来接收它们。有了这些坐标,就可以使用采样器从纹理中获取纹素的颜色了:

rust
// 片元着色器
+
+@group(0) @binding(0)
+var t_diffuse: texture_2d<f32>;
+@group(0)@binding(1)
+var s_diffuse: sampler;
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return textureSample(t_diffuse, s_diffuse, in.tex_coords);
+}

变量 t_diffuses_diffuse 就是所谓的 uniforms。我们将在 相机部分 中进一步讨论 uniforms。现在,我们需要知道的是,@group(x) 对应于 set_bind_group() 中的第一个参数,@binding(x) 与我们创建绑定组布局绑定组时指定的 binding 值对应。

渲染结果

现在运行我们的程序,将得到如下渲染效果:

an upside down tree on a hexagon

很奇怪,我们的树是颠倒的! 这是因为 wgpu 的世界坐标的 Y 轴朝上,而纹理坐标的 Y 轴朝下。换句话说,纹理坐标中的(0,0)对应于图像的左上方,而(1,1)是右下方:

happy-tree-uv-coords.png

我们可以通过将每个纹理坐标的 y 坐标替换为 1 - y 来得到纹理的正确朝向:

rust
const VERTICES: &[Vertex] = &[
+    // 修改后的
+    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397], }, // C
+    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732914], }, // D
+    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
+];

现在我们就把树正确地放在五边形上了:

our happy tree as it should be

代码整理

为方便起见,让我们把纹理代码放到自己的模块中。我们首先将 anyhow 添加到 Cargo.toml 文件中,以简化错误处理:

toml
[dependencies]
+image = "0.23"
+glam = "0.24"
+winit = "0.28.7"
+env_logger = "0.10"
+log = "0.4"
+pollster = "0.3"
+wgpu = "0.17"
+bytemuck = { version = "1.14", features = [ "derive" ] }
+anyhow = "1.0" # NEW!

然后,在一个名为 src/texture.rs 的新文件中,添加以下代码:

rust
use image::GenericImageView;
+use anyhow::*;
+
+pub struct Texture {
+    pub texture: wgpu::Texture,
+    pub view: wgpu::TextureView,
+    pub sampler: wgpu::Sampler,
+}
+
+impl Texture {
+    pub fn from_bytes(
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        bytes: &[u8],
+        label: &str
+    ) -> Result<Self> {
+        let img = image::load_from_memory(bytes)?;
+        Self::from_image(device, queue, &img, Some(label))
+    }
+
+    pub fn from_image(
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        img: &image::DynamicImage,
+        label: Option<&str>
+    ) -> Result<Self> {
+        let rgba = img.to_rgba8();
+        let dimensions = img.dimensions();
+
+        let size = wgpu::Extent3d {
+            width: dimensions.0,
+            height: dimensions.1,
+            depth_or_array_layers: 1,
+        };
+        let texture = device.create_texture(
+            &wgpu::TextureDescriptor {
+                label,
+                size,
+                mip_level_count: 1,
+                sample_count: 1,
+                dimension: wgpu::TextureDimension::D2,
+                format: wgpu::TextureFormat::Rgba8UnormSrgb,
+                usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
+                view_formats: &[],
+            }
+        );
+
+        queue.write_texture(
+            wgpu::ImageCopyTexture {
+                aspect: wgpu::TextureAspect::All,
+                texture: &texture,
+                mip_level: 0,
+                origin: wgpu::Origin3d::ZERO,
+            },
+            &rgba,
+            wgpu::ImageDataLayout {
+                offset: 0,
+                bytes_per_row: Some(4 * dimensions.0),
+                rows_per_image: Some(dimensions.1),
+            },
+            size,
+        );
+
+        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+        let sampler = device.create_sampler(
+            &wgpu::SamplerDescriptor {
+                address_mode_u: wgpu::AddressMode::ClampToEdge,
+                address_mode_v: wgpu::AddressMode::ClampToEdge,
+                address_mode_w: wgpu::AddressMode::ClampToEdge,
+                mag_filter: wgpu::FilterMode::Linear,
+                min_filter: wgpu::FilterMode::Nearest,
+                mipmap_filter: wgpu::FilterMode::Nearest,
+                ..Default::default()
+            }
+        );
+
+        Ok(Self { texture, view, sampler })
+    }
+}

注意,我们使用的是 to_rgba8() 而不是 as_rgba8()。PNG 使用 as_rgba8() 没问题,因为它们有一个 alpha 通道。但是 JPEG 没有 alpha 通道,如果我们试图在 JPEG 纹理图像上调用 as_rgba8(),代码就会陷入恐慌。相反,我们可以使用 to_rgba8() 来处理没有 alpha 通道的图像,它会生成一个新的图像缓冲区。

lib.rs 文件的顶部添加以下代码来将 texture.rs 作为一个模块导入:

rust
mod texture;

new() 函数中的纹理创建代码现在变得简化多了:

rust
surface.configure(&device, &config);
+let diffuse_bytes = include_bytes!("happy-tree.png"); // CHANGED!
+let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap(); // CHANGED!
+
+// 到 \`let texture_bind_group_layout = ...\` 行为止的所有代码现在都可以移除了。

我们仍然需要单独存储绑定组,因为纹理无须知道绑定组的布局。修改创建 diffuse_bind_group 的过程以使用diffuse_textureviewsampler 字段:

rust
let diffuse_bind_group = device.create_bind_group(
+    &wgpu::BindGroupDescriptor {
+        layout: &texture_bind_group_layout,
+        entries: &[
+            wgpu::BindGroupEntry {
+                binding: 0,
+                resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), // CHANGED!
+            },
+            wgpu::BindGroupEntry {
+                binding: 1,
+                resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), // CHANGED!
+            }
+        ],
+        label: Some("diffuse_bind_group"),
+    }
+);

最后,需要更新 State 中的字段以使用全新 Texture 结构体,在未来的教程中还会用到它:

rust
struct State {
+    // ...
+    diffuse_bind_group: wgpu::BindGroup,
+    diffuse_texture: texture::Texture, // NEW
+}
rust
impl State {
+    async fn new() -> Self {
+        // ...
+        Self {
+            // ...
+            num_indices,
+            diffuse_bind_group,
+            diffuse_texture, // NEW
+        }
+    }
+}

经过上边的整理,代码的工作方式还和以前一样,但我们现在有了一个更便利的方式来创建纹理。

挑战

另创建一个纹理,并在你按下空格键时交替使用。

`,91);function F(c,u,b,o,m,C){const a=s("WasmExample"),n=s("AutoGithubLink");return h(),l("div",null,[y,i(a,{example:"tutorial5_textures"}),i(n)])}const B=p(g,[["render",F]]);export{A as __pageData,B as default}; diff --git a/assets/beginner_tutorial5-textures_index.md.DkmoMANr.lean.js b/assets/beginner_tutorial5-textures_index.md.DkmoMANr.lean.js new file mode 100644 index 000000000..465a87350 --- /dev/null +++ b/assets/beginner_tutorial5-textures_index.md.DkmoMANr.lean.js @@ -0,0 +1 @@ +import{_ as p,D as s,o as h,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/happy-tree.tK0WX7Fz.png",e="/learn-wgpu-zh/assets/address_mode.h6IYUOwy.png",r="/learn-wgpu-zh/assets/upside-down.sY39m84B.png",E="/learn-wgpu-zh/assets/happy-tree-uv-coords.3QtDvIcC.png",d="/learn-wgpu-zh/assets/rightside-up.yUas2Hsc.png",A=JSON.parse('{"title":"纹理和绑定组","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial5-textures/index.md","filePath":"beginner/tutorial5-textures/index.md","lastUpdated":1703303099000}'),g={name:"beginner/tutorial5-textures/index.md"},y=k("",91);function F(c,u,b,o,m,C){const a=s("WasmExample"),n=s("AutoGithubLink");return h(),l("div",null,[y,i(a,{example:"tutorial5_textures"}),i(n)])}const B=p(g,[["render",F]]);export{A as __pageData,B as default}; diff --git a/assets/beginner_tutorial5-textures_index.md.dd7befe7.js b/assets/beginner_tutorial5-textures_index.md.dd7befe7.js deleted file mode 100644 index 2d097c220..000000000 --- a/assets/beginner_tutorial5-textures_index.md.dd7befe7.js +++ /dev/null @@ -1,351 +0,0 @@ -import{_ as l,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const 
c="/learn-wgpu-zh/assets/happy-tree.cf7260cf.png",t="/learn-wgpu-zh/assets/address_mode.a4746540.png",F="/learn-wgpu-zh/assets/upside-down.3b0ae8b3.png",D="/learn-wgpu-zh/assets/happy-tree-uv-coords.dc08858a.png",y="/learn-wgpu-zh/assets/rightside-up.018bc290.png",_=JSON.parse('{"title":"纹理和绑定组","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial5-textures/index.md","filePath":"beginner/tutorial5-textures/index.md","lastUpdated":1701933923000}'),C={name:"beginner/tutorial5-textures/index.md"},i=r('

纹理和绑定组

目前为止,我们一直在绘制简单的图形。当然可以只用三角形来做游戏,而试图绘制高精度的对象又会极大地限制能运行我们游戏的设备。不过,可以用 纹理 来解决此问题。

纹理(Textures)是叠加在三角形网格(Mesh)上的图像,使其看起来有丰富的细节。有多种类型的纹理,如法线贴图(Normal Maps,也就是法线纹理)、凹凸贴图(Bump Maps)、镜面贴图和漫反射贴图。下边将讨论漫反射贴图,简单来说也就是颜色纹理。

加载图像文件

要把一个图像映射到对象网格上,首先是需要有一个图像文件。就使用下边这棵快乐的小树吧:

一棵快乐的树

我们将使用 image 包 来加载这棵树。先把它添加到依赖项中:

toml
[dependencies.image]
-version = "0.24"
-default-features = false
-features = ["png", "jpeg"]

image 包含的 jpeg 解码器使用 rayon 来加速线程的解码速度。WASM 目前不支持线程,所以我们需要禁用这一特性,这样代码在尝试加载网络上的 jpeg 时就不会崩溃。

在 WASM 中解码 jpeg 性能不高。如果你想在 WASM 中加快图像加载速度,可以选择使用浏览器的内置解码器来替换 wasm-bindgen 构建时使用 的 image。这涉及到在 Rust 中创建一个 <img> 标记来获取图像,然后创建一个 <canvas> 来获取像素数据,我把这留作读者的练习。

Statenew() 函数中,于 surface.configure() 之后添加以下代码:

rust
surface.configure(&device, &config);
-// 新添加!
-
-let diffuse_bytes = include_bytes!("happy-tree.png");
-let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
-let diffuse_rgba = diffuse_image.to_rgba8();
-
-use image::GenericImageView;
-let dimensions = diffuse_image.dimensions();

此处代码从图像文件中读取字节,并将其加载到 image 对象中,然后转换为 rgba 动态数组。我们还保存了图像的尺寸信息以便在创建实际纹理时使用。

现在我们来创建纹理:

rust
let texture_size = wgpu::Extent3d {
-    width: dimensions.0,
-    height: dimensions.1,
-    depth_or_array_layers: 1,
-};
-let diffuse_texture = device.create_texture(
-    &wgpu::TextureDescriptor {
-        // 所有纹理都是以 3D 形式存储的,我们通过设置深度 1 来表示 2D 纹理
-        size: texture_size,
-        mip_level_count: 1, // 后面会详细介绍此字段
-        sample_count: 1,
-        dimension: wgpu::TextureDimension::D2,
-        // 大多数图像都是使用 sRGB 来存储的,我们需要在这里指定。
-        format: wgpu::TextureFormat::Rgba8UnormSrgb,
-        // TEXTURE_BINDING 表示我们要在着色器中使用这个纹理。
-        // COPY_DST 表示我们能将数据复制到这个纹理上。
-        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
-        label: Some("diffuse_texture"),
-        view_formats: &[],
-    }
-);

填充数据到纹理中

Texture 结构体没有函数可以直接与数据交互。但我们可以使用之前创建的命令队列上的 write_texture 命令来填充纹理数据。下边是具体代码:

rust
queue.write_texture(
-    // 告诉 wgpu 从何处复制像素数据
-    wgpu::ImageCopyTexture {
-        texture: &diffuse_texture,
-        mip_level: 0,
-        origin: wgpu::Origin3d::ZERO,
-        aspect: wgpu::TextureAspect::All,
-    },
-    // 实际像素数据
-    &diffuse_rgba,
-    // 纹理的内存布局
-    wgpu::ImageDataLayout {
-        offset: 0,
-        bytes_per_row: Some(4 * dimensions.0),
-        rows_per_image: Some(dimensions.1),
-    },
-    texture_size,
-);

填充纹理数据的经典方式是将像素数据先复制到一个缓冲区,然后再从缓冲区复制到纹理中。使用 write_texture 更有效率,因为它少用了一个缓冲区 -- 不过这里还是介绍一下,以防读者有需要:

rust
let buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Temp Buffer"),
-        contents: &diffuse_rgba,
-        usage: wgpu::BufferUsages::COPY_SRC,
-    }
-);
-
-let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-    label: Some("texture_buffer_copy_encoder"),
-});
-
-encoder.copy_buffer_to_texture(
-    wgpu::ImageCopyBuffer {
-        buffer: &buffer,
-        offset: 0,
-        bytes_per_row: 4 * dimensions.0,
-        rows_per_image: dimensions.1,
-    },
-    wgpu::ImageCopyTexture {
-        texture: &diffuse_texture,
-        mip_level: 0,
-        array_layer: 0,
-        origin: wgpu::Origin3d::ZERO,
-    },
-    size,
-);
-
-queue.submit(std::iter::once(encoder.finish()));

值得注意的是 bytes_per_row 字段,这个值需要是 256 的倍数。查看 gif 教程 以了解更多细节。

纹理视图与采样器

现在纹理中已经有了数据,我们需要一种方法来使用它。这,就是纹理视图TextureView)和采样器Sampler)的用处。

纹理视图描述纹理及其关联的元数据。采样器控制纹理如何被 采样。采样工作类似于 GIMP/Photoshop 中的滴管工具。我们的程序在纹理上提供一个坐标(被称为 纹理坐标 ),然后采样器根据纹理和一些内部参数返回相应的颜色。

现在我们来定义 diffuse_texture_viewdiffuse_sampler

rust
// 我们不需要过多地配置纹理视图,所以使用 wgpu 的默认值。
-let diffuse_texture_view = diffuse_texture.create_view(&wgpu::TextureViewDescriptor::default());
-let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
-    address_mode_u: wgpu::AddressMode::ClampToEdge,
-    address_mode_v: wgpu::AddressMode::ClampToEdge,
-    address_mode_w: wgpu::AddressMode::ClampToEdge,
-    mag_filter: wgpu::FilterMode::Linear,
-    min_filter: wgpu::FilterMode::Nearest,
-    mipmap_filter: wgpu::FilterMode::Nearest,
-    ..Default::default()
-});

address_mode_* 参数指定了如果采样器得到的纹理坐标超出了纹理边界时该如何处理。我们有几个选项可供选择:

address_mode.png

mag_filtermin_filter 字段描述了当采样足迹小于或大于一个纹素(Texel)时该如何处理。当场景中的贴图远离或靠近 camera 时,这两个字段的设置通常会有效果。

有 2 个选项:

Mipmaps 是一个复杂的话题,需要在未来单独写一个章节。现在,我们可以说 mipmap_filter 的功能有点类似于 (mag/min)_filter,因为它告诉采样器如何在 mipmaps 之间混合。

其他字段使用了默认值。如果想了解字段详情,请查看 wgpu 文档

现在,我们需要用到 BindGroupPipelineLayout 来将所有这些不同的资源都接入。

绑定组

绑定组BindGroup)描述了一组资源以及如何通过着色器访问它们。我们先来创建一个绑定组布局BindGroupLayout):

rust
let texture_bind_group_layout =
-            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-                entries: &[
-                    wgpu::BindGroupLayoutEntry {
-                        binding: 0,
-                        visibility: wgpu::ShaderStages::FRAGMENT,
-                        ty: wgpu::BindingType::Texture {
-                            multisampled: false,
-                            view_dimension: wgpu::TextureViewDimension::D2,
-                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
-                        },
-                        count: None,
-                    },
-                    wgpu::BindGroupLayoutEntry {
-                        binding: 1,
-                        visibility: wgpu::ShaderStages::FRAGMENT,
-                        // This should match the filterable field of the
-                        // corresponding Texture entry above.
-                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
-                        count: None,
-                    },
-                ],
-                label: Some("texture_bind_group_layout"),
-            });

texture_bind_group_layout 有两个条目:一个是绑定到 0 资源槽的纹理,另一个是绑定到 1 资源槽的采样器。这两个绑定只对由 visibility 字段指定的片元着色器可见。这个字段的可选值是 NONEVERTEXFRAGMENTCOMPUTE 的任意按位或(|)组合。

现在使用绑定组布局texture_bind_group_layout)来创建绑定组:

rust
let diffuse_bind_group = device.create_bind_group(
-    &wgpu::BindGroupDescriptor {
-        layout: &texture_bind_group_layout,
-        entries: &[
-            wgpu::BindGroupEntry {
-                binding: 0,
-                resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
-            },
-            wgpu::BindGroupEntry {
-                binding: 1,
-                resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
-            }
-        ],
-        label: Some("diffuse_bind_group"),
-    }
-);

看着这个,你可能会有一点似曾相识的感觉! 这是因为绑定组绑定组布局的一个更具体的声明。它们分开的原因是,只要是共享同一个绑定组布局的绑定组,就能在运行时实时切换。创建的每个纹理和采样器都需要添加到一个绑定组中。为了达成目的,我们将为每个纹理创建一个新的绑定组。

让我们把 diffuse_bind_group 添加到 State 结构体中:

rust
struct State {
-    surface: wgpu::Surface,
-    device: wgpu::Device,
-    queue: wgpu::Queue,
-    config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
-    render_pipeline: wgpu::RenderPipeline,
-    vertex_buffer: wgpu::Buffer,
-    index_buffer: wgpu::Buffer,
-    num_indices: u32,
-    diffuse_bind_group: wgpu::BindGroup, // 新添加!
-}

确保我们在 new() 函数中返回这个字段:

rust
impl State {
-    async fn new() -> Self {
-        // ...
-        Self {
-            // ...
-            // 新添加!
-            diffuse_bind_group,
-        }
-    }
-}

现在,我们来在 render() 函数中使用绑定组:

rust
// render()
-// ...
-render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); // NEW!
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
-
-render_pass.draw_indexed(0..self.num_indices, 0, 0..1);

管线布局

还记得在管线章节创建的管线布局PipelineLayout)吗?现在我们终于可以使用它了! 管线布局包含一个管线可以使用的绑定组布局的列表。修改 render_pipeline_layout 以使用 texture_bind_group_layout

rust
async fn new(...) {
-    // ...
-    let render_pipeline_layout = device.create_pipeline_layout(
-        &wgpu::PipelineLayoutDescriptor {
-            label: Some("Render Pipeline Layout"),
-            bind_group_layouts: &[&texture_bind_group_layout], // 新添加!
-            push_constant_ranges: &[],
-        }
-    );
-    // ...
-}

修改 VERTICES 常量

对于 Vertex 的定义有几处需要修改。到目前为止,我们一直在使用 color 字段来设置网格颜色。现在我们要用 tex_coords 代替 color,这些坐标会被传递给采样器以获取纹素(Texel)的颜色。

由于 tex_coords 是二维的,需要修改这个字段的类型为两个浮点数的数组。

先来修改 Vertex 结构体:

rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-struct Vertex {
-    position: [f32; 3],
-    tex_coords: [f32; 2], // 新添加!
-}

然后在 VertexBufferLayout 中反映这些变化:

rust
impl Vertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    shader_location: 0,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
-                    shader_location: 1,
-                    format: wgpu::VertexFormat::Float32x2, // NEW!
-                },
-            ]
-        }
-    }
-}

最后,需要修改 VERTICES,用以下代码替换现有的定义:

rust
// Changed
-const VERTICES: &[Vertex] = &[
-    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.99240386], }, // A
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.56958647], }, // B
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.05060294], }, // C
-    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.1526709], }, // D
-    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.7347359], }, // E
-];

修改着色器

有了新的 Vertex 结构体,现在是时候更新着色器了。首先需要将 tex_coords 传递给顶点着色器,然后将它们用于片元着色器,以便从采样器获得最终的颜色。让我们从顶点着色器开始:

rust
// 顶点着色器
-
-struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) tex_coords: vec2f,
-}
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) tex_coords: vec2f,
-}
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-) -> VertexOutput {
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.clip_position = vec4f(model.position, 1.0);
-    return out;
-}

现在顶点着色器输出了 tex_coords,我们需要改变片元着色器来接收它们。有了这些坐标,就可以使用采样器从纹理中获取纹素的颜色了:

rust
// 片元着色器
-
-@group(0) @binding(0)
-var t_diffuse: texture_2d<f32>;
-@group(0)@binding(1)
-var s_diffuse: sampler;
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return textureSample(t_diffuse, s_diffuse, in.tex_coords);
-}

变量 t_diffuses_diffuse 就是所谓的 uniforms。我们将在 相机部分 中进一步讨论 uniforms。现在,我们需要知道的是,@group(x) 对应于 set_bind_group() 中的第一个参数,@binding(x) 与我们创建绑定组布局绑定组时指定的 binding 值对应。

渲染结果

现在运行我们的程序,将得到如下渲染效果:

an upside down tree on a hexagon

很奇怪,我们的树是颠倒的! 这是因为 wgpu 的世界坐标的 Y 轴朝上,而纹理坐标的 Y 轴朝下。换句话说,纹理坐标中的(0,0)对应于图像的左上方,而(1,1)是右下方:

happy-tree-uv-coords.png

我们可以通过将每个纹理坐标的 y 坐标替换为 1 - y 来得到纹理的正确朝向:

rust
const VERTICES: &[Vertex] = &[
-    // 修改后的
-    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397], }, // C
-    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732914], }, // D
-    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
-];

现在我们就把树正确地放在五边形上了:

our happy tree as it should be

代码整理

为方便起见,让我们把纹理代码放到自己的模块中。我们首先将 anyhow 添加到 Cargo.toml 文件中,以简化错误处理:

toml
[dependencies]
-image = "0.23"
-glam = "0.24"
-winit = "0.28.7"
-env_logger = "0.10"
-log = "0.4"
-pollster = "0.3"
-wgpu = "0.17"
-bytemuck = { version = "1.14", features = [ "derive" ] }
-anyhow = "1.0" # NEW!

然后,在一个名为 src/texture.rs 的新文件中,添加以下代码:

rust
use image::GenericImageView;
-use anyhow::*;
-
-pub struct Texture {
-    pub texture: wgpu::Texture,
-    pub view: wgpu::TextureView,
-    pub sampler: wgpu::Sampler,
-}
-
-impl Texture {
-    pub fn from_bytes(
-        device: &wgpu::Device,
-        queue: &wgpu::Queue,
-        bytes: &[u8],
-        label: &str
-    ) -> Result<Self> {
-        let img = image::load_from_memory(bytes)?;
-        Self::from_image(device, queue, &img, Some(label))
-    }
-
-    pub fn from_image(
-        device: &wgpu::Device,
-        queue: &wgpu::Queue,
-        img: &image::DynamicImage,
-        label: Option<&str>
-    ) -> Result<Self> {
-        let rgba = img.to_rgba8();
-        let dimensions = img.dimensions();
-
-        let size = wgpu::Extent3d {
-            width: dimensions.0,
-            height: dimensions.1,
-            depth_or_array_layers: 1,
-        };
-        let texture = device.create_texture(
-            &wgpu::TextureDescriptor {
-                label,
-                size,
-                mip_level_count: 1,
-                sample_count: 1,
-                dimension: wgpu::TextureDimension::D2,
-                format: wgpu::TextureFormat::Rgba8UnormSrgb,
-                usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
-                view_formats: &[],
-            }
-        );
-
-        queue.write_texture(
-            wgpu::ImageCopyTexture {
-                aspect: wgpu::TextureAspect::All,
-                texture: &texture,
-                mip_level: 0,
-                origin: wgpu::Origin3d::ZERO,
-            },
-            &rgba,
-            wgpu::ImageDataLayout {
-                offset: 0,
-                bytes_per_row: Some(4 * dimensions.0),
-                rows_per_image: Some(dimensions.1),
-            },
-            size,
-        );
-
-        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
-        let sampler = device.create_sampler(
-            &wgpu::SamplerDescriptor {
-                address_mode_u: wgpu::AddressMode::ClampToEdge,
-                address_mode_v: wgpu::AddressMode::ClampToEdge,
-                address_mode_w: wgpu::AddressMode::ClampToEdge,
-                mag_filter: wgpu::FilterMode::Linear,
-                min_filter: wgpu::FilterMode::Nearest,
-                mipmap_filter: wgpu::FilterMode::Nearest,
-                ..Default::default()
-            }
-        );
-
-        Ok(Self { texture, view, sampler })
-    }
-}

注意,我们使用的是 to_rgba8() 而不是 as_rgba8()。PNG 使用 as_rgba8() 没问题,因为它们有一个 alpha 通道。但是 JPEG 没有 alpha 通道,如果我们试图在 JPEG 纹理图像上调用 as_rgba8(),代码就会陷入恐慌。相反,我们可以使用 to_rgba8() 来处理没有 alpha 通道的图像,它会生成一个新的图像缓冲区。

lib.rs 文件的顶部添加以下代码来将 texture.rs 作为一个模块导入:

rust
mod texture;

new() 函数中的纹理创建代码现在变得简化多了:

rust
surface.configure(&device, &config);
-let diffuse_bytes = include_bytes!("happy-tree.png"); // CHANGED!
-let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap(); // CHANGED!
-
-// 到 \`let texture_bind_group_layout = ...\` 行为止的所有代码现在都可以移除了。

我们仍然需要单独存储绑定组,因为纹理无须知道绑定组的布局。修改创建 diffuse_bind_group 的过程以使用diffuse_textureviewsampler 字段:

rust
let diffuse_bind_group = device.create_bind_group(
-    &wgpu::BindGroupDescriptor {
-        layout: &texture_bind_group_layout,
-        entries: &[
-            wgpu::BindGroupEntry {
-                binding: 0,
-                resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), // CHANGED!
-            },
-            wgpu::BindGroupEntry {
-                binding: 1,
-                resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), // CHANGED!
-            }
-        ],
-        label: Some("diffuse_bind_group"),
-    }
-);

最后,需要更新 State 中的字段以使用全新 Texture 结构体,在未来的教程中还会用到它:

rust
struct State {
-    // ...
-    diffuse_bind_group: wgpu::BindGroup,
-    diffuse_texture: texture::Texture, // NEW
-}
rust
impl State {
-    async fn new() -> Self {
-        // ...
-        Self {
-            // ...
-            num_indices,
-            diffuse_bind_group,
-            diffuse_texture, // NEW
-        }
-    }
-}

经过上边的整理,代码的工作方式还和以前一样,但我们现在有了一个更便利的方式来创建纹理。

挑战

另创建一个纹理,并在你按下空格键时交替使用。

`,91);function A(u,b,m,d,B,g){const a=s("WasmExample"),p=s("AutoGithubLink");return o(),e("div",null,[i,n(a,{example:"tutorial5_textures"}),n(p)])}const h=l(C,[["render",A]]);export{_ as __pageData,h as default}; diff --git a/assets/beginner_tutorial5-textures_index.md.dd7befe7.lean.js b/assets/beginner_tutorial5-textures_index.md.dd7befe7.lean.js deleted file mode 100644 index 947808bc0..000000000 --- a/assets/beginner_tutorial5-textures_index.md.dd7befe7.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as l,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/happy-tree.cf7260cf.png",t="/learn-wgpu-zh/assets/address_mode.a4746540.png",F="/learn-wgpu-zh/assets/upside-down.3b0ae8b3.png",D="/learn-wgpu-zh/assets/happy-tree-uv-coords.dc08858a.png",y="/learn-wgpu-zh/assets/rightside-up.018bc290.png",_=JSON.parse('{"title":"纹理和绑定组","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial5-textures/index.md","filePath":"beginner/tutorial5-textures/index.md","lastUpdated":1701933923000}'),C={name:"beginner/tutorial5-textures/index.md"},i=r("",91);function A(u,b,m,d,B,g){const a=s("WasmExample"),p=s("AutoGithubLink");return o(),e("div",null,[i,n(a,{example:"tutorial5_textures"}),n(p)])}const h=l(C,[["render",A]]);export{_ as __pageData,h as default}; diff --git a/assets/beginner_tutorial6-uniforms_index.md.1fa0ff41.js b/assets/beginner_tutorial6-uniforms_index.md.1fa0ff41.js deleted file mode 100644 index 1978acae0..000000000 --- a/assets/beginner_tutorial6-uniforms_index.md.1fa0ff41.js +++ /dev/null @@ -1,271 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/static-tree.2a7a54d6.png",d=JSON.parse('{"title":"Uniform 缓冲区与 3D 
虚拟摄像机","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial6-uniforms/index.md","filePath":"beginner/tutorial6-uniforms/index.md","lastUpdated":1701933923000}'),t={name:"beginner/tutorial6-uniforms/index.md"},D=r(`

Uniform 缓冲区与 3D 虚拟摄像机

虽然我们之前的渲染似乎都是在 2D 空间下进行的,但实际上我们一直都是在 3D 空间下渲染的!这就是为什么 Vertex 结构体的 position 是 3 个浮点数的数组而不是 2 个。由于我们是在正面观察,所以才无法真正看到场景的立体感。下面将通过创建一个虚拟摄像机Camera)来改变我们的观察视角。

透视摄像机

本教程聚焦于 wgpu 的教学,而不是线性代数,所以会略过很多涉及的数学知识。如果你对线性代数感兴趣,网上有大量的阅读材料。我们将使用 glam 来处理所有数学问题,在 Cargo.toml 中添加以下依赖:

toml
[dependencies]
-# other deps...
-glam = "0.24"

现在让我们开始使用此数学!在 State 结构体上方创建摄像机结构体:

rust
struct Camera {
-    eye: glam::Vec3,
-    target: glam::Vec3,
-    up: glam::Vec3,
-    aspect: f32,
-    fovy: f32,
-    znear: f32,
-    zfar: f32,
-}
-
-impl Camera {
-    fn build_view_projection_matrix(&self) -> glam::Mat4 {
-        // 1.
-        let view = glam::Mat4::look_at_rh(self.eye, self.target, self.up);
-        // 2.
-        let proj = glam::Mat4::perspective_rh(self.fovy.to_radians(), self.aspect, self.znear, self.zfar);
-
-        // 3.
-        return proj * view;
-    }
-}

build_view_projection_matrix 函数实现了视图投影矩阵。

  1. 视图矩阵移动并旋转世界坐标到摄像机所观察的位置。它本质上是摄像机变换的逆矩阵。
  2. 投影矩阵变换场景空间,以产生景深的效果。如果没有它,近处的物对象将与远处的大小相同。
  3. wgpu 的坐标系统是基于 DirectX 和 Metal 的坐标系,在归一化设备坐标中,x 轴和 y 轴的范围是 [-1.0, 1.0],而 z 轴是 [0.0, 1.0]。 移植 OpenGL 程序时需要注意:在 OpenGL 的归一化设备坐标中 z 轴的范围是 [-1.0, 1.0]。

现在我们来给 State 添加上 camera 字段:

rust
struct State {
-    // ...
-    camera: Camera,
-    // ...
-}
-
-async fn new(window: &Window) -> Self {
-    // let diffuse_bind_group ...
-
-    let camera = Camera {
-        // 将摄像机向上移动 1 个单位,向后移动 2 个单位
-        // +z 朝向屏幕外
-        eye: (0.0, 1.0, 2.0).into(),
-        // 摄像机看向原点
-        target: (0.0, 0.0, 0.0).into(),
-        // 定义哪个方向朝上
-        up: glam::Vec3::Y,
-        aspect: config.width as f32 / config.height as f32,
-        fovy: 45.0,
-        znear: 0.1,
-        zfar: 100.0,
-    };
-
-    Self {
-        // ...
-        camera,
-        // ...
-    }
-}

有了可以提供视图投影矩阵的摄像机,我们还需要一些方法将其引入着色器。

Uniform 缓冲区

到目前为止,我们已经使用缓冲区来存储顶点和索引数据,甚至加载纹理。我们将再次使用它来创建一个称之为 uniform 的缓冲区。Uniform 缓冲区也是一个数据块,在一组着色器的每个调用中都可以使用,从技术的角度来看,我们已经为纹理采样器使用了 Uniform 缓冲区。下面将再次使用它们来存储视图投影矩阵,我们先创建一个结构体来保存 uniform:

rust
// 此属性标注数据的内存布局兼容 C-ABI,令其可用于着色器
-#[repr(C)]
-// derive 属性自动导入的这些 trait,令其可被存入缓冲区
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct CameraUniform {
-    // glam 的数据类型不能直接用于 bytemuck
-    // 需要先将 Matrix4 矩阵转为一个 4x4 的浮点数数组
-    view_proj: [[f32; 4]; 4],
-}
-
-impl CameraUniform {
-    fn new() -> Self {
-        Self {
-            view_proj: glam::Mat4::IDENTITY.to_cols_array_2d(),
-        }
-    }
-
-    fn update_view_proj(&mut self, camera: &Camera) {
-        self.view_proj = camera.build_view_projection_matrix().to_cols_array_2d();
-    }
-}

封装好了数据,接下来创建一个名为 camera_buffer 的 Uniform 缓冲区:

rust
// 在 new() 函数中创建 \`camera\` 后
-
-let mut camera_uniform = CameraUniform::new();
-camera_uniform.update_view_proj(&camera);
-
-let camera_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Camera Buffer"),
-        contents: bytemuck::cast_slice(&[camera_uniform]),
-        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-    }
-);

Uniform 缓冲区和绑定组

现在有了一个 Uniform 缓冲区,那该如何使用呢?答案是为它创建一个绑定组。我们得先创建绑定组的布局:

rust
let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-    entries: &[
-        wgpu::BindGroupLayoutEntry {
-            binding: 0,
-            visibility: wgpu::ShaderStages::VERTEX,     // 1
-            ty: wgpu::BindingType::Buffer {
-                ty: wgpu::BufferBindingType::Uniform,
-                has_dynamic_offset: false,              // 2
-                min_binding_size: None,
-            },
-            count: None,
-        }
-    ],
-    label: Some("camera_bind_group_layout"),
-});
  1. 我们只在顶点着色器中需要虚拟摄像机信息,因为要用它来操作顶点
  2. has_dynamic_offset 字段表示这个缓冲区是否会动态改变偏移量。如果我们想一次性在 Uniform 中存储多组数据,并实时修改偏移量来告诉着色器当前使用哪组数据时,这就很有用。

现在,我们可以创建实际的绑定组了:

rust
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-    layout: &camera_bind_group_layout,
-    entries: &[
-        wgpu::BindGroupEntry {
-            binding: 0,
-            resource: camera_buffer.as_entire_binding(),
-        }
-    ],
-    label: Some("camera_bind_group"),
-});

就像对纹理所做的那样,我们需要在管线布局描述符中注册 camera_bind_group_layout

rust
let render_pipeline_layout = device.create_pipeline_layout(
-    &wgpu::PipelineLayoutDescriptor {
-        label: Some("Render Pipeline Layout"),
-        bind_group_layouts: &[
-            &texture_bind_group_layout,
-            &camera_bind_group_layout,
-        ],
-        push_constant_ranges: &[],
-    }
-);

现在,需要将 camera_buffercamera_bind_group 添加到 State 中:

rust
struct State {
-    // ...
-    camera: Camera,
-    camera_uniform: CameraUniform,
-    camera_buffer: wgpu::Buffer,
-    camera_bind_group: wgpu::BindGroup,
-}
-
-async fn new(window: &Window) -> Self {
-    // ...
-    Self {
-        // ...
-        camera,
-        camera_uniform,
-        camera_buffer,
-        camera_bind_group,
-    }
-}

在进入着色器之前,我们要做的最后一件事就是在 render() 函数中使用绑定组

rust
render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
-// 新添加!
-render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
-
-render_pass.draw_indexed(0..self.num_indices, 0, 0..1);

在顶点着色器中使用 uniform

修改顶点着色器以加入如下代码:

rust
// 顶点着色器
-struct CameraUniform {
-    view_proj: mat4x4f,
-};
-@group(1) @binding(0) // 1.
-var<uniform> camera: CameraUniform;
-
-struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) tex_coords: vec2f,
-}
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) tex_coords: vec2f,
-}
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-) -> VertexOutput {
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.clip_position = camera.view_proj * vec4f(model.position, 1.0); // 2.
-    return out;
-}
  1. 因为我们已经创建了一个新的绑定组,所以需要指定在着色器中使用哪一个。这个数字由我们的 render_pipeline_layout 决定。texture_bind_group_layout 被列在第一位,因此它是 group(0),而 camera_bind_group 是第二位,因此它是 group(1)
  2. 当涉及到矩阵时,乘法的顺序很重要。向量在最右边,矩阵按重要性顺序在左边(裁剪空间坐标 = 投影矩阵 x 模型视图矩阵 x 位置向量)。

摄像机控制器

如果现在运行代码,看到的将是如下渲染效果:

./static-tree.png

形状的拉伸度降低了,但它仍然是静态的。你可以尝试移动摄像机的位置使画面动起来,就像游戏中的摄像机通常所做的那样。由于本教程聚焦于 wgpu 的使用,而非用户输入事件的处理,所以仅在此贴出摄像机控制器(CameraController)的代码:

rust
struct CameraController {
-    speed: f32,
-    is_forward_pressed: bool,
-    is_backward_pressed: bool,
-    is_left_pressed: bool,
-    is_right_pressed: bool,
-}
-
-impl CameraController {
-    fn new(speed: f32) -> Self {
-        Self {
-            speed,
-            is_forward_pressed: false,
-            is_backward_pressed: false,
-            is_left_pressed: false,
-            is_right_pressed: false,
-        }
-    }
-
-    fn process_events(&mut self, event: &WindowEvent) -> bool {
-        match event {
-            WindowEvent::KeyboardInput {
-                input: KeyboardInput {
-                    state,
-                    virtual_keycode: Some(keycode),
-                    ..
-                },
-                ..
-            } => {
-                let is_pressed = *state == ElementState::Pressed;
-                match keycode {
-                    VirtualKeyCode::W | VirtualKeyCode::Up => {
-                        self.is_forward_pressed = is_pressed;
-                        true
-                    }
-                    VirtualKeyCode::A | VirtualKeyCode::Left => {
-                        self.is_left_pressed = is_pressed;
-                        true
-                    }
-                    VirtualKeyCode::S | VirtualKeyCode::Down => {
-                        self.is_backward_pressed = is_pressed;
-                        true
-                    }
-                    VirtualKeyCode::D | VirtualKeyCode::Right => {
-                        self.is_right_pressed = is_pressed;
-                        true
-                    }
-                    _ => false,
-                }
-            }
-            _ => false,
-        }
-    }
-
-    fn update_camera(&self, camera: &mut Camera) {
-        let forward = camera.target - camera.eye;
-        let forward_norm = forward.normalize();
-        let forward_mag = forward.length();
-
-        // 防止摄像机离场景中心太近时出现问题
-        if self.is_forward_pressed && forward_mag > self.speed {
-            camera.eye += forward_norm * self.speed;
-        }
-        if self.is_backward_pressed {
-            camera.eye -= forward_norm * self.speed;
-        }
-
-        let right = forward_norm.cross(camera.up);
-
-        // 在按下前进或后退键时重做半径计算
-        let forward = camera.target - camera.eye;
-        let forward_mag = forward.length();
-
-        if self.is_right_pressed {
-            // 重新调整目标和眼睛之间的距离,以便其不发生变化。
-            // 因此,眼睛仍然位于目标和眼睛形成的圆圈上。
-            camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
-        }
-        if self.is_left_pressed {
-            camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag;
-        }
-    }
-}

这段代码并不完美。当你旋转摄像机时,摄像机会慢慢向后移动。虽然已达到了我们的目的,但你还是可以自由地改进它!

我们仍然需要把它插入到现有的代码中使其生效。将控制器添加到 State 中,并在 new() 函数中创建它的实例:

rust
struct State {
-    // ...
-    camera: Camera,
-    // 新添加!
-    camera_controller: CameraController,
-    // ...
-}
-// ...
-impl State {
-    async fn new(window: &Window) -> Self {
-        // ...
-        let camera_controller = CameraController::new(0.2);
-        // ...
-
-        Self {
-            // ...
-            camera_controller,
-            // ...
-        }
-    }
-}

将下边这行代码添加到 input() 函数中。

rust
fn input(&mut self, event: &WindowEvent) -> bool {
-    self.camera_controller.process_events(event)
-}

到目前为止,摄像机控制器还没有真正工作起来。uniform 缓冲区中的值需要被更新。有几种方式可以做到这一点:

  1. 可以创建一个单独的缓冲区,并将其数据复制到 camera_buffer。这个新的缓冲区被称为中继缓冲区(Staging Buffer)。这种方法允许主缓冲区(在这里是指 camera_buffer)的数据只被 GPU 访问,从而令 GPU 能做一些速度上的优化。如果缓冲区能被 CPU 访问,就无法实现此类优化。
  2. 可以在缓冲区本身调用内存映射函数 map_read_asyncmap_write_async。此方式允许我们直接访问缓冲区的数据,但是需要处理异步代码,也需要缓冲区使用 BufferUsages::MAP_READ 和/或 BufferUsages::MAP_WRITE。在此不再详述,如果你想了解更多,可以查看 wgpu without a window 教程。
  3. 可以在 queue 上使用 write_buffer 函数。

我们将使用第 3 种方式。

rust
fn update(&mut self) {
-    self.camera_controller.update_camera(&mut self.camera);
-    self.camera_uniform.update_view_proj(&self.camera);
-    self.queue.write_buffer(&self.camera_buffer, 0, bytemuck::cast_slice(&[self.camera_uniform]));
-}

这就是要做的全部工作了。现在运行代码,将能看到一个带有树木纹理的五边形,并可以用 wasd/arrow 键来旋转和缩放。

挑战

让上面的五边形独立于摄像机进行旋转。提示:你需要另一个矩阵来实现这一点

`,50);function F(y,C,A,i,b,u){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[D,n(a,{example:"tutorial6_uniforms"}),n(l)])}const f=p(t,[["render",F]]);export{d as __pageData,f as default}; diff --git a/assets/beginner_tutorial6-uniforms_index.md.1fa0ff41.lean.js b/assets/beginner_tutorial6-uniforms_index.md.1fa0ff41.lean.js deleted file mode 100644 index 8066fd126..000000000 --- a/assets/beginner_tutorial6-uniforms_index.md.1fa0ff41.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/static-tree.2a7a54d6.png",d=JSON.parse('{"title":"Uniform 缓冲区与 3D 虚拟摄像机","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial6-uniforms/index.md","filePath":"beginner/tutorial6-uniforms/index.md","lastUpdated":1701933923000}'),t={name:"beginner/tutorial6-uniforms/index.md"},D=r("",50);function F(y,C,A,i,b,u){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[D,n(a,{example:"tutorial6_uniforms"}),n(l)])}const f=p(t,[["render",F]]);export{d as __pageData,f as default}; diff --git a/assets/beginner_tutorial6-uniforms_index.md._uEvsrye.js b/assets/beginner_tutorial6-uniforms_index.md._uEvsrye.js new file mode 100644 index 000000000..a787f2db3 --- /dev/null +++ b/assets/beginner_tutorial6-uniforms_index.md._uEvsrye.js @@ -0,0 +1,271 @@ +import{_ as l,D as s,o as p,c as h,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const e="/learn-wgpu-zh/assets/static-tree.p1JxaguT.png",o=JSON.parse('{"title":"Uniform 缓冲区与 3D 虚拟摄像机","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial6-uniforms/index.md","filePath":"beginner/tutorial6-uniforms/index.md","lastUpdated":1703303099000}'),t={name:"beginner/tutorial6-uniforms/index.md"},r=k(`

Uniform 缓冲区与 3D 虚拟摄像机

虽然我们之前的渲染似乎都是在 2D 空间下进行的,但实际上我们一直都是在 3D 空间下渲染的!这就是为什么 Vertex 结构体的 position 是 3 个浮点数的数组而不是 2 个。由于我们是在正面观察,所以才无法真正看到场景的立体感。下面将通过创建一个虚拟摄像机Camera)来改变我们的观察视角。

透视摄像机

本教程聚焦于 wgpu 的教学,而不是线性代数,所以会略过很多涉及的数学知识。如果你对线性代数感兴趣,网上有大量的阅读材料。我们将使用 glam 来处理所有数学问题,在 Cargo.toml 中添加以下依赖:

toml
[dependencies]
+# other deps...
+glam = "0.24"

现在让我们开始使用此数学!在 State 结构体上方创建摄像机结构体:

rust
struct Camera {
+    eye: glam::Vec3,
+    target: glam::Vec3,
+    up: glam::Vec3,
+    aspect: f32,
+    fovy: f32,
+    znear: f32,
+    zfar: f32,
+}
+
+impl Camera {
+    fn build_view_projection_matrix(&self) -> glam::Mat4 {
+        // 1.
+        let view = glam::Mat4::look_at_rh(self.eye, self.target, self.up);
+        // 2.
+        let proj = glam::Mat4::perspective_rh(self.fovy.to_radians(), self.aspect, self.znear, self.zfar);
+
+        // 3.
+        return proj * view;
+    }
+}

build_view_projection_matrix 函数实现了视图投影矩阵。

  1. 视图矩阵移动并旋转世界坐标到摄像机所观察的位置。它本质上是摄像机变换的逆矩阵。
  2. 投影矩阵变换场景空间,以产生景深的效果。如果没有它,近处的物对象将与远处的大小相同。
  3. wgpu 的坐标系统是基于 DirectX 和 Metal 的坐标系,在归一化设备坐标中,x 轴和 y 轴的范围是 [-1.0, 1.0],而 z 轴是 [0.0, 1.0]。 移植 OpenGL 程序时需要注意:在 OpenGL 的归一化设备坐标中 z 轴的范围是 [-1.0, 1.0]。

现在我们来给 State 添加上 camera 字段:

rust
struct State {
+    // ...
+    camera: Camera,
+    // ...
+}
+
+async fn new(window: &Window) -> Self {
+    // let diffuse_bind_group ...
+
+    let camera = Camera {
+        // 将摄像机向上移动 1 个单位,向后移动 2 个单位
+        // +z 朝向屏幕外
+        eye: (0.0, 1.0, 2.0).into(),
+        // 摄像机看向原点
+        target: (0.0, 0.0, 0.0).into(),
+        // 定义哪个方向朝上
+        up: glam::Vec3::Y,
+        aspect: config.width as f32 / config.height as f32,
+        fovy: 45.0,
+        znear: 0.1,
+        zfar: 100.0,
+    };
+
+    Self {
+        // ...
+        camera,
+        // ...
+    }
+}

有了可以提供视图投影矩阵的摄像机,我们还需要一些方法将其引入着色器。

Uniform 缓冲区

到目前为止,我们已经使用缓冲区来存储顶点和索引数据,甚至加载纹理。我们将再次使用它来创建一个称之为 uniform 的缓冲区。Uniform 缓冲区也是一个数据块,在一组着色器的每个调用中都可以使用,从技术的角度来看,我们已经为纹理采样器使用了 Uniform 缓冲区。下面将再次使用它们来存储视图投影矩阵,我们先创建一个结构体来保存 uniform:

rust
// 此属性标注数据的内存布局兼容 C-ABI,令其可用于着色器
+#[repr(C)]
+// derive 属性自动导入的这些 trait,令其可被存入缓冲区
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct CameraUniform {
+    // glam 的数据类型不能直接用于 bytemuck
+    // 需要先将 Matrix4 矩阵转为一个 4x4 的浮点数数组
+    view_proj: [[f32; 4]; 4],
+}
+
+impl CameraUniform {
+    fn new() -> Self {
+        Self {
+            view_proj: glam::Mat4::IDENTITY.to_cols_array_2d(),
+        }
+    }
+
+    fn update_view_proj(&mut self, camera: &Camera) {
+        self.view_proj = camera.build_view_projection_matrix().to_cols_array_2d();
+    }
+}

封装好了数据,接下来创建一个名为 camera_buffer 的 Uniform 缓冲区:

rust
// 在 new() 函数中创建 \`camera\` 后
+
+let mut camera_uniform = CameraUniform::new();
+camera_uniform.update_view_proj(&camera);
+
+let camera_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Camera Buffer"),
+        contents: bytemuck::cast_slice(&[camera_uniform]),
+        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
+    }
+);

Uniform 缓冲区和绑定组

现在有了一个 Uniform 缓冲区,那该如何使用呢?答案是为它创建一个绑定组。我们得先创建绑定组的布局:

rust
let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+    entries: &[
+        wgpu::BindGroupLayoutEntry {
+            binding: 0,
+            visibility: wgpu::ShaderStages::VERTEX,     // 1
+            ty: wgpu::BindingType::Buffer {
+                ty: wgpu::BufferBindingType::Uniform,
+                has_dynamic_offset: false,              // 2
+                min_binding_size: None,
+            },
+            count: None,
+        }
+    ],
+    label: Some("camera_bind_group_layout"),
+});
  1. 我们只在顶点着色器中需要虚拟摄像机信息,因为要用它来操作顶点
  2. has_dynamic_offset 字段表示这个缓冲区是否会动态改变偏移量。如果我们想一次性在 Uniform 中存储多组数据,并实时修改偏移量来告诉着色器当前使用哪组数据时,这就很有用。

现在,我们可以创建实际的绑定组了:

rust
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+    layout: &camera_bind_group_layout,
+    entries: &[
+        wgpu::BindGroupEntry {
+            binding: 0,
+            resource: camera_buffer.as_entire_binding(),
+        }
+    ],
+    label: Some("camera_bind_group"),
+});

就像对纹理所做的那样,我们需要在管线布局描述符中注册 camera_bind_group_layout

rust
let render_pipeline_layout = device.create_pipeline_layout(
+    &wgpu::PipelineLayoutDescriptor {
+        label: Some("Render Pipeline Layout"),
+        bind_group_layouts: &[
+            &texture_bind_group_layout,
+            &camera_bind_group_layout,
+        ],
+        push_constant_ranges: &[],
+    }
+);

现在,需要将 camera_buffercamera_bind_group 添加到 State 中:

rust
struct State {
+    // ...
+    camera: Camera,
+    camera_uniform: CameraUniform,
+    camera_buffer: wgpu::Buffer,
+    camera_bind_group: wgpu::BindGroup,
+}
+
+async fn new(window: &Window) -> Self {
+    // ...
+    Self {
+        // ...
+        camera,
+        camera_uniform,
+        camera_buffer,
+        camera_bind_group,
+    }
+}

在进入着色器之前,我们要做的最后一件事就是在 render() 函数中使用绑定组

rust
render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
+// 新添加!
+render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
+
+render_pass.draw_indexed(0..self.num_indices, 0, 0..1);

在顶点着色器中使用 uniform

修改顶点着色器以加入如下代码:

rust
// 顶点着色器
+struct CameraUniform {
+    view_proj: mat4x4f,
+};
+@group(1) @binding(0) // 1.
+var<uniform> camera: CameraUniform;
+
+struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) tex_coords: vec2f,
+}
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) tex_coords: vec2f,
+}
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+) -> VertexOutput {
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.clip_position = camera.view_proj * vec4f(model.position, 1.0); // 2.
+    return out;
+}
  1. 因为我们已经创建了一个新的绑定组,所以需要指定在着色器中使用哪一个。这个数字由我们的 render_pipeline_layout 决定。texture_bind_group_layout 被列在第一位,因此它是 group(0),而 camera_bind_group 是第二位,因此它是 group(1)
  2. 当涉及到矩阵时,乘法的顺序很重要。向量在最右边,矩阵按重要性顺序在左边(裁剪空间坐标 = 投影矩阵 x 模型视图矩阵 x 位置向量)。

摄像机控制器

如果现在运行代码,看到的将是如下渲染效果:

./static-tree.png

形状的拉伸度降低了,但它仍然是静态的。你可以尝试移动摄像机的位置使画面动起来,就像游戏中的摄像机通常所做的那样。由于本教程聚焦于 wgpu 的使用,而非用户输入事件的处理,所以仅在此贴出摄像机控制器(CameraController)的代码:

rust
struct CameraController {
+    speed: f32,
+    is_forward_pressed: bool,
+    is_backward_pressed: bool,
+    is_left_pressed: bool,
+    is_right_pressed: bool,
+}
+
+impl CameraController {
+    fn new(speed: f32) -> Self {
+        Self {
+            speed,
+            is_forward_pressed: false,
+            is_backward_pressed: false,
+            is_left_pressed: false,
+            is_right_pressed: false,
+        }
+    }
+
+    fn process_events(&mut self, event: &WindowEvent) -> bool {
+        match event {
+            WindowEvent::KeyboardInput {
+                input: KeyboardInput {
+                    state,
+                    virtual_keycode: Some(keycode),
+                    ..
+                },
+                ..
+            } => {
+                let is_pressed = *state == ElementState::Pressed;
+                match keycode {
+                    VirtualKeyCode::W | VirtualKeyCode::Up => {
+                        self.is_forward_pressed = is_pressed;
+                        true
+                    }
+                    VirtualKeyCode::A | VirtualKeyCode::Left => {
+                        self.is_left_pressed = is_pressed;
+                        true
+                    }
+                    VirtualKeyCode::S | VirtualKeyCode::Down => {
+                        self.is_backward_pressed = is_pressed;
+                        true
+                    }
+                    VirtualKeyCode::D | VirtualKeyCode::Right => {
+                        self.is_right_pressed = is_pressed;
+                        true
+                    }
+                    _ => false,
+                }
+            }
+            _ => false,
+        }
+    }
+
+    fn update_camera(&self, camera: &mut Camera) {
+        let forward = camera.target - camera.eye;
+        let forward_norm = forward.normalize();
+        let forward_mag = forward.length();
+
+        // 防止摄像机离场景中心太近时出现问题
+        if self.is_forward_pressed && forward_mag > self.speed {
+            camera.eye += forward_norm * self.speed;
+        }
+        if self.is_backward_pressed {
+            camera.eye -= forward_norm * self.speed;
+        }
+
+        let right = forward_norm.cross(camera.up);
+
+        // 在按下前进或后退键时重做半径计算
+        let forward = camera.target - camera.eye;
+        let forward_mag = forward.length();
+
+        if self.is_right_pressed {
+            // 重新调整目标和眼睛之间的距离,以便其不发生变化。
+            // 因此,眼睛仍然位于目标和眼睛形成的圆圈上。
+            camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
+        }
+        if self.is_left_pressed {
+            camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag;
+        }
+    }
+}

这段代码并不完美。当你旋转摄像机时,摄像机会慢慢向后移动。虽然已达到了我们的目的,但你还是可以自由地改进它!

我们仍然需要把它插入到现有的代码中使其生效。将控制器添加到 State 中,并在 new() 函数中创建它的实例:

rust
struct State {
+    // ...
+    camera: Camera,
+    // 新添加!
+    camera_controller: CameraController,
+    // ...
+}
+// ...
+impl State {
+    async fn new(window: &Window) -> Self {
+        // ...
+        let camera_controller = CameraController::new(0.2);
+        // ...
+
+        Self {
+            // ...
+            camera_controller,
+            // ...
+        }
+    }
+}

将下边这行代码添加到 input() 函数中。

rust
fn input(&mut self, event: &WindowEvent) -> bool {
+    self.camera_controller.process_events(event)
+}

到目前为止,摄像机控制器还没有真正工作起来。uniform 缓冲区中的值需要被更新。有几种方式可以做到这一点:

  1. 可以创建一个单独的缓冲区,并将其数据复制到 camera_buffer。这个新的缓冲区被称为中继缓冲区(Staging Buffer)。这种方法允许主缓冲区(在这里是指 camera_buffer)的数据只被 GPU 访问,从而令 GPU 能做一些速度上的优化。如果缓冲区能被 CPU 访问,就无法实现此类优化。
  2. 可以在缓冲区本身调用内存映射函数 map_read_asyncmap_write_async。此方式允许我们直接访问缓冲区的数据,但是需要处理异步代码,也需要缓冲区使用 BufferUsages::MAP_READ 和/或 BufferUsages::MAP_WRITE。在此不再详述,如果你想了解更多,可以查看 wgpu without a window 教程。
  3. 可以在 queue 上使用 write_buffer 函数。

我们将使用第 3 种方式。

rust
fn update(&mut self) {
+    self.camera_controller.update_camera(&mut self.camera);
+    self.camera_uniform.update_view_proj(&self.camera);
+    self.queue.write_buffer(&self.camera_buffer, 0, bytemuck::cast_slice(&[self.camera_uniform]));
+}

这就是要做的全部工作了。现在运行代码,将能看到一个带有树木纹理的五边形,并可以用 wasd/arrow 键来旋转和缩放。

挑战

让上面的五边形独立于摄像机进行旋转。提示:你需要另一个矩阵来实现这一点

`,50);function E(d,g,y,c,F,b){const a=s("WasmExample"),n=s("AutoGithubLink");return p(),h("div",null,[r,i(a,{example:"tutorial6_uniforms"}),i(n)])}const m=l(t,[["render",E]]);export{o as __pageData,m as default}; diff --git a/assets/beginner_tutorial6-uniforms_index.md._uEvsrye.lean.js b/assets/beginner_tutorial6-uniforms_index.md._uEvsrye.lean.js new file mode 100644 index 000000000..0447d1691 --- /dev/null +++ b/assets/beginner_tutorial6-uniforms_index.md._uEvsrye.lean.js @@ -0,0 +1 @@ +import{_ as l,D as s,o as p,c as h,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const e="/learn-wgpu-zh/assets/static-tree.p1JxaguT.png",o=JSON.parse('{"title":"Uniform 缓冲区与 3D 虚拟摄像机","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial6-uniforms/index.md","filePath":"beginner/tutorial6-uniforms/index.md","lastUpdated":1703303099000}'),t={name:"beginner/tutorial6-uniforms/index.md"},r=k("",50);function E(d,g,y,c,F,b){const a=s("WasmExample"),n=s("AutoGithubLink");return p(),h("div",null,[r,i(a,{example:"tutorial6_uniforms"}),i(n)])}const m=l(t,[["render",E]]);export{o as __pageData,m as default}; diff --git a/assets/beginner_tutorial7-instancing_index.md.442b964d.js b/assets/beginner_tutorial7-instancing_index.md.442b964d.js deleted file mode 100644 index b0223de11..000000000 --- a/assets/beginner_tutorial7-instancing_index.md.442b964d.js +++ /dev/null @@ -1,143 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/forest.7453fae9.png",d=JSON.parse('{"title":"实例化绘制","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial7-instancing/index.md","filePath":"beginner/tutorial7-instancing/index.md","lastUpdated":1701933923000}'),c={name:"beginner/tutorial7-instancing/index.md"},F=r(`

实例化绘制

我们目前的场景非常简单:仅有一个以坐标 (0,0,0) 为中心的对象。如果想要绘制更多的对象呢? 这,就是实例化绘制(Instancing)的用武之地了。

实例化绘制允许我们以不同的属性(位置、方向、大小、颜色等)多次绘制同一个对象。有多种方式可以实现实例化绘制。其中一种方式是修改 Uniform 缓冲区以加入这些属性,并在绘制每个对象实例之前更新它。

出于性能原因,我们不推荐这种方式。因为逐实例更新时,uniform 缓冲区需要为每一帧复制多个缓冲区而消耗 GPU 内存带宽, 且随实例数增加的绘制命令更是会消耗 GPU 的执行时间。

如果查阅 wgpu 文档draw_indexed 函数的参数 ,我们可以看到解决这一问题的方式:

rust
pub fn draw_indexed(
-    &mut self,
-    indices: Range<u32>,
-    base_vertex: i32,
-    instances: Range<u32> // <-- 在这里
-)

instances 参数是范围Range<u32>)类型的值。它命令 GPU 绘制指定对象的多少个实例。目前我们指定的是0..1,它命令 GPU 绘制 1 个实例后停止。如果使用 0..5,我们的代码就绘制 5 个实例。

instances范围类型可能看起来很奇怪,因为使用 1..2 仍然是绘制 1 个实例。似乎直接使用 u32 类型会更简单,对吧?这里是范围类型的原因是:有时我们不想绘制出所有对象; 有时因为其他实例可能不该出现在这一中而只想绘制指定部分的实例; 又或者我们正在调试某组特定的实例。

好了,现在我们知道了如何绘制 1 个对象的多个实例,那么如何告诉 wgpu 要绘制哪些指定的实例呢?我们将要用到实例缓冲区(Instance Buffer)的概念。

实例缓冲区

我们将以类似于创建 Uniform 缓冲区的方式创建一个实例缓冲区。首先,声明一个名为 Instance 的结构体:

rust
// lib.rs
-// ...
-
-// 新增!
-struct Instance {
-    position: glam::Vec3,
-    rotation: glam::Quat,
-}

四元数(Quaternion) 是一种通常用来表示旋转的数学结构。这里不会介绍它背后的数学原理(涉及虚数和 4 维空间)。如果你想深入了解四元数,这里有一篇 Wolfram Alpha 的文章。

着色器中直接使用这些值会有麻烦,因为 WGSL 里没有四元数的数据类型。我不想在着色器中做四元数运算,所以把 Instance 数据转换成了矩阵,并将其存储在一个名为 InstanceRaw 的结构体中:

rust
// 新增!
-#[repr(C)]
-#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct InstanceRaw {
-    model: [[f32; 4]; 4],
-}

这就是将要写入缓冲区的数据。我们拆分出 InstanceRaw 之后,就可以自由地更新 Instance 而无需涉及矩阵,因为 raw 数据只需要在绘制之前更新。

让我们在 Instance 上创建一个函数来计算并返回 InstanceRaw

rust
// 新增!
-impl Instance {
-    fn to_raw(&self) -> InstanceRaw {
-        InstanceRaw {
-            model: (glam::Mat4::from_translation(self.position) * glam::Mat4::from_quat(self.rotation)).to_cols_array_2d(),
-        }
-    }
-}

现在需要给 State 添加两个字段:instancesinstance_buffer

rust
struct State {
-    instances: Vec<Instance>,
-    instance_buffer: wgpu::Buffer,
-}

接下来在 new() 函数中创建实例数据,先定义几个常量用于简化代码:

rust
const NUM_INSTANCES_PER_ROW: u32 = 10;
-const INSTANCE_DISPLACEMENT: glam::Vec3 = glam::Vec3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);

我们将创建一组 10 行 10 列空间排列均匀的实例数据,下边是具体代码:

rust
impl State {
-    async fn new(window: &Window) -> Self {
-        // ...
-        let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
-            (0..NUM_INSTANCES_PER_ROW).map(move |x| {
-                let position = glam::Vec3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
-
-                let rotation = if position.length().abs() <= std::f32::EPSILON {
-                    // 这一行特殊确保在坐标 (0, 0, 0) 处的对象不会被缩放到 0
-                    // 因为错误的四元数会影响到缩放
-                    glam::Quat::from_axis_angle(glam::Vec3::Z, 0.0)
-                } else {
-                    glam::Quat::from_axis_angle(position.normalize(), consts::FRAC_PI_4)
-                };
-
-                Instance {
-                    position, rotation,
-                }
-            })
-        }).collect::<Vec<_>>();
-        // ...
-    }
-}

现在数据已经有了,我们来创建实际的实例缓冲区

rust
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
-let instance_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Instance Buffer"),
-        contents: bytemuck::cast_slice(&instance_data),
-        usage: wgpu::BufferUsages::VERTEX,
-    }
-);

需要为 InstanceRaw 创建一个新的顶点缓冲区布局

rust
impl InstanceRaw {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
-            // step_mode 的值需要从 Vertex 改为 Instance
-            // 这意味着只有着色器开始处理一次新实例化绘制时,才会使用下一个实例数据
-            step_mode: wgpu::VertexStepMode::Instance,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    // 虽然顶点着色器现在只使用了插槽 0 和 1,但在后面的教程中将会使用 2、3 和 4
-                    // 此处从插槽 5 开始,确保与后面的教程不会有冲突
-                    shader_location: 5,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                // mat4 从技术的角度来看是由 4 个 vec4 构成,占用 4 个插槽。
-                // 我们需要为每个 vec4 定义一个插槽,然后在着色器中重新组装出 mat4。
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
-                    shader_location: 6,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
-                    shader_location: 7,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
-                    shader_location: 8,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-            ],
-        }
-    }
-}

我们需要将此布局添加到渲染管线中,以便在渲染时可以使用它:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    // ...
-    vertex: wgpu::VertexState {
-        // ...
-        // 更新!
-        buffers: &[Vertex::desc(), InstanceRaw::desc()],
-    },
-    // ...
-});

别忘了要返回新增的变量:

rust
Self {
-    // ...
-    // 新添加!
-    instances,
-    instance_buffer,
-}

最后,在 render() 函数中绑定 instance_buffer,并修改 draw_indexed() 绘制命令以使用我们实际的实例数:

rust
render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
-render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-// 新添加!
-render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
-
-// 更新!
-render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as _);

当你向数组添加新的实例时,请确保重新创建了 instance_buffercamera_bind_group,否则新实例不会正确显示。

shader.wgsl 中需要引入我们新增的矩阵,这样才能在实例中使用它。请在 shader.wgsl 文件的顶部添加以下代码:

rust
struct InstanceInput {
-    @location(5) model_matrix_0: vec4f,
-    @location(6) model_matrix_1: vec4f,
-    @location(7) model_matrix_2: vec4f,
-    @location(8) model_matrix_3: vec4f,
-};

在使用之前,我们需要将矩阵重新组装出来:

rust
@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    let model_matrix = mat4x4f(
-        instance.model_matrix_0,
-        instance.model_matrix_1,
-        instance.model_matrix_2,
-        instance.model_matrix_3,
-    );
-    // Continued...
-}

我们得在应用 camera_uniform.view_proj 之前先应用 model_matrix。因为 view_proj 将坐标系从世界空间(World Space)变换为相机空间(Camera Space),而 model_matrix 是一个世界空间的变换,所以在使用它时不希望处于相机空间

rust
@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    // ...
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.clip_position = camera.view_proj * model_matrix * vec4f(model.position, 1.0);
-    return out;
-}

完成后,应该就能看到一片树林了!

./forest.png

挑战

逐帧变更实例的位置 和/或 旋转弧度。

',45);function D(y,C,A,i,b,u){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[F,n(a,{example:"tutorial7_instancing"}),n(l)])}const B=p(c,[["render",D]]);export{d as __pageData,B as default}; diff --git a/assets/beginner_tutorial7-instancing_index.md.442b964d.lean.js b/assets/beginner_tutorial7-instancing_index.md.442b964d.lean.js deleted file mode 100644 index afdec8f9f..000000000 --- a/assets/beginner_tutorial7-instancing_index.md.442b964d.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/forest.7453fae9.png",d=JSON.parse('{"title":"实例化绘制","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial7-instancing/index.md","filePath":"beginner/tutorial7-instancing/index.md","lastUpdated":1701933923000}'),c={name:"beginner/tutorial7-instancing/index.md"},F=r("",45);function D(y,C,A,i,b,u){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[F,n(a,{example:"tutorial7_instancing"}),n(l)])}const B=p(c,[["render",D]]);export{d as __pageData,B as default}; diff --git a/assets/beginner_tutorial7-instancing_index.md.VCbHZtRg.js b/assets/beginner_tutorial7-instancing_index.md.VCbHZtRg.js new file mode 100644 index 000000000..b672b7aa6 --- /dev/null +++ b/assets/beginner_tutorial7-instancing_index.md.VCbHZtRg.js @@ -0,0 +1,143 @@ +import{_ as p,D as s,o as h,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/forest.KVLLckoU.png",u=JSON.parse('{"title":"实例化绘制","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial7-instancing/index.md","filePath":"beginner/tutorial7-instancing/index.md","lastUpdated":1703303099000}'),e={name:"beginner/tutorial7-instancing/index.md"},r=k(`

实例化绘制

我们目前的场景非常简单:仅有一个以坐标 (0,0,0) 为中心的对象。如果想要绘制更多的对象呢? 这,就是实例化绘制(Instancing)的用武之地了。

实例化绘制允许我们以不同的属性(位置、方向、大小、颜色等)多次绘制同一个对象。有多种方式可以实现实例化绘制。其中一种方式是修改 Uniform 缓冲区以加入这些属性,并在绘制每个对象实例之前更新它。

出于性能原因,我们不推荐这种方式。因为逐实例更新时,uniform 缓冲区需要为每一帧复制多个缓冲区而消耗 GPU 内存带宽, 且随实例数增加的绘制命令更是会消耗 GPU 的执行时间。

如果查阅 wgpu 文档draw_indexed 函数的参数 ,我们可以看到解决这一问题的方式:

rust
pub fn draw_indexed(
+    &mut self,
+    indices: Range<u32>,
+    base_vertex: i32,
+    instances: Range<u32> // <-- 在这里
+)

instances 参数是范围Range<u32>)类型的值。它命令 GPU 绘制指定对象的多少个实例。目前我们指定的是0..1,它命令 GPU 绘制 1 个实例后停止。如果使用 0..5,我们的代码就绘制 5 个实例。

instances范围类型可能看起来很奇怪,因为使用 1..2 仍然是绘制 1 个实例。似乎直接使用 u32 类型会更简单,对吧?这里是范围类型的原因是:有时我们不想绘制出所有对象; 有时因为其他实例可能不该出现在这一中而只想绘制指定部分的实例; 又或者我们正在调试某组特定的实例。

好了,现在我们知道了如何绘制 1 个对象的多个实例,那么如何告诉 wgpu 要绘制哪些指定的实例呢?我们将要用到实例缓冲区(Instance Buffer)的概念。

实例缓冲区

我们将以类似于创建 Uniform 缓冲区的方式创建一个实例缓冲区。首先,声明一个名为 Instance 的结构体:

rust
// lib.rs
+// ...
+
+// 新增!
+struct Instance {
+    position: glam::Vec3,
+    rotation: glam::Quat,
+}

四元数(Quaternion) 是一种通常用来表示旋转的数学结构。这里不会介绍它背后的数学原理(涉及虚数和 4 维空间)。如果你想深入了解四元数,这里有一篇 Wolfram Alpha 的文章。

着色器中直接使用这些值会有麻烦,因为 WGSL 里没有四元数的数据类型。我不想在着色器中做四元数运算,所以把 Instance 数据转换成了矩阵,并将其存储在一个名为 InstanceRaw 的结构体中:

rust
// 新增!
+#[repr(C)]
+#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct InstanceRaw {
+    model: [[f32; 4]; 4],
+}

这就是将要写入缓冲区的数据。我们拆分出 InstanceRaw 之后,就可以自由地更新 Instance 而无需涉及矩阵,因为 raw 数据只需要在绘制之前更新。

让我们在 Instance 上创建一个函数来计算并返回 InstanceRaw

rust
// 新增!
+impl Instance {
+    fn to_raw(&self) -> InstanceRaw {
+        InstanceRaw {
+            model: (glam::Mat4::from_translation(self.position) * glam::Mat4::from_quat(self.rotation)).to_cols_array_2d(),
+        }
+    }
+}

现在需要给 State 添加两个字段:instancesinstance_buffer

rust
struct State {
+    instances: Vec<Instance>,
+    instance_buffer: wgpu::Buffer,
+}

接下来在 new() 函数中创建实例数据,先定义几个常量用于简化代码:

rust
const NUM_INSTANCES_PER_ROW: u32 = 10;
+const INSTANCE_DISPLACEMENT: glam::Vec3 = glam::Vec3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);

我们将创建一组 10 行 10 列空间排列均匀的实例数据,下边是具体代码:

rust
impl State {
+    async fn new(window: &Window) -> Self {
+        // ...
+        let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
+            (0..NUM_INSTANCES_PER_ROW).map(move |x| {
+                let position = glam::Vec3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
+
+                let rotation = if position.length().abs() <= std::f32::EPSILON {
+                    // 这一行特殊确保在坐标 (0, 0, 0) 处的对象不会被缩放到 0
+                    // 因为错误的四元数会影响到缩放
+                    glam::Quat::from_axis_angle(glam::Vec3::Z, 0.0)
+                } else {
+                    glam::Quat::from_axis_angle(position.normalize(), consts::FRAC_PI_4)
+                };
+
+                Instance {
+                    position, rotation,
+                }
+            })
+        }).collect::<Vec<_>>();
+        // ...
+    }
+}

现在数据已经有了,我们来创建实际的实例缓冲区

rust
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
+let instance_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Instance Buffer"),
+        contents: bytemuck::cast_slice(&instance_data),
+        usage: wgpu::BufferUsages::VERTEX,
+    }
+);

需要为 InstanceRaw 创建一个新的顶点缓冲区布局

rust
impl InstanceRaw {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
+            // step_mode 的值需要从 Vertex 改为 Instance
+            // 这意味着只有着色器开始处理一次新实例化绘制时,才会使用下一个实例数据
+            step_mode: wgpu::VertexStepMode::Instance,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    // 虽然顶点着色器现在只使用了插槽 0 和 1,但在后面的教程中将会使用 2、3 和 4
+                    // 此处从插槽 5 开始,确保与后面的教程不会有冲突
+                    shader_location: 5,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                // mat4 从技术的角度来看是由 4 个 vec4 构成,占用 4 个插槽。
+                // 我们需要为每个 vec4 定义一个插槽,然后在着色器中重新组装出 mat4。
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
+                    shader_location: 6,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
+                    shader_location: 7,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
+                    shader_location: 8,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+            ],
+        }
+    }
+}

我们需要将此布局添加到渲染管线中,以便在渲染时可以使用它:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    // ...
+    vertex: wgpu::VertexState {
+        // ...
+        // 更新!
+        buffers: &[Vertex::desc(), InstanceRaw::desc()],
+    },
+    // ...
+});

别忘了要返回新增的变量:

rust
Self {
+    // ...
+    // 新添加!
+    instances,
+    instance_buffer,
+}

最后,在 render() 函数中绑定 instance_buffer,并修改 draw_indexed() 绘制命令以使用我们实际的实例数:

rust
render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
+render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+// 新添加!
+render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
+
+// 更新!
+render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as _);

当你向数组添加新的实例时,请确保重新创建了 instance_buffercamera_bind_group,否则新实例不会正确显示。

shader.wgsl 中需要引入我们新增的矩阵,这样才能在实例中使用它。请在 shader.wgsl 文件的顶部添加以下代码:

rust
struct InstanceInput {
+    @location(5) model_matrix_0: vec4f,
+    @location(6) model_matrix_1: vec4f,
+    @location(7) model_matrix_2: vec4f,
+    @location(8) model_matrix_3: vec4f,
+};

在使用之前,我们需要将矩阵重新组装出来:

rust
@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    let model_matrix = mat4x4f(
+        instance.model_matrix_0,
+        instance.model_matrix_1,
+        instance.model_matrix_2,
+        instance.model_matrix_3,
+    );
+    // Continued...
+}

我们得在应用 camera_uniform.view_proj 之前先应用 model_matrix。因为 view_proj 将坐标系从世界空间(World Space)变换为相机空间(Camera Space),而 model_matrix 是一个世界空间的变换,所以在使用它时不希望处于相机空间

rust
@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    // ...
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.clip_position = camera.view_proj * model_matrix * vec4f(model.position, 1.0);
+    return out;
+}

完成后,应该就能看到一片树林了!

./forest.png

挑战

逐帧变更实例的位置 和/或 旋转弧度。

',45);function E(d,g,y,F,c,o){const a=s("WasmExample"),n=s("AutoGithubLink");return h(),l("div",null,[r,i(a,{example:"tutorial7_instancing"}),i(n)])}const m=p(e,[["render",E]]);export{u as __pageData,m as default}; diff --git a/assets/beginner_tutorial7-instancing_index.md.VCbHZtRg.lean.js b/assets/beginner_tutorial7-instancing_index.md.VCbHZtRg.lean.js new file mode 100644 index 000000000..4755c7059 --- /dev/null +++ b/assets/beginner_tutorial7-instancing_index.md.VCbHZtRg.lean.js @@ -0,0 +1 @@ +import{_ as p,D as s,o as h,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/forest.KVLLckoU.png",u=JSON.parse('{"title":"实例化绘制","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial7-instancing/index.md","filePath":"beginner/tutorial7-instancing/index.md","lastUpdated":1703303099000}'),e={name:"beginner/tutorial7-instancing/index.md"},r=k("",45);function E(d,g,y,F,c,o){const a=s("WasmExample"),n=s("AutoGithubLink");return h(),l("div",null,[r,i(a,{example:"tutorial7_instancing"}),i(n)])}const m=p(e,[["render",E]]);export{u as __pageData,m as default}; diff --git a/assets/beginner_tutorial8-depth_index.md.0c_yzyfM.js b/assets/beginner_tutorial8-depth_index.md.0c_yzyfM.js new file mode 100644 index 000000000..2268c637f --- /dev/null +++ b/assets/beginner_tutorial8-depth_index.md.0c_yzyfM.js @@ -0,0 +1,82 @@ +import{_ as p,D as s,o as h,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/depth_problems._syaXayK.png",e="/learn-wgpu-zh/assets/forest_fixed._xtODI5M.png",D=JSON.parse('{"title":"深度缓冲区","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial8-depth/index.md","filePath":"beginner/tutorial8-depth/index.md","lastUpdated":1703303099000}'),r={name:"beginner/tutorial8-depth/index.md"},d=k('

深度缓冲区

让我们换个摄像机角度来仔细观察上个教程中的例子:

depth_problems.png

应该排在后面的对象被渲染在了前面的对象之前。这是由绘制顺序引起的。默认情况下,新对象的像素数据将取代帧缓冲区(FrameBuffer)相同坐标上旧的像素数据。

有两种方式可以解决这个问题:将数据从后往前排序; 或者使用深度缓冲区(Depth Buffer)。

从后往前排序

这是 2D 渲染的常用方法,因为很容易计算绘制对象的前后关系,甚至可以直接使用 Z 轴顺序。而在 3D 渲染中就有点棘手了,因为对象的前后关系会根据摄像机的角度而改变。

一个简单的方法是按照对象摄像机的距离来排序。但这种方法也有缺陷,因为当大对象的模型中心坐标处在小对象后面时,大对象中本应在小对象前面的部分也会被渲染到后面。我们还会遇到对象本身重叠的问题。

如果想正确地实现绘制对象的前后关系,就需要有像素级的精度。这,就是 深度缓冲区 的作用。

像素深度

深度缓冲区是一个用来存储已渲染像素的 Z 轴坐标的纹理。在绘制新的像素时,wgpu 使用它来决定是替换数据还是丢弃。这种技术被称为深度测试,它将解决绘制顺序问题,而不需要我们对绘制对象进行排序!

让我们在 texture.rs 中添加一个函数来创建深度纹理

rust
impl Texture {
+    pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; // 1.
+
+    pub fn create_depth_texture(device: &wgpu::Device, config: &wgpu::SurfaceConfiguration, label: &str) -> Self {
+        let size = wgpu::Extent3d { // 2.
+            width: config.width,
+            height: config.height,
+            depth_or_array_layers: 1,
+        };
+        let desc = wgpu::TextureDescriptor {
+            label: Some(label),
+            size,
+            mip_level_count: 1,
+            sample_count: 1,
+            dimension: wgpu::TextureDimension::D2,
+            format: Self::DEPTH_FORMAT,
+            usage: wgpu::TextureUsages::RENDER_ATTACHMENT // 3.
+                | wgpu::TextureUsages::TEXTURE_BINDING,
+            view_formats: &[],
+        };
+        let texture = device.create_texture(&desc);
+
+        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+        let sampler = device.create_sampler(
+            &wgpu::SamplerDescriptor { // 4.
+                address_mode_u: wgpu::AddressMode::ClampToEdge,
+                address_mode_v: wgpu::AddressMode::ClampToEdge,
+                address_mode_w: wgpu::AddressMode::ClampToEdge,
+                mag_filter: wgpu::FilterMode::Linear,
+                min_filter: wgpu::FilterMode::Linear,
+                mipmap_filter: wgpu::FilterMode::Nearest,
+                compare: Some(wgpu::CompareFunction::LessEqual), // 5.
+                lod_min_clamp: 0.0,
+                lod_max_clamp: 200.0,
+                ..Default::default()
+            }
+        );
+
+        Self { texture, view, sampler }
+    }
+}
  1. 定义 DEPTH_FORMAT 用于创建深度纹理render_pipeline 需要的 DepthStencilState 对象。
  2. 深度纹理的宽高需要与展示平面一致(更准确的说,是需要与当前的 Color Attachment 一致)。我们传入展示平面使用的 config 参数来确保它们的宽高相同。
  3. 由于要对这个纹理进行渲染,我们需要给它添加 RENDER_ATTACHMENT 使用范围标志。
  4. 从技术的角度来看,我们并不需要深度纹理的采样器,只是我们的 Texture 结构体需要它。

现在在 State::new() 中创建我们的 depth_texture

rust
let depth_texture = texture::Texture::create_depth_texture(&device, &config, "depth_texture");

然后修改渲染管线以启用深度测试

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    // ...
+    depth_stencil: Some(wgpu::DepthStencilState {
+        format: texture::Texture::DEPTH_FORMAT,
+        depth_write_enabled: true,
+        depth_compare: wgpu::CompareFunction::Less, // 1.
+        stencil: wgpu::StencilState::default(), // 2.
+        bias: wgpu::DepthBiasState::default(),
+    }),
+    // ...
+});
  1. depth_compare 字段指定通过深度测试的条件。使用 LESS 意味着像素将被从后往前绘制,大于当前位置的深度值的像素将被丢弃。下面是可选的所有枚举值:
rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum CompareFunction {
+    Undefined = 0,
+    Never = 1,
+    Less = 2,
+    Equal = 3,
+    LessEqual = 4,
+    Greater = 5,
+    NotEqual = 6,
+    GreaterEqual = 7,
+    Always = 8,
+}
  1. 还有一种类型的缓冲区叫做模版缓冲区(Stencil Buffer)。模版缓冲区和深度缓冲区通常被存储在同一个纹理中。这些字段控制着模版测试的数值。目前我们没有使用模版缓冲区,这里就使用默认值。在以后的教程中再详细介绍模版缓冲区。

不要忘了在 State 中存储 depth_texture

rust
Self {
+// ...
+depth_texture,
+}

还要记得修改 resize() 函数来更新深度纹理及它的纹理视图

rust
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+    // ...
+    self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture");
+    // ...
+}

请确保更新了 config 之后一定要更新 depth_texture,否则程序就会崩溃,因为此时 depth_texturesurface 纹理的宽高已经不一致了(还记得上边提到过的 “深度纹理的宽高需要与展示平面一致” 吗?)。

最后是修改 render() 函数,我们已经创建了深度纹理,但目前还没有使用。只需把它绑定到渲染通道depth_stencil_attachment 字段即可:

rust
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+    // ...
+    depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
+        view: &self.depth_texture.view,
+        depth_ops: Some(wgpu::Operations {
+            load: wgpu::LoadOp::Clear(1.0),
+            store: wgpu::StoreOp::Store
+        }),
+        stencil_ops: None,
+    }),
+    ..Default::default()
+});

这就是我们所要做的!不涉及着色器代码!现在运行该应用程序,将看到深度问题已不复存在:

forest_fixed.png

挑战

深度缓冲区是一张纹理,所以我们可以在着色器中对其采样。请为深度纹理创建一个绑定组(或重用现有的),并将其渲染到屏幕上。

',32);function E(g,y,F,c,o,u){const a=s("WasmExample"),n=s("AutoGithubLink");return h(),l("div",null,[d,i(a,{example:"tutorial8_depth"}),i(n)])}const C=p(r,[["render",E]]);export{D as __pageData,C as default}; diff --git a/assets/beginner_tutorial8-depth_index.md.0c_yzyfM.lean.js b/assets/beginner_tutorial8-depth_index.md.0c_yzyfM.lean.js new file mode 100644 index 000000000..feeafb1e7 --- /dev/null +++ b/assets/beginner_tutorial8-depth_index.md.0c_yzyfM.lean.js @@ -0,0 +1 @@ +import{_ as p,D as s,o as h,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/depth_problems._syaXayK.png",e="/learn-wgpu-zh/assets/forest_fixed._xtODI5M.png",D=JSON.parse('{"title":"深度缓冲区","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial8-depth/index.md","filePath":"beginner/tutorial8-depth/index.md","lastUpdated":1703303099000}'),r={name:"beginner/tutorial8-depth/index.md"},d=k("",32);function E(g,y,F,c,o,u){const a=s("WasmExample"),n=s("AutoGithubLink");return h(),l("div",null,[d,i(a,{example:"tutorial8_depth"}),i(n)])}const C=p(r,[["render",E]]);export{D as __pageData,C as default}; diff --git a/assets/beginner_tutorial8-depth_index.md.3d09ac63.js b/assets/beginner_tutorial8-depth_index.md.3d09ac63.js deleted file mode 100644 index 5fe1a9601..000000000 --- a/assets/beginner_tutorial8-depth_index.md.3d09ac63.js +++ /dev/null @@ -1,82 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/depth_problems.9a7af250.png",c="/learn-wgpu-zh/assets/forest_fixed.6ec7a22b.png",m=JSON.parse('{"title":"深度缓冲区","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial8-depth/index.md","filePath":"beginner/tutorial8-depth/index.md","lastUpdated":1701933923000}'),F={name:"beginner/tutorial8-depth/index.md"},D=r('

深度缓冲区

让我们换个摄像机角度来仔细观察上个教程中的例子:

depth_problems.png

应该排在后面的对象被渲染在了前面的对象之前。这是由绘制顺序引起的。默认情况下,新对象的像素数据将取代帧缓冲区(FrameBuffer)相同坐标上旧的像素数据。

有两种方式可以解决这个问题:将数据从后往前排序; 或者使用深度缓冲区(Depth Buffer)。

从后往前排序

这是 2D 渲染的常用方法,因为很容易计算绘制对象的前后关系,甚至可以直接使用 Z 轴顺序。而在 3D 渲染中就有点棘手了,因为对象的前后关系会根据摄像机的角度而改变。

一个简单的方法是按照对象摄像机的距离来排序。但这种方法也有缺陷,因为当大对象的模型中心坐标处在小对象后面时,大对象中本应在小对象前面的部分也会被渲染到后面。我们还会遇到对象本身重叠的问题。

如果想正确地实现绘制对象的前后关系,就需要有像素级的精度。这,就是 深度缓冲区 的作用。

像素深度

深度缓冲区是一个用来存储已渲染像素的 Z 轴坐标的纹理。在绘制新的像素时,wgpu 使用它来决定是替换数据还是丢弃。这种技术被称为深度测试,它将解决绘制顺序问题,而不需要我们对绘制对象进行排序!

让我们在 texture.rs 中添加一个函数来创建深度纹理

rust
impl Texture {
-    pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; // 1.
-
-    pub fn create_depth_texture(device: &wgpu::Device, config: &wgpu::SurfaceConfiguration, label: &str) -> Self {
-        let size = wgpu::Extent3d { // 2.
-            width: config.width,
-            height: config.height,
-            depth_or_array_layers: 1,
-        };
-        let desc = wgpu::TextureDescriptor {
-            label: Some(label),
-            size,
-            mip_level_count: 1,
-            sample_count: 1,
-            dimension: wgpu::TextureDimension::D2,
-            format: Self::DEPTH_FORMAT,
-            usage: wgpu::TextureUsages::RENDER_ATTACHMENT // 3.
-                | wgpu::TextureUsages::TEXTURE_BINDING,
-            view_formats: &[],
-        };
-        let texture = device.create_texture(&desc);
-
-        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
-        let sampler = device.create_sampler(
-            &wgpu::SamplerDescriptor { // 4.
-                address_mode_u: wgpu::AddressMode::ClampToEdge,
-                address_mode_v: wgpu::AddressMode::ClampToEdge,
-                address_mode_w: wgpu::AddressMode::ClampToEdge,
-                mag_filter: wgpu::FilterMode::Linear,
-                min_filter: wgpu::FilterMode::Linear,
-                mipmap_filter: wgpu::FilterMode::Nearest,
-                compare: Some(wgpu::CompareFunction::LessEqual), // 5.
-                lod_min_clamp: 0.0,
-                lod_max_clamp: 200.0,
-                ..Default::default()
-            }
-        );
-
-        Self { texture, view, sampler }
-    }
-}
  1. 定义 DEPTH_FORMAT 用于创建深度纹理render_pipeline 需要的 DepthStencilState 对象。
  2. 深度纹理的宽高需要与展示平面一致(更准确的说,是需要与当前的 Color Attachment 一致)。我们传入展示平面使用的 config 参数来确保它们的宽高相同。
  3. 由于要对这个纹理进行渲染,我们需要给它添加 RENDER_ATTACHMENT 使用范围标志。
  4. 从技术的角度来看,我们不需要深度纹理的采样器,是我们的 Texture 结构体需要它。

现在在 State::new() 中创建我们的 depth_texture

rust
let depth_texture = texture::Texture::create_depth_texture(&device, &config, "depth_texture");

然后修改渲染管线以启用深度测试

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    // ...
-    depth_stencil: Some(wgpu::DepthStencilState {
-        format: texture::Texture::DEPTH_FORMAT,
-        depth_write_enabled: true,
-        depth_compare: wgpu::CompareFunction::Less, // 1.
-        stencil: wgpu::StencilState::default(), // 2.
-        bias: wgpu::DepthBiasState::default(),
-    }),
-    // ...
-});
  1. depth_compare 字段指定通过深度测试的条件。使用 LESS 意味着像素将被从后往前绘制,大于当前位置的深度值的像素将被丢弃。下面是可选的所有枚举值:
rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-pub enum CompareFunction {
-    Undefined = 0,
-    Never = 1,
-    Less = 2,
-    Equal = 3,
-    LessEqual = 4,
-    Greater = 5,
-    NotEqual = 6,
-    GreaterEqual = 7,
-    Always = 8,
-}
  1. 还有一种类型的缓冲区叫做模版缓冲区(Stencil Buffer)。模版缓冲区和深度缓冲区通常被存储在同一个纹理中。这些字段控制着模版测试的数值。目前我们没有使用模版缓冲区,这里就使用默认值。在以后教程中再详情介绍模版缓冲区。

不要忘了在 State 中存储 depth_texture

rust
Self {
-// ...
-depth_texture,
-}

还要记得修改 resize() 函数来更新深度纹理及它的纹理视图

rust
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-    // ...
-    self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture");
-    // ...
-}

请确保更新了 config 之后一定要更新 depth_texture,否则程序就会崩溃,因为此时 depth_texturesurface 纹理的宽高已经不一致了(还记得上边提到过的 “深度纹理的宽高需要与展示平面一致” 吗?)。

最后是修改 render() 函数,我们已经创建了深度纹理,但目前还没有使用。只需把它绑定到渲染通道depth_stencil_attachment 字段即可:

rust
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-    // ...
-    depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
-        view: &self.depth_texture.view,
-        depth_ops: Some(wgpu::Operations {
-            load: wgpu::LoadOp::Clear(1.0),
-            store: wgpu::StoreOp::Store
-        }),
-        stencil_ops: None,
-    }),
-    ..Default::default()
-});

这就是我们所要做的!不涉及着色器代码!现在运行该应用程序,将看到深度问题已不复存在:

forest_fixed.png

挑战

深度缓冲区是一张纹理,所以我们可以在着色器中对其采样。请为深度纹理创建一个绑定组(或重用现有的),并将其渲染到屏幕上。

',32);function y(C,A,i,u,b,d){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[D,n(a,{example:"tutorial8_depth"}),n(l)])}const g=p(F,[["render",y]]);export{m as __pageData,g as default}; diff --git a/assets/beginner_tutorial8-depth_index.md.3d09ac63.lean.js b/assets/beginner_tutorial8-depth_index.md.3d09ac63.lean.js deleted file mode 100644 index 17a031a16..000000000 --- a/assets/beginner_tutorial8-depth_index.md.3d09ac63.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/depth_problems.9a7af250.png",c="/learn-wgpu-zh/assets/forest_fixed.6ec7a22b.png",m=JSON.parse('{"title":"深度缓冲区","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial8-depth/index.md","filePath":"beginner/tutorial8-depth/index.md","lastUpdated":1701933923000}'),F={name:"beginner/tutorial8-depth/index.md"},D=r("",32);function y(C,A,i,u,b,d){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[D,n(a,{example:"tutorial8_depth"}),n(l)])}const g=p(F,[["render",y]]);export{m as __pageData,g as default}; diff --git a/assets/beginner_tutorial9-models_index.md.cc4e4508.js b/assets/beginner_tutorial9-models_index.md.cc4e4508.js deleted file mode 100644 index ca6afee9b..000000000 --- a/assets/beginner_tutorial9-models_index.md.cc4e4508.js +++ /dev/null @@ -1,377 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/cubes.57cd381f.png",t="/learn-wgpu-zh/assets/cube-diffuse.989b7825.jpg",F="/learn-wgpu-zh/assets/cubes-correct.f1d8f512.png",g=JSON.parse('{"title":"模型加载","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial9-models/index.md","filePath":"beginner/tutorial9-models/index.md","lastUpdated":1701933923000}'),D={name:"beginner/tutorial9-models/index.md"},y=r(`

模型加载

到目前为止,我们一直在手动创建模型。简单的模型当然可以这么干,但如果是有成千上万多边形的复杂模型,那就行不通了。因此,我们将修改代码以利用 .obj 模型格式,以便可以利用 Blender 等软件来创建模型并运用到项目中。

lib.rs 文件中堆砌的代码已经很多了,让我们创建一个 model.rs 文件来安置所有模型加载相关的代码:

rust
// model.rs
-pub trait Vertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a>;
-}
-
-#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-pub struct ModelVertex {
-    pub position: [f32; 3],
-    pub tex_coords: [f32; 2],
-    pub normal: [f32; 3],
-}
-
-impl Vertex for ModelVertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        todo!();
-    }
-}

你会注意到这里有几点变化:

首先是 Vertex, 它在 lib.rs 中是一个结构体,而这里我们改为了 trait。我们会有多种顶点类型(模型、UI、实例数据等),Vertex 做为 trait 令我们能从其中抽象出 VertexBufferLayout 的创建函数,从而简化渲染管线的创建。

其次是 ModelVertex 中新增了 normal 字段。在讨论光照之前暂时不会用到它。

让我们来创建 VertexBufferLayout

rust
impl Vertex for ModelVertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    shader_location: 0,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
-                    shader_location: 1,
-                    format: wgpu::VertexFormat::Float32x2,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
-                    shader_location: 2,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-            ],
-        }
-    }
-}

这与原来的 VertexBufferLayout 基本相同,只是为 normal 添加了一个 VertexAttribute。删除 lib.rs 中我们已不再需要的旧 Vertex 结构体,并在 RenderPipeline 中使用来自 model 的新 Vertex:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    // ...
-    vertex: wgpu::VertexState {
-        // ...
-        buffers: &[model::ModelVertex::desc(), InstanceRaw::desc()],
-    },
-    // ...
-});

由于 desc 接口是定义在 Vertex trait 上的,因此需要先导入 Vertex,然后才能调用到该接口的具体实现。只需将导入代码放在文件顶部:

rust
use model::Vertex;

现在,我们需要一个用于渲染的模型。你可以使用自己的模型,我这也提供了一个模型及其纹理的 zip 压缩包 。我们将新建一个与 src 目录同级的 res 目录来安置这个模型。

访问资源文件

cargo 在构建并运行程序时会设置一个当前工作目录,该目录通常就是放置了 Cargo.toml 文件的项目根目录。资源(res)目录的路径会因项目的结构而异。本节教程示例代码的资源目录位于 code/beginner/tutorial9-models/res/。我们加载模型时可以使用这个路径,仅需在路径后拼上 cube.obj。这似乎很完美,可一旦修改项目的目录结构,写在代码里的路径就不可用了。

所以,我们通过修改构建脚本,将 res 目录复制到 cargo 创建可执行文件的位置来解决此问题,然后再从那里引用资源文件。创建一个 build.rs(构建文件的默认名称)文件并添加以下代码:

rust
use anyhow::*;
-use fs_extra::copy_items;
-use fs_extra::dir::CopyOptions;
-use std::env;
-
-fn main() -> Result<()> {
-    // 这一行告诉 cargo 如果 /res/ 目录中的内容发生了变化,就重新运行脚本
-    println!("cargo:rerun-if-changed=res/*");
-
-    let out_dir = env::var("OUT_DIR")?;
-    let mut copy_options = CopyOptions::new();
-    copy_options.overwrite = true;
-    let mut paths_to_copy = Vec::new();
-    paths_to_copy.push("res/");
-    copy_items(&paths_to_copy, out_dir, &copy_options)?;
-
-    Ok(())
-}

确保将 build.rs 放在与 Cargo.toml 相同的目录中,只有这样,在项目构建时 cargo 才能运行此构建脚本。

OUT_DIR 是一个环境变量,cargo 用它来指定应用程序将在哪里构建。

还需修改 Cargo.toml 来让构建脚本能正常运行,在构建依赖[build-dependencies])配置里添加以下依赖项:

toml
[build-dependencies]
-anyhow = "1.0"
-fs_extra = "1.3"
-glob = "0.3"

从 WASM 访问文件

遵循 WASM 规范,你不能在 Web Assembly 中访问用户文件系统上的文件。所以,我们利用 web 服务来提供这些文件,然后使用 http 请求将文件加载到代码中。让我们创建一个名为 resources.rs 的文件来处理这个问题,创建两个函数分别用于加载文本文件和二进制文件:

rust
use std::io::{BufReader, Cursor};
-
-use cfg_if::cfg_if;
-use wgpu::util::DeviceExt;
-
-use crate::{model, texture};
-
-#[cfg(target_arch = "wasm32")]
-fn format_url(file_name: &str) -> reqwest::Url {
-    let window = web_sys::window().unwrap();
-    let location = window.location();
-    let base = reqwest::Url::parse(&format!(
-        "{}/{}/",
-        location.origin().unwrap(),
-        option_env!("RES_PATH").unwrap_or("res"),
-    )).unwrap();
-    base.join(file_name).unwrap()
-}
-
-pub async fn load_string(file_name: &str) -> anyhow::Result<String> {
-    cfg_if! {
-        if #[cfg(target_arch = "wasm32")] {
-            let url = format_url(file_name);
-            let txt = reqwest::get(url)
-                .await?
-                .text()
-                .await?;
-        } else {
-            let path = std::path::Path::new(env!("OUT_DIR"))
-                .join("res")
-                .join(file_name);
-            let txt = std::fs::read_to_string(path)?;
-        }
-    }
-
-    Ok(txt)
-}
-
-pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
-    cfg_if! {
-        if #[cfg(target_arch = "wasm32")] {
-            let url = format_url(file_name);
-            let data = reqwest::get(url)
-                .await?
-                .bytes()
-                .await?
-                .to_vec();
-        } else {
-            let path = std::path::Path::new(env!("OUT_DIR"))
-                .join("res")
-                .join(file_name);
-            let data = std::fs::read(path)?;
-        }
-    }
-
-    Ok(data)
-}

桌面环境里,我们是使用 OUT_DIR 环境变量来访问资源目录。

在 WASM 环境里,我们使用了 reqwest 来处理网络请求。需将以下依赖项添加到 Cargo.toml:

toml
[target.'cfg(target_arch = "wasm32")'.dependencies]
-# Other dependencies
-reqwest = { version = "0.11" }

还需要将 Location 功能添加到 web-sys 的 features 数组里:

toml
web-sys = { version = "0.3.64", features = [
-    "Document",
-    "Window",
-    "Element",
-    "Location",
-]}

确保 resources 作为模块已添加到 lib.rs 中:

rust
mod resources;

使用 TOBJ 加载模型

加载模型是使用的 tobj 。让我们将其添加到 Cargo.toml 中:

toml
[dependencies]
-# other dependencies...
-tobj = { version = "3.2.1", features = [
-    "async",
-]}

在加载模型之前,我们需要有一个结构体来存放模型数据:

rust
// model.rs
-pub struct Model {
-    pub meshes: Vec<Mesh>,
-    pub materials: Vec<Material>,
-}

Model 结构体中 meshesmaterials 两个字段都是动态数组类型。这很重要,因为一个 obj 文件可以包含多个网格材质。下面我们接着来创建 MeshMaterial 结构体:

rust
pub struct Material {
-    pub name: String,
-    pub diffuse_texture: texture::Texture,
-    pub bind_group: wgpu::BindGroup,
-}
-
-pub struct Mesh {
-    pub name: String,
-    pub vertex_buffer: wgpu::Buffer,
-    pub index_buffer: wgpu::Buffer,
-    pub num_elements: u32,
-    pub material: usize,
-}

Material 很简单,它主要有一个名称字段和一个纹理字段。名称更多是被用于程序调试。我们的立方体模型实际上有 2 个纹理,但其中一个是法线贴图稍后 我们会介绍这些纹理。

说到纹理,我们还需在 resources.rs 中添加一个函数来加载 Texture

rust
pub async fn load_texture(
-    file_name: &str,
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-) -> anyhow::Result<texture::Texture> {
-    let data = load_binary(file_name).await?;
-    texture::Texture::from_bytes(device, queue, &data, file_name)
-}

load_texture 函数在为模型加载纹理时会很有用,因为 include_bytes! 宏要求我们在编译阶段就指定文件名称并加载纹理数据到构建的程序包内,而我们希望模型纹理能根据需要动态加载。

Mesh 包含一个顶点缓冲区、一个索引缓冲区和网格中的索引数,material 字段被定义为 usize 类型,它将用于在绘制时索引 materials 列表。

完成上面这些后,我们就可以加载模型了:

rust
pub async fn load_model(
-    file_name: &str,
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-    layout: &wgpu::BindGroupLayout,
-) -> anyhow::Result<model::Model> {
-    let obj_text = load_string(file_name).await?;
-    let obj_cursor = Cursor::new(obj_text);
-    let mut obj_reader = BufReader::new(obj_cursor);
-
-    let (models, obj_materials) = tobj::load_obj_buf_async(
-        &mut obj_reader,
-        &tobj::LoadOptions {
-            triangulate: true,
-            single_index: true,
-            ..Default::default()
-        },
-        |p| async move {
-            let mat_text = load_string(&p).await.unwrap();
-            tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
-        },
-    )
-    .await?;
-
-    let mut materials = Vec::new();
-    for m in obj_materials? {
-        let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
-        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-            layout,
-            entries: &[
-                wgpu::BindGroupEntry {
-                    binding: 0,
-                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
-                },
-                wgpu::BindGroupEntry {
-                    binding: 1,
-                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
-                },
-            ],
-            label: None,
-        });
-
-        materials.push(model::Material {
-            name: m.name,
-            diffuse_texture,
-            bind_group,
-        })
-    }
-
-    let meshes = models
-        .into_iter()
-        .map(|m| {
-            let vertices = (0..m.mesh.positions.len() / 3)
-                .map(|i| model::ModelVertex {
-                    position: [
-                        m.mesh.positions[i * 3],
-                        m.mesh.positions[i * 3 + 1],
-                        m.mesh.positions[i * 3 + 2],
-                    ],
-                    tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
-                    normal: [
-                        m.mesh.normals[i * 3],
-                        m.mesh.normals[i * 3 + 1],
-                        m.mesh.normals[i * 3 + 2],
-                    ],
-                })
-                .collect::<Vec<_>>();
-
-            let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                label: Some(&format!("{:?} Vertex Buffer", file_name)),
-                contents: bytemuck::cast_slice(&vertices),
-                usage: wgpu::BufferUsages::VERTEX,
-            });
-            let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                label: Some(&format!("{:?} Index Buffer", file_name)),
-                contents: bytemuck::cast_slice(&m.mesh.indices),
-                usage: wgpu::BufferUsages::INDEX,
-            });
-
-            model::Mesh {
-                name: file_name.to_string(),
-                vertex_buffer,
-                index_buffer,
-                num_elements: m.mesh.indices.len() as u32,
-                material: m.mesh.material_id.unwrap_or(0),
-            }
-        })
-        .collect::<Vec<_>>();
-
-    Ok(model::Model { meshes, materials })
-}

渲染网格

在能够绘制完整模型之前,需要能绘制单个网格对象。让我们创建一个名为 DrawModel 的 trait,并为 RenderPass 实现它:

rust
// model.rs
-pub trait DrawModel<'a> {
-    fn draw_mesh(&mut self, mesh: &'a Mesh);
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'a Mesh,
-        instances: Range<u32>,
-    );
-}
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    fn draw_mesh(&mut self, mesh: &'b Mesh) {
-        self.draw_mesh_instanced(mesh, 0..1);
-    }
-
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'b Mesh,
-        instances: Range<u32>,
-    ){
-        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
-        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
-        self.draw_indexed(0..mesh.num_elements, 0, instances);
-    }
-}

把这些函数放在 impl Model 中也是可以的,但我觉得让渲染通道做所有的渲染(准确地说,渲染通道只是编码所有的渲染命令)更加合理,因为这是它的工作。这也意味着在渲染时必须先导入 DrawModel trait:

rust
// lib.rs
-render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
-render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
-
-use model::DrawModel;
-render_pass.draw_mesh_instanced(&self.obj_model.meshes[0], 0..self.instances.len() as u32);

在开始绘制之前,需要实际加载模型并将其保存到 State 实例。请在 State::new() 中加入以下代码:

rust
let obj_model = resources::load_model(
-    "cube.obj",
-    &device,
-    &queue,
-    &texture_bind_group_layout,
-).await.unwrap();

我们的新模型比之前的五角星要大一些,所以需要调整一下实例间的间距:

rust
const SPACE_BETWEEN: f32 = 3.0;
-let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
-    (0..NUM_INSTANCES_PER_ROW).map(move |x| {
-        let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
-        let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
-
-        let position = glam::Vec3 { x, y: 0.0, z };
-
-        let rotation = if position.length().abs() <= std::f32::EPSILON {
-             glam::Quat::from_axis_angle(glam::Vec3::Z, 0.0)
-        } else {
-            glam::Quat::from_axis_angle(position.normalize(), consts::FRAC_PI_4)
-        };
-
-        Instance {
-            position, rotation,
-        }
-    })
-}).collect::<Vec<_>>();

完成上面这些后,运行项目你就能看到如下渲染效果:

cubes.png

使用正确的纹理

我们目前看到的是还是之前的树纹理,它显然不是 obj 文件里的纹理。正确的纹理应该是下边这个:

cube-diffuse.jpg

这其中的原因很简单:尽管我们已经创建了纹理,但还没有创建一个绑定组来给 RenderPass,使用的仍然是 diffuse_bind_group

如果想修正这一点,我们就需要使用材质绑定组--Material 结构体的 bind_group 字段。

现在,我们来给 DrawModel 添加一个材质参数:

rust
pub trait DrawModel<'a> {
-    fn draw_mesh(&mut self, mesh: &'a Mesh, material: &'a Material, camera_bind_group: &'a wgpu::BindGroup);
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'a Mesh,
-        material: &'a Material,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-    );
-
-}
-
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    fn draw_mesh(&mut self, mesh: &'b Mesh, material: &'b Material, camera_bind_group: &'b wgpu::BindGroup) {
-        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group);
-    }
-
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'b Mesh,
-        material: &'b Material,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
-        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
-        self.set_bind_group(0, &material.bind_group, &[]);
-        self.set_bind_group(1, camera_bind_group, &[]);
-        self.draw_indexed(0..mesh.num_elements, 0, instances);
-    }
-}

接下来修改渲染代码以使用正确的材质参数:

rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-
-render_pass.set_pipeline(&self.render_pipeline);
-
-let mesh = &self.obj_model.meshes[0];
-let material = &self.obj_model.materials[mesh.material];
-render_pass.draw_mesh_instanced(mesh, material, 0..self.instances.len() as u32, &self.camera_bind_group);

全部修改完毕,就能看到如下渲染效果:

cubes-correct.png

渲染完整模型

上边的代码直接指定了网格和对应的材质。这对使用不同的材质绘制网格很有用。

我们还没有渲染模型的其他部分,让我们为 DrawModel 新增一个函数,它将绘制模型的所有网格和对应的材质:

rust
pub trait DrawModel<'a> {
-    // ...
-    fn draw_model(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup);
-    fn draw_model_instanced(
-        &mut self,
-        model: &'a Model,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-    );
-}
-
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a, {
-    // ...
-    fn draw_model(&mut self, model: &'b Model, camera_bind_group: &'b wgpu::BindGroup) {
-        self.draw_model_instanced(model, 0..1, camera_bind_group);
-    }
-
-    fn draw_model_instanced(
-        &mut self,
-        model: &'b Model,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-    ) {
-        for mesh in &model.meshes {
-            let material = &model.materials[mesh.material];
-            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group);
-        }
-    }
-}

lib.rs 中的代码也相应地修改一下以调用新的 draw_model_instanced 函数:

rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-render_pass.set_pipeline(&self.render_pipeline);
-render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.camera_bind_group);
`,74);function C(A,i,b,u,m,B){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[y,n(a,{example:"tutorial9_models"}),n(l)])}const f=p(D,[["render",C]]);export{g as __pageData,f as default}; diff --git a/assets/beginner_tutorial9-models_index.md.cc4e4508.lean.js b/assets/beginner_tutorial9-models_index.md.cc4e4508.lean.js deleted file mode 100644 index 7d82b7ad4..000000000 --- a/assets/beginner_tutorial9-models_index.md.cc4e4508.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/cubes.57cd381f.png",t="/learn-wgpu-zh/assets/cube-diffuse.989b7825.jpg",F="/learn-wgpu-zh/assets/cubes-correct.f1d8f512.png",g=JSON.parse('{"title":"模型加载","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial9-models/index.md","filePath":"beginner/tutorial9-models/index.md","lastUpdated":1701933923000}'),D={name:"beginner/tutorial9-models/index.md"},y=r("",74);function C(A,i,b,u,m,B){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[y,n(a,{example:"tutorial9_models"}),n(l)])}const f=p(D,[["render",C]]);export{g as __pageData,f as default}; diff --git a/assets/beginner_tutorial9-models_index.md.gSp6U_hw.js b/assets/beginner_tutorial9-models_index.md.gSp6U_hw.js new file mode 100644 index 000000000..cdff07202 --- /dev/null +++ b/assets/beginner_tutorial9-models_index.md.gSp6U_hw.js @@ -0,0 +1,377 @@ +import{_ as h,D as s,o as l,c as p,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/cubes.N5MdxQg4.png",e="/learn-wgpu-zh/assets/cube-diffuse.GqZGxA7U.jpg",r="/learn-wgpu-zh/assets/cubes-correct.kqVZNaXG.png",C=JSON.parse('{"title":"模型加载","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial9-models/index.md","filePath":"beginner/tutorial9-models/index.md","lastUpdated":1703303099000}'),E={name:"beginner/tutorial9-models/index.md"},d=k(`

模型加载

到目前为止,我们一直在手动创建模型。简单的模型当然可以这么干,但如果是有成千上万多边形的复杂模型,那就行不通了。因此,我们将修改代码以利用 .obj 模型格式,以便可以利用 Blender 等软件来创建模型并运用到项目中。

lib.rs 文件中堆砌的代码已经很多了,让我们创建一个 model.rs 文件来安置所有模型加载相关的代码:

rust
// model.rs
+pub trait Vertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a>;
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+pub struct ModelVertex {
+    pub position: [f32; 3],
+    pub tex_coords: [f32; 2],
+    pub normal: [f32; 3],
+}
+
+impl Vertex for ModelVertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        todo!();
+    }
+}

你会注意到这里有几点变化:

首先是 Vertex, 它在 lib.rs 中是一个结构体,而这里我们改为了 trait。我们会有多种顶点类型(模型、UI、实例数据等),Vertex 做为 trait 令我们能从其中抽象出 VertexBufferLayout 的创建函数,从而简化渲染管线的创建。

其次是 ModelVertex 中新增了 normal 字段。在讨论光照之前暂时不会用到它。

让我们来创建 VertexBufferLayout

rust
impl Vertex for ModelVertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    shader_location: 0,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+                    shader_location: 1,
+                    format: wgpu::VertexFormat::Float32x2,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
+                    shader_location: 2,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+            ],
+        }
+    }
+}

这与原来的 VertexBufferLayout 基本相同,只是为 normal 添加了一个 VertexAttribute。删除 lib.rs 中我们已不再需要的旧 Vertex 结构体,并在 RenderPipeline 中使用来自 model 的新 Vertex:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    // ...
+    vertex: wgpu::VertexState {
+        // ...
+        buffers: &[model::ModelVertex::desc(), InstanceRaw::desc()],
+    },
+    // ...
+});

由于 desc 接口是定义在 Vertex trait 上的,因此需要先导入 Vertex,然后才能调用到该接口的具体实现。只需将导入代码放在文件顶部:

rust
use model::Vertex;

现在,我们需要一个用于渲染的模型。你可以使用自己的模型,我这也提供了一个模型及其纹理的 zip 压缩包 。我们将新建一个与 src 目录同级的 res 目录来安置这个模型。

访问资源文件

cargo 在构建并运行程序时会设置一个当前工作目录,该目录通常就是放置了 Cargo.toml 文件的项目根目录。资源(res)目录的路径会因项目的结构而异。本节教程示例代码的资源目录位于 code/beginner/tutorial9-models/res/。我们加载模型时可以使用这个路径,仅需在路径后拼上 cube.obj。这似乎很完美,可一旦修改项目的目录结构,写在代码里的路径就不可用了。

所以,我们通过修改构建脚本,将 res 目录复制到 cargo 创建可执行文件的位置来解决此问题,然后再从那里引用资源文件。创建一个 build.rs(构建文件的默认名称)文件并添加以下代码:

rust
use anyhow::*;
+use fs_extra::copy_items;
+use fs_extra::dir::CopyOptions;
+use std::env;
+
+fn main() -> Result<()> {
+    // 这一行告诉 cargo 如果 /res/ 目录中的内容发生了变化,就重新运行脚本
+    println!("cargo:rerun-if-changed=res/*");
+
+    let out_dir = env::var("OUT_DIR")?;
+    let mut copy_options = CopyOptions::new();
+    copy_options.overwrite = true;
+    let mut paths_to_copy = Vec::new();
+    paths_to_copy.push("res/");
+    copy_items(&paths_to_copy, out_dir, &copy_options)?;
+
+    Ok(())
+}

确保将 build.rs 放在与 Cargo.toml 相同的目录中,只有这样,在项目构建时 cargo 才能此运行构建脚本。

OUT_DIR 是一个环境变量,cargo 用它来指定应用程序将在哪里构建。

还需修改 Cargo.toml 来让构建脚本能正常运行,在构建依赖[build-dependencies])配置里添加以下依赖项:

toml
[build-dependencies]
+anyhow = "1.0"
+fs_extra = "1.3"
+glob = "0.3"

从 WASM 访问文件

遵循 WASM 规范,你不能在 Web Assembly 中访问用户文件系统上的文件。所以,我们利用 web 服务来提供这些文件,然后使用 http 请求将文件加载 ​​ 到代码中。让我们创建一个名为 resources.rs 的文件来处理这个问题,创建两个函数分别用于加载文本文件和二进制文件:

rust
use std::io::{BufReader, Cursor};
+
+use cfg_if::cfg_if;
+use wgpu::util::DeviceExt;
+
+use crate::{model, texture};
+
+#[cfg(target_arch = "wasm32")]
+fn format_url(file_name: &str) -> reqwest::Url {
+    let window = web_sys::window().unwrap();
+    let location = window.location();
+    let base = reqwest::Url::parse(&format!(
+        "{}/{}/",
+        location.origin().unwrap(),
+        option_env!("RES_PATH").unwrap_or("res"),
+    )).unwrap();
+    base.join(file_name).unwrap()
+}
+
+pub async fn load_string(file_name: &str) -> anyhow::Result<String> {
+    cfg_if! {
+        if #[cfg(target_arch = "wasm32")] {
+            let url = format_url(file_name);
+            let txt = reqwest::get(url)
+                .await?
+                .text()
+                .await?;
+        } else {
+            let path = std::path::Path::new(env!("OUT_DIR"))
+                .join("res")
+                .join(file_name);
+            let txt = std::fs::read_to_string(path)?;
+        }
+    }
+
+    Ok(txt)
+}
+
+pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
+    cfg_if! {
+        if #[cfg(target_arch = "wasm32")] {
+            let url = format_url(file_name);
+            let data = reqwest::get(url)
+                .await?
+                .bytes()
+                .await?
+                .to_vec();
+        } else {
+            let path = std::path::Path::new(env!("OUT_DIR"))
+                .join("res")
+                .join(file_name);
+            let data = std::fs::read(path)?;
+        }
+    }
+
+    Ok(data)
+}

桌面环境里,我们是使用 OUT_DIR 环境变量来访问资源目录。

在 WASM 环境里,我们使用了 reqwest 来处理网络请求。需将以下依赖项添加到 Cargo.toml:

toml
[target.'cfg(target_arch = "wasm32")'.dependencies]
+# Other dependencies
+reqwest = { version = "0.11" }

还需要将 Location 功能添加到 web-sys 的 features 数组里:

toml
web-sys = { version = "0.3.64", features = [
+    "Document",
+    "Window",
+    "Element",
+    "Location",
+]}

确保 resources 作为模块已添加到 lib.rs 中:

rust
mod resources;

使用 TOBJ 加载模型

加载模型是使用的 tobj 。让我们将其添加到 Cargo.toml 中:

toml
[dependencies]
+# other dependencies...
+tobj = { version = "3.2.1", features = [
+    "async",
+]}

在加载模型之前,我们需要有一个结构体来存放模型数据:

rust
// model.rs
+pub struct Model {
+    pub meshes: Vec<Mesh>,
+    pub materials: Vec<Material>,
+}

Model 结构体中 meshesmaterials 两个字段都是动态数组类型。这很重要,因为一个 obj 文件可以包含多个网格材质。下面我们接着来创建 MeshMaterial 结构体:

rust
pub struct Material {
+    pub name: String,
+    pub diffuse_texture: texture::Texture,
+    pub bind_group: wgpu::BindGroup,
+}
+
+pub struct Mesh {
+    pub name: String,
+    pub vertex_buffer: wgpu::Buffer,
+    pub index_buffer: wgpu::Buffer,
+    pub num_elements: u32,
+    pub material: usize,
+}

Material 很简单,它主要有一个名称字段和一个纹理字段。名称更多是被用于程序调试。我们的立方体模型实际上有 2 个纹理,但其中一个是法线贴图稍后 我们会介绍这些纹理。

说到纹理,我们还需在 resources.rs 中添加一个函数来加载 Texture

rust
pub async fn load_texture(
+    file_name: &str,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+) -> anyhow::Result<texture::Texture> {
+    let data = load_binary(file_name).await?;
+    texture::Texture::from_bytes(device, queue, &data, file_name)
+}

load_texture 函数在为模型加载纹理会很有用,因为include_bytes! 宏要求我们在编译阶段就指定文件名称并加载纹理数据到构建的程序包内,而我们希望模型纹理能根据需要动态加载。

Mesh 包含一个顶点缓冲区、一个索引缓冲区和网格中的索引数,material 字段被定义为 usize 类型,它将用于在绘制时索引 materials 列表。

完成上面这些后,我们就可以加载模型了:

rust
pub async fn load_model(
+    file_name: &str,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    layout: &wgpu::BindGroupLayout,
+) -> anyhow::Result<model::Model> {
+    let obj_text = load_string(file_name).await?;
+    let obj_cursor = Cursor::new(obj_text);
+    let mut obj_reader = BufReader::new(obj_cursor);
+
+    let (models, obj_materials) = tobj::load_obj_buf_async(
+        &mut obj_reader,
+        &tobj::LoadOptions {
+            triangulate: true,
+            single_index: true,
+            ..Default::default()
+        },
+        |p| async move {
+            let mat_text = load_string(&p).await.unwrap();
+            tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
+        },
+    )
+    .await?;
+
+    let mut materials = Vec::new();
+    for m in obj_materials? {
+        let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
+        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            layout,
+            entries: &[
+                wgpu::BindGroupEntry {
+                    binding: 0,
+                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 1,
+                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
+                },
+            ],
+            label: None,
+        });
+
+        materials.push(model::Material {
+            name: m.name,
+            diffuse_texture,
+            bind_group,
+        })
+    }
+
+    let meshes = models
+        .into_iter()
+        .map(|m| {
+            let vertices = (0..m.mesh.positions.len() / 3)
+                .map(|i| model::ModelVertex {
+                    position: [
+                        m.mesh.positions[i * 3],
+                        m.mesh.positions[i * 3 + 1],
+                        m.mesh.positions[i * 3 + 2],
+                    ],
+                    tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
+                    normal: [
+                        m.mesh.normals[i * 3],
+                        m.mesh.normals[i * 3 + 1],
+                        m.mesh.normals[i * 3 + 2],
+                    ],
+                })
+                .collect::<Vec<_>>();
+
+            let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+                label: Some(&format!("{:?} Vertex Buffer", file_name)),
+                contents: bytemuck::cast_slice(&vertices),
+                usage: wgpu::BufferUsages::VERTEX,
+            });
+            let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+                label: Some(&format!("{:?} Index Buffer", file_name)),
+                contents: bytemuck::cast_slice(&m.mesh.indices),
+                usage: wgpu::BufferUsages::INDEX,
+            });
+
+            model::Mesh {
+                name: file_name.to_string(),
+                vertex_buffer,
+                index_buffer,
+                num_elements: m.mesh.indices.len() as u32,
+                material: m.mesh.material_id.unwrap_or(0),
+            }
+        })
+        .collect::<Vec<_>>();
+
+    Ok(model::Model { meshes, materials })
+}

渲染网格

在能够绘制完整模型之前,需要能绘制单个网格对象。让我们创建一个名为 DrawModel 的 trait,并为 RenderPass 实现它:

rust
// model.rs
+pub trait DrawModel<'a> {
+    fn draw_mesh(&mut self, mesh: &'a Mesh);
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'a Mesh,
+        instances: Range<u32>,
+    );
+}
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    fn draw_mesh(&mut self, mesh: &'b Mesh) {
+        self.draw_mesh_instanced(mesh, 0..1);
+    }
+
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'b Mesh,
+        instances: Range<u32>,
+    ){
+        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
+        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        self.draw_indexed(0..mesh.num_elements, 0, instances);
+    }
+}

把这些函数放在 impl Model 中也是可以的,但我觉得让渲染通道做所有的渲染(准确地说,渲染通道只是编码所有的渲染命令)更加合理,因为这是它的工作。这也意味着在渲染时必须先导入 DrawModel trait:

rust
// lib.rs
+render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
+render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
+
+use model::DrawModel;
+render_pass.draw_mesh_instanced(&self.obj_model.meshes[0], 0..self.instances.len() as u32);

在开始绘制之前,需要实际加载模型并将其保存到 State 实例。请在 State::new() 中加入以下代码:

rust
let obj_model = resources::load_model(
+    "cube.obj",
+    &device,
+    &queue,
+    &texture_bind_group_layout,
+).await.unwrap();

我们的新模型比之前的五角星要大一些,所以需要调整一下实例间的间距:

rust
const SPACE_BETWEEN: f32 = 3.0;
+let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
+    (0..NUM_INSTANCES_PER_ROW).map(move |x| {
+        let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
+        let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
+
+        let position = glam::Vec3 { x, y: 0.0, z };
+
+        let rotation = if position.length().abs() <= std::f32::EPSILON {
+             glam::Quat::from_axis_angle(glam::Vec3::Z, 0.0)
+        } else {
+            glam::Quat::from_axis_angle(position.normalize(), consts::FRAC_PI_4)
+        };
+
+        Instance {
+            position, rotation,
+        }
+    })
+}).collect::<Vec<_>>();

完成上面这些后,运行项目你就能看到如下渲染效果:

cubes.png

使用正确的纹理

我们目前看到的是还是之前的树纹理,它显然不是 obj 文件里的纹理。正确的纹理应该是下边这个:

cube-diffuse.jpg

这其中的原因很简单:尽管我们已经创建了纹理,但还没有创建一个绑定组来给 RenderPass,使用的仍然是 diffuse_bind_group

如果想修正这一点,我们就需要使用材质绑定组--Material 结构体的 bind_group 字段。

现在,我们来给 DrawModel 添加一个材质参数:

rust
pub trait DrawModel<'a> {
+    fn draw_mesh(&mut self, mesh: &'a Mesh, material: &'a Material, camera_bind_group: &'a wgpu::BindGroup);
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'a Mesh,
+        material: &'a Material,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+    );
+
+}
+
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    fn draw_mesh(&mut self, mesh: &'b Mesh, material: &'b Material, camera_bind_group: &'b wgpu::BindGroup) {
+        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group);
+    }
+
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'b Mesh,
+        material: &'b Material,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
+        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        self.set_bind_group(0, &material.bind_group, &[]);
+        self.set_bind_group(1, camera_bind_group, &[]);
+        self.draw_indexed(0..mesh.num_elements, 0, instances);
+    }
+}

接下来修改渲染代码以使用正确的材质参数:

rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+
+render_pass.set_pipeline(&self.render_pipeline);
+
+let mesh = &self.obj_model.meshes[0];
+let material = &self.obj_model.materials[mesh.material];
+render_pass.draw_mesh_instanced(mesh, material, 0..self.instances.len() as u32, &self.camera_bind_group);

全部修改完毕,就能看到如下渲染效果:

cubes-correct.png

渲染完整模型

上边的代码直接指定了网格和对应的材质。这对使用不同的材质绘制网格很有用。

我们还没有渲染模型的其他部分,让我们为 DrawModel 新增一个函数,它将绘制模型的所有网格和对应的材质:

rust
pub trait DrawModel<'a> {
+    // ...
+    fn draw_model(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup);
+    fn draw_model_instanced(
+        &mut self,
+        model: &'a Model,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+    );
+}
+
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a, {
+    // ...
+    fn draw_model(&mut self, model: &'b Model, camera_bind_group: &'b wgpu::BindGroup) {
+        self.draw_model_instanced(model, 0..1, camera_bind_group);
+    }
+
+    fn draw_model_instanced(
+        &mut self,
+        model: &'b Model,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            let material = &model.materials[mesh.material];
+            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group);
+        }
+    }
+}

lib.rs 中的代码也相应地修改一下以调用新的 draw_model_instanced 函数:

rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+render_pass.set_pipeline(&self.render_pipeline);
+render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.camera_bind_group);
`,74);function g(y,F,c,b,u,m){const a=s("WasmExample"),n=s("AutoGithubLink");return l(),p("div",null,[d,i(a,{example:"tutorial9_models"}),i(n)])}const D=h(E,[["render",g]]);export{C as __pageData,D as default}; diff --git a/assets/beginner_tutorial9-models_index.md.gSp6U_hw.lean.js b/assets/beginner_tutorial9-models_index.md.gSp6U_hw.lean.js new file mode 100644 index 000000000..56c7a647c --- /dev/null +++ b/assets/beginner_tutorial9-models_index.md.gSp6U_hw.lean.js @@ -0,0 +1 @@ +import{_ as h,D as s,o as l,c as p,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/cubes.N5MdxQg4.png",e="/learn-wgpu-zh/assets/cube-diffuse.GqZGxA7U.jpg",r="/learn-wgpu-zh/assets/cubes-correct.kqVZNaXG.png",C=JSON.parse('{"title":"模型加载","description":"","frontmatter":{},"headers":[],"relativePath":"beginner/tutorial9-models/index.md","filePath":"beginner/tutorial9-models/index.md","lastUpdated":1703303099000}'),E={name:"beginner/tutorial9-models/index.md"},d=k("",74);function g(y,F,c,b,u,m){const a=s("WasmExample"),n=s("AutoGithubLink");return l(),p("div",null,[d,i(a,{example:"tutorial9_models"}),i(n)])}const D=h(E,[["render",g]]);export{C as __pageData,D as default}; diff --git a/assets/beginner_wgsl.md.44PkYt5K.js b/assets/beginner_wgsl.md.44PkYt5K.js new file mode 100644 index 000000000..defeb531f --- /dev/null +++ b/assets/beginner_wgsl.md.44PkYt5K.js @@ -0,0 +1,254 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const c=JSON.parse('{"title":"WGSL 着色器语言","description":"WebGPU 着色器语言 WGSL 介绍及与 GLSL 的语法对比","frontmatter":{"head":[["meta",{"name":"description","content":"WebGPU 着色器语言 WGSL 介绍及与 GLSL 的语法对比"}],["meta",{"name":"keywords","content":"WGSL GLSL Shader WebGPU wgpu"}]]},"headers":[],"relativePath":"beginner/wgsl.md","filePath":"beginner/wgsl.md","lastUpdated":1703303099000}'),l={name:"beginner/wgsl.md"},h=n(`

WGSL 着色器语言

WGSL 的来由

WebGPU 的目标是要在各个现代底层图形 API 之上抽象出一套统一的图形 API,而每个底层图形 API 后端都有自己的着色语言:

WGSL (WebGPU Shading Language) 出现之前,很多开发者或团队是通过宏及各种转译工具来将自己的着色器编译到不同目标平台的,他们自然是希望有一个标准化的统一语言。

WebGPU 成员花了 2 年半的时间来争论 WebGPU 是否应该有自己的着色语言。kvark 将这场争论中的核心论点组成了一张流图,它是 SVG 格式的,支持在网页中无损放大查看。

WGSL 的目标不是要与 GLSL 兼容,它是对现代着色器语言的重新设计。

2020 年 4 月 27 日,WGSL 标准有了第一次提交。自此开始,wgpu 和 dawn 都摆脱了对 shaderc 之类复杂繁重的着色器转译工具的依赖。wgpu 里使用的 WGSL 转译工具叫 naga, kvark 有一篇博客(Shader translation benchmark)对比了 naga 相比于其它转译工具的性能优化,总体来说,有 10 倍以上的性能优势。

2023 年之前,WGSL 的学习资源不多,唯一好的参考是 WGSL 规范,但它是对语言实现细节的规范,对普通用户来说有点难以理解。 我从 2018 年开始使用 wgpu (那时还是 使用 GLSL 做为着色器语言),2021 年底完成了个人作品 字习 Pro 及其他几个练手作品从 GLSL 到 WGSL 的 100 多个着色器的移植工作,在这个过程中对这两个着色器语言有了比较深入的了解。这个增补章节旨在介绍 WGSL 的一些基础知识,希望这对从 OpenGL / WebGL 迁移到 WebGPU 的朋友带来一点有益的经验(下边的所有 GLSL 代码均是按照 GLSL450 标准编写的)。

增补两个网上新出现的学习资源:

Tour of WGSLcompute.toys

一个简单的绘制着色器:对比 GLSL

GLSL 的绘制着色器:

rust
// 顶点着色器文件
+layout(location = 0) in vec3 position;
+layout(location = 1) in vec2 texcoord;
+layout(location = 0) out vec2 uv;
+
+layout(set = 0, binding = 0) uniform UniformParams {
+    mat4 mvp_matrix;
+    vec3 tint_color;
+};
+
+void main() {
+    gl_Position = mvp_matrix * vec4(position, 1.0);
+    uv = texcoord;
+}
+
+// 片元着色器文件
+layout(location = 0) in vec2 uv;
+layout(location = 0) out vec4 frag_color;
+
+layout(set = 0, binding = 0) uniform UniformParams {
+    mat4 mvp_matrix;
+    vec3 tint_color;
+};
+layout(set = 0, binding = 1) uniform texture2D textureFront;
+layout(set = 0, binding = 2) uniform sampler samplerFront;
+
+void main(void) {
+  vec4 front = texture(sampler2D(textureFront, samplerFront), uv);
+  frag_color = front * vec4(tint_color.rgb, 1.0);;
+}

下边是使用 WGSL 的等价实现,在 WGSL 中,我们通常将顶点着色器与片元着色器写在同一个文件中:

rust
struct VertexOutput {
+    @location(0) uv: vec2f,
+    @builtin(position) position: vec4f,
+};
+
+struct UniformParams {
+    mvp: mat4x4f,
+	tint_color: vec3f,
+};
+
+@group(0) @binding(0) var<uniform> params: UniformParams;
+
+@vertex
+fn vs_main(@location(0) pos: vec3f, @location(1) uv: vec2f) -> VertexOutput {
+    var out: VertexOutput;
+    out.position = params.mvp * vec4f(pos, 1.0);
+    out.uv = uv;
+    return out;
+}
+
+@group(0) @binding(1) var texture_front: texture_2d<f32>;
+@group(0) @binding(2) var sampler_front: sampler;
+
+@fragment
+fn fs_main(input: VertexOutput) -> @location(0) vec4f {
+    let front = textureSample(texture_front, sampler_front, input.uv);
+    return front * vec4f(params.tintColor, 1.0);
+}

计算着色器:继续对比 GLSL

GLSL 的计算着色器, 实现在 x 轴上的高斯模糊:

rust
layout(local_size_x = 16, local_size_y = 16) in;
+
+layout(set = 0, binding = 0) uniform InfoParams {
+  ivec2 img_size;
+};
+layout(set = 0, binding = 1) uniform readonly image2D src_pic;
+layout(set = 0, binding = 2, rgba32f) uniform image2D swap_pic;
+
+const float WEIGHT[5] = float[](0.2, 0.1, 0.10, 0.1, 0.1);
+
+void main() {
+  ivec2 uv = ivec2(gl_GlobalInvocationID.xy);
+  if (uv.x > info.x || uv.y > info.y) {
+    return;
+  }
+
+  vec4 temp = imageLoad(src_pic, uv) * WEIGHT[0];
+  ivec2 uvMax: vec2<i32> = img_size - 1;
+  for (int i = 1; i < 5; i += 1) {
+    ivec2 offset_uv = ivec2(1.0, 0) * i;
+    temp += imageLoad(src_pic, clamp(uv + offset_uv, ivec2(0), uvMax)) * WEIGHT[i];
+    temp += imageLoad(src_pic, clamp(uv - offset_uv, ivec2(0), uvMax)) * WEIGHT[i];
+  }
+  imageStore(swap_pic, uv, temp);
+}
WebGL 2.0 并不支持计算着色器,所以上面的 GLSL 计算着色器只能在 Native 端使用。

WGSL 版本的对等实现:

rust
struct InfoParams {
+  img_size: vec2<i32>,
+};
+
+@group(0) @binding(0) var<uniform> params: InfoParams;
+@group(0) @binding(1) var src_pic: texture_2d<f32>;
+@group(0) @binding(2) var swap_pic: texture_storage_2d<rgba32float, write>;
+
+let WEIGHT: array<f32, 5> = array<f32, 5>(0.2, 0.1, 0.10, 0.1, 0.1);
+
+@compute @workgroup_size(16, 16)
+fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
+  let uv = vec2<i32>(global_id.xy);
+  if (uv.x >= params.img_size.x || uv.y >= params.img_size.y) {
+    return;
+  }
+
+  var temp = textureLoad(src_pic, uv, 0) * WEIGHT[0];
+  let uvMax: vec2<i32> = img_size - 1;
+  for (var i: i32 = 1; i <= 4; i += 1) {
+    var uvOffset = vec2<i32>(3, 0) * i;
+    temp += textureLoad(src_pic, clamp(uv + uvOffset, vec2<i32>(0), uvMax), 0) * WEIGHT[i];
+    temp += textureLoad(src_pic, clamp(uv - uvOffset, vec2<i32>(0), uvMax), 0) * WEIGHT[i];
+  }
+  textureStore(swap_pic, uv, temp);
+}

你应该注意到了很多差异,比如:

总体上 WGSL 代码要比 GLSL 明晰得多。这是 WGSL 的一大优点,几乎所有内容都具有明确的自说明特性。 下边我们来深入了解一些关键区别。

入口点

WGSL 没有强制使用固定的 main() 函数作为入口点Entry Point),它通过 @vertex@fragment@compute 三个着色器阶段(Shader State)标记提供了足够的灵活性让开发人员能更好的组织着色器代码。你可以给入口点取任意函数名,只要不重名,还能将所有阶段(甚至是不同着色器的同一个阶段)的代码组织同一个文件中:

rust
// 顶点着色器入口点
+@vertex
+fn vs_main() {}
+
+// 片元着色器入口点
+@fragment
+fn fs_main() -> @location(X) vec4f{}
+
+// 计算着色器入口点
+@compute
+fn cs_main() {}

工作组

计算着色器中,一个工作组(Workgroup)就是一组调用,它们同时执行一个计算着色器阶段入口点,并共享对工作组地址空间中着色器变量的访问。可以将工作组理解为一个三维网格,我们通过(x, y, z)三个维度来声明当前计算着色器的工作组大小,每个维度上的默认值都是 1。

WGSL 声明工作组大小的语法相比 GLSL 简洁明了:

rust
// GLSL
+layout(local_size_x = 16, local_size_y = 16) in;
+
+// WGSL
+@workgroup_size(16, 16) // x = 16, y = 16, z = 1
+@workgroup_size(16)     // x = 16, y = 1, z = 1

Group 与 Binding 属性

WGSL 中每个资源都使用了 @group(X)@binding(X) 属性标记,例如 @group(0) @binding(0) var<uniform> params: UniformParams 它表示的是 Uniform buffer 对应于哪个绑定组中的哪个绑定槽(对应于 wgpu API 调用)。这与 GLSL 中的 layout(set = X, binding = X) 布局标记类似。WGSL 的属性非常明晰,描述了着色器阶段到结构的精确二进制布局的所有内容。

变量声明

WGSL 对于基于显式类型的 var 的变量声明有不同的语法。

rust
// GLSL:
+lowp vec4 color;
+// 或者,也可以不使用精度说明符
+vec4 color;
+
+// WGSL:
+var color: vec4f;

WGSL 没有像 lowp 这样的精度说明符, 而是显式指定具体类型,例如 f32(32 位浮点数)。如果要使用 f16 类型,需要在你的 WebGPU 程序中开启 shader-f16 扩展(wgpu 中目前已经加入了此扩展,但是 naga 中还没有完全实现对 f16 的支持)。

WGSL 支持自动类型推断。因此,如果在声明变量的同时进行赋值,就不必指定类型:

rust
// 显式指定变量类型声明
+var color: vec4f = vec4f(1.0, 0.0, 0.0, 1.0);
+
+// 省略类型声明,变量类型将在编译时自动推断得出
+var color = vec4f(1.0, 0.0, 0.0, 1.0);

WGSL 中的 var let 关键字与 Swift 语言一样:

结构体

在 WGSL 中,结构体(struct)用于表示 Unoform 及 Storage 缓冲区以及着色器的输入和输出。Unoform 缓冲区与 GLSL 类似,Storage 缓冲区虽然也在 GLSL 中存在等价物,但是 WebGL 2.0 并不支持。

WGSL 结构体字段对齐规则也与 GLSL 几乎一致,想要了解更多细节,可查看 WGSL 规范中的字节对齐规则示例

rust
// GLSL
+layout(set = 0, binding = 0) uniform UniformParams {
+    mat4 mvp_matrix;
+    vec3 tint_color;
+};
+// ...
+gl_Position = mvp_matrix * vec4(position, 1.0);
+
+
+// WGSL
+struct UniformParams {
+    mvp: mat4x4f,
+	tint_color: vec3f,
+};
+@group(0) @binding(0) var<uniform> params: UniformParams;
+// ...
+out.position = params.mvp * vec4f(pos, 1.0);

注意到上面 Unoform 缓冲区在声明及使用上的两个区别了吗?

  1. WGSL 需要先定义结构体然后才能声明绑定,而 GLSL 可以在声明绑定的同时定义(当然也支持先定义);
  2. WGSL 里需要用声明的变量来访问结构体字段,而 GLSL 里是直接使用结构体中的字段;

WGSL 的输入和输出结构体比较独特,在 GLSL 中没有对应物。入口函数接受输入结构,返回输出结构,并且结构体的所有字段都有 location(X) 属性注释。 如果只有单个输入或输出,那使用结构体就是可选的。

这种明确定义输入和输出的方式,使得 WGSL 的代码逻辑更加清晰,明显优于在 GLSL 中给魔法变量赋值的方式。

下边是一个顶点着色器的输出结构体(同时它也是对应的片元着色器的输入结构体):

rust
struct VertexOutput {
+    @location(0) uv: vec2f,
+    @builtin(position) position: vec4f,
+};

WGSL 不再需要像 GLSL 一样,在顶点着色器中定义完输出字段后,再到片元着色器中定义相应的输入字段。

函数语法

WGSL 函数语法与 Rust 一致, 而 GLSL 是类 C 语法。一个简单的 add 函数如下:

rust
// GLSL
+float add(float a, float b) {
+    return a + b;
+}
+
+// WGSL
+fn add(a: f32, b: f32) -> f32 {
+	return a + b;
+}

纹理

采样纹理

WGSL 中采样纹理总是要指定纹素(Texel)的数据类型 texture_2d<T>texture_3d<T>texture_cube<T>texture_cube_array<T>(T 必须是 f32、i32、u32 这三种类型之一),而 GLSL 中是没有纹素类型信息的,只有查看使用此着色器的程序源码才能知道:

rust
// GLSL
+layout(set = 0, binding = 1) uniform texture2D texture_front;
+
+// WGSL
+@group(0) @binding(1) var texture_front: texture_2d<f32>;

Storage 纹理

WGSL 中存储纹理的数据类型为 texture_storage_XX<T, access>, 而 GLSL 中没有明确的存储纹理类型,如果需要当做存储纹理使用,就需要在 layout(...) 中标记出纹素格式:

rust
// GLSL
+layout(set = 0, binding = 2, rgba32f) uniform image2D swap_pic;
+
+// WGSL
+@group(0) @binding(2) var swap_pic: texture_storage_2d<rgba32float, write>;

在目前的 WebGPU 标准中, 存储纹理的 access 只能为 write(只写), wgpu 能在 native 中支持 read_write(可读可写)。

更多 WGSL 语法细节

三元运算符

GLSL 支持三元运算符 ? : , WGSL 并不直接支持,但提供了内置函数 select(falseValue,trueValue,condition)

rust
// GLSL
+int n = isTrue ? 1 : 0;
+
+// WGSL
+let n: i32 = select(0, 1, isTrue);

花括号

WGSL 中的 if else 语法不能省略大括号(与 Rust 及 Swift 语言一样):

rust
// GLSL
+if (gray > 0.2) n = 65600;
+
+// WGSL
+if (gray > 0.2) { n = 65600; }

求模运算

GLSL 中我们使用 mod 函数做求模运算,WGSL 中有一个长得类似的函数 modf, 但它的功能是将传入参数分割为小数与整数两部分。在 WGSL 中需要使用 % 运算符来求模, 且 mod% 的工作方式还略有不同, mod 内部使用的是 floor (x - y * floor(x / y)), 而 % 内部使用的是 trunc (x - y * trunc(x / y)):

rust
// GLSL
+float n = mod(x, y);
+
+// WGSL
+let n = x % y;

着色器预处理

听到过很多人抱怨 WGSL 不提供预处理器,但其实所有的着色器语言都不自己提供预处理,只是我们可能已经习惯了使用已经封装好预处理逻辑的框架。

其实自己写一个预处理逻辑也是非常简单的事,有两种实现预处理的机制:

  1. 着色器被调用时实时预处理(对运行时性能会产生负影响);
  2. 利用 build.rs 在程序编译阶段预处理,并磁盘上生成预处理后的文件;

这两种实现方式的代码逻辑其实是一样的,仅仅是预处理的时机不同。

下边是一个需要预处理的实现了边缘检测的片元着色器:

rust
///#include "common/group0+vs.wgsl"
+
+///#include "func/edge_detection.wgsl"
+
+@fragment
+fn fs_main(vertex: VertexOutput) -> @location(0) vec4f {
+    let color = textureSample(tex, tex_sampler, vertex.uv);
+    return vec4f(edge_detection(length(color.rgb), 0.125));
+}

///#include 后面的路径分指向的是 commonfunc 目录下已经实现好的通用顶点着色器与边缘检测函数,我们现在按第 2 种机制实现一个简单的预处理来自动将顶点着色器及边缘检测函数包含进来:

rust
const WGSL_FOLDER: &'static str = "../wgsl_preprocessed";
+const INCLUDE: &tatic str = "///#include ";
+
+fn main() -> Result<(), Box<dyn Error>> {
+    // 这一行告诉 cargo 如果 /wgsl/ 目录中的内容发生了变化,就重新运行脚本
+    println!("cargo:rerun-if-changed=/../wgsl/*");
+
+    // 需要预处理的着色器数组(当然,更好的方式是读取并遍历待处理文件夹)
+    let shader_files = vec!["edge_detection"];
+
+    // 创建预处理后着色器的存放目录
+    std::fs::create_dir_all(WGSL_FOLDER)?;
+    for name in shader_files {
+        let _ = regenerate_shader(name);
+    }
+    Ok(())
+}
+
+fn regenerate_shader(shader_name: &str) -> Result<(), Box<dyn Error>> {
+    let base_dir = env!("CARGO_MANIFEST_DIR");
+    let path = PathBuf::from(&base_dir)
+        .join("../wgsl")
+        .join(format!("{}.wgsl", shader_name));
+    let mut out_path = WGSL_FOLDER.to_string();
+    out_path += &format!("/{}.wgsl", shader_name.replace("/", "_"));
+
+    let code = match read_to_string(&path) {
+        Ok(code) => code,
+        Err(e) => {
+            panic!("无法读取 {:?}: {:?}", path, e)
+        }
+    };
+
+    let mut shader_source = String::new();
+    parse_shader_source(&code, &mut shader_source, &base_dir);
+
+    let mut f = std::fs::File::create(&std::path::Path::new(&base_dir).join(&out_path))?;
+    f.write_all(shader_source.as_bytes())?;
+
+    Ok(())
+}
+
+fn parse_shader_source(source: &str, output: &mut String, base_path: &str) {
+    for line in source.lines() {
+        if line.starts_with(INCLUDE) {
+            // 支持一次 include 多个外部着色器文件,文件路径之间用 , 号分割
+            let imports = line[INCLUDE.len()..].split(',');
+            // 遍历所有待导入的文件,递归处理导入的代码里还包括导入的情况
+            for import in imports {
+                if let Some(include) = get_include_code(import, base_path) {
+                    parse_shader_source(&include, output, base_path);
+                } else {
+                    println!("无法找到要导入的着色器文件: {}", import);
+                }
+            }
+        }
+    }
+}
+
+fn get_include_code(key: &str, base_path: &str) -> Option<String> {
+    let path = PathBuf::from(base_path)
+        .join("../wgsl")
+        .join(key.replace('"', ""));
+    let shader = match read_to_string(&path) {
+        Ok(code) => code,
+        Err(e) => panic!("无法读取 {:?}: {:?}", path, e),
+    };
+    Some(shader)
+}

上面的几十行代码就是一套完整的预处理逻辑,它在每次程序编译时自动检查 wgsl/ 目录下的待处理着色器有没有发生变化,如果有变化,就重新处理并在 wgsl_preprocessed/ 目录下写入一个同名的处理后的着色器。

`,84),p=[h];function k(t,e,r,E,d,g){return i(),a("div",null,p)}const F=s(l,[["render",k]]);export{c as __pageData,F as default}; diff --git a/assets/beginner_wgsl.md.44PkYt5K.lean.js b/assets/beginner_wgsl.md.44PkYt5K.lean.js new file mode 100644 index 000000000..5d8c9784c --- /dev/null +++ b/assets/beginner_wgsl.md.44PkYt5K.lean.js @@ -0,0 +1 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const c=JSON.parse('{"title":"WGSL 着色器语言","description":"WebGPU 着色器语言 WGSL 介绍及与 GLSL 的语法对比","frontmatter":{"head":[["meta",{"name":"description","content":"WebGPU 着色器语言 WGSL 介绍及与 GLSL 的语法对比"}],["meta",{"name":"keywords","content":"WGSL GLSL Shader WebGPU wgpu"}]]},"headers":[],"relativePath":"beginner/wgsl.md","filePath":"beginner/wgsl.md","lastUpdated":1703303099000}'),l={name:"beginner/wgsl.md"},h=n("",84),p=[h];function k(t,e,r,E,d,g){return i(),a("div",null,p)}const F=s(l,[["render",k]]);export{c as __pageData,F as default}; diff --git a/assets/beginner_wgsl.md.a349b4fd.js b/assets/beginner_wgsl.md.a349b4fd.js deleted file mode 100644 index 6c232ec44..000000000 --- a/assets/beginner_wgsl.md.a349b4fd.js +++ /dev/null @@ -1,254 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const i=JSON.parse('{"title":"WGSL 着色器语言","description":"WebGPU 着色器语言 WGSL 介绍及与 GLSL 的语法对比","frontmatter":{"head":[["meta",{"name":"description","content":"WebGPU 着色器语言 WGSL 介绍及与 GLSL 的语法对比"}],["meta",{"name":"keywords","content":"WGSL GLSL Shader WebGPU wgpu"}]]},"headers":[],"relativePath":"beginner/wgsl.md","filePath":"beginner/wgsl.md","lastUpdated":1701933923000}'),p={name:"beginner/wgsl.md"},o=l(`

WGSL 着色器语言

WGSL 的来由

WebGPU 的目标是要在各个现代底层图形 API 之上抽象出一套统一的图形 API,而每个底层图形 API 后端都有自己的着色语言:

WGSL (WebGPU Shading Language) 出现之前,很多开发者或团队是通过宏及各种转译工具来将自己的着色器编译到不同目标平台的,他们自然是希望有一个标准化的统一语言。

WebGPU 成员花了 2 年半的时间来争论 WebGPU 是否应该有自己的着色语言。kvark 将这场争论中的核心论点组成了一张流图,它是 SVG 格式的,支持在网页中无损放大查看。

WGSL 的目标不是要与 GLSL 兼容,它是对现代着色器语言的重新设计。

2020 年 4 月 27 日,WGSL 标准有了第一次提交。自此开始,wgpu 和 dawn 都摆脱了对 shaderc 之类复杂繁重的着色器转译工具的依赖。wgpu 里使用的 WGSL 转译工具叫 naga, kvark 有一篇博客(Shader translation benchmark)对比了 naga 相比于其它转译工具的性能优化,总体来说,有 10 倍以上的性能优势。

2023 年之前,WGSL 的学习资源不多,唯一好的参考是 WGSL 规范,但它是对语言实现细节的规范,对普通用户来说有点难以理解。 我从 2018 年开始使用 wgpu (那时还是 使用 GLSL 做为着色器语言),2021 年底完成了个人作品 字习 Pro 及其他几个练手作品从 GLSL 到 WGSL 的 100 多个着色器的移植工作,在这个过程中对这两个着色器语言有了比较深入的了解。这个增补章节旨在介绍 WGSL 的一些基础知识,希望这对从 OpenGL / WebGL 迁移到 WebGPU 的朋友带来一点有益的经验(下边的所有 GLSL 代码均是按照 GLSL450 标准编写的)。

增补两个网上新出现的学习资源:

Tour of WGSLcompute.toys

一个简单的绘制着色器:对比 GLSL

GLSL 的绘制着色器:

rust
// 顶点着色器文件
-layout(location = 0) in vec3 position;
-layout(location = 1) in vec2 texcoord;
-layout(location = 0) out vec2 uv;
-
-layout(set = 0, binding = 0) uniform UniformParams {
-    mat4 mvp_matrix;
-    vec3 tint_color;
-};
-
-void main() {
-    gl_Position = mvp_matrix * vec4(position, 1.0);
-    uv = texcoord;
-}
-
-// 片元着色器文件
-layout(location = 0) in vec2 uv;
-layout(location = 0) out vec4 frag_color;
-
-layout(set = 0, binding = 0) uniform UniformParams {
-    mat4 mvp_matrix;
-    vec3 tint_color;
-};
-layout(set = 0, binding = 1) uniform texture2D textureFront;
-layout(set = 0, binding = 2) uniform sampler samplerFront;
-
-void main(void) {
-  vec4 front = texture(sampler2D(textureFront, samplerFront), uv);
-  frag_color = front * vec4(tint_color.rgb, 1.0);;
-}

下边是使用 WGSL 的等价实现,在 WGSL 中,我们通常将顶点着色器与片元着色器写在同一个文件中:

rust
struct VertexOutput {
-    @location(0) uv: vec2f,
-    @builtin(position) position: vec4f,
-};
-
-struct UniformParams {
-    mvp: mat4x4f,
-	tint_color: vec3f,
-};
-
-@group(0) @binding(0) var<uniform> params: UniformParams;
-
-@vertex
-fn vs_main(@location(0) pos: vec3f, @location(1) uv: vec2f) -> VertexOutput {
-    var out: VertexOutput;
-    out.position = params.mvp * vec4f(pos, 1.0);
-    out.uv = uv;
-    return out;
-}
-
-@group(0) @binding(1) var texture_front: texture_2d<f32>;
-@group(0) @binding(2) var sampler_front: sampler;
-
-@fragment
-fn fs_main(input: VertexOutput) -> @location(0) vec4f {
-    let front = textureSample(texture_front, sampler_front, input.uv);
-    return front * vec4f(params.tintColor, 1.0);
-}

计算着色器:继续对比 GLSL

GLSL 的计算着色器, 实现在 x 轴上的高斯模糊:

rust
layout(local_size_x = 16, local_size_y = 16) in;
-
-layout(set = 0, binding = 0) uniform InfoParams {
-  ivec2 img_size;
-};
-layout(set = 0, binding = 1) uniform readonly image2D src_pic;
-layout(set = 0, binding = 2, rgba32f) uniform image2D swap_pic;
-
-const float WEIGHT[5] = float[](0.2, 0.1, 0.10, 0.1, 0.1);
-
-void main() {
-  ivec2 uv = ivec2(gl_GlobalInvocationID.xy);
-  if (uv.x > info.x || uv.y > info.y) {
-    return;
-  }
-
-  vec4 temp = imageLoad(src_pic, uv) * WEIGHT[0];
-  ivec2 uvMax: vec2<i32> = img_size - 1;
-  for (int i = 1; i < 5; i += 1) {
-    ivec2 offset_uv = ivec2(1.0, 0) * i;
-    temp += imageLoad(src_pic, clamp(uv + offset_uv, ivec2(0), uvMax)) * WEIGHT[i];
-    temp += imageLoad(src_pic, clamp(uv - offset_uv, ivec2(0), uvMax)) * WEIGHT[i];
-  }
-  imageStore(swap_pic, uv, temp);
-}
WebGL 2.0 并不支持计算着色器,所以上面的 GLSL 计算着色器只能在 Native 端使用。

WGSL 版本的对等实现:

rust
struct InfoParams {
-  img_size: vec2<i32>,
-};
-
-@group(0) @binding(0) var<uniform> params: InfoParams;
-@group(0) @binding(1) var src_pic: texture_2d<f32>;
-@group(0) @binding(2) var swap_pic: texture_storage_2d<rgba32float, write>;
-
-let WEIGHT: array<f32, 5> = array<f32, 5>(0.2, 0.1, 0.10, 0.1, 0.1);
-
-@compute @workgroup_size(16, 16)
-fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
-  let uv = vec2<i32>(global_id.xy);
-  if (uv.x >= params.img_size.x || uv.y >= params.img_size.y) {
-    return;
-  }
-
-  var temp = textureLoad(src_pic, uv, 0) * WEIGHT[0];
-  let uvMax: vec2<i32> = img_size - 1;
-  for (var i: i32 = 1; i <= 4; i += 1) {
-    var uvOffset = vec2<i32>(3, 0) * i;
-    temp += textureLoad(src_pic, clamp(uv + uvOffset, vec2<i32>(0), uvMax), 0) * WEIGHT[i];
-    temp += textureLoad(src_pic, clamp(uv - uvOffset, vec2<i32>(0), uvMax), 0) * WEIGHT[i];
-  }
-  textureStore(swap_pic, uv, temp);
-}

你应该注意到了很多差异,比如:

总体上 WGSL 代码要比 GLSL 明晰得多。这是 WGSL 的一大优点,几乎所有内容都具有明确的自说明特性。 下边我们来深入了解一些关键区别。

入口点

WGSL 没有强制使用固定的 main() 函数作为入口点Entry Point),它通过 @vertex@fragment@compute 三个着色器阶段(Shader State)标记提供了足够的灵活性让开发人员能更好的组织着色器代码。你可以给入口点取任意函数名,只要不重名,还能将所有阶段(甚至是不同着色器的同一个阶段)的代码组织同一个文件中:

rust
// 顶点着色器入口点
-@vertex
-fn vs_main() {}
-
-// 片元着色器入口点
-@fragment
-fn fs_main() -> @location(X) vec4f{}
-
-// 计算着色器入口点
-@compute
-fn cs_main() {}

工作组

计算着色器中,一个工作组(Workgroup)就是一组调用,它们同时执行一个计算着色器阶段入口点,并共享对工作组地址空间中着色器变量的访问。可以将工作组理解为一个三维网格,我们通过(x, y, z)三个维度来声明当前计算着色器的工作组大小,每个维度上的默认值都是 1。

WGSL 声明工作组大小的语法相比 GLSL 简洁明了:

rust
// GLSL
-layout(local_size_x = 16, local_size_y = 16) in;
-
-// WGSL
-@workgroup_size(16, 16) // x = 16, y = 16, z = 1
-@workgroup_size(16)     // x = 16, y = 1, z = 1

Group 与 Binding 属性

WGSL 中每个资源都使用了 @group(X)@binding(X) 属性标记,例如 @group(0) @binding(0) var<uniform> params: UniformParams 它表示的是 Uniform buffer 对应于哪个绑定组中的哪个绑定槽(对应于 wgpu API 调用)。这与 GLSL 中的 layout(set = X, binding = X) 布局标记类似。WGSL 的属性非常明晰,描述了着色器阶段到结构的精确二进制布局的所有内容。

变量声明

WGSL 对于基于显式类型的 var 的变量声明有不同的语法。

rust
// GLSL:
-lowp vec4 color;
-// 或者,也可以不使用精度说明符
-vec4 color;
-
-// WGSL:
-var color: vec4f;

WGSL 没有像 lowp 这样的精度说明符, 而是显式指定具体类型,例如 f32(32 位浮点数)。如果要使用 f16 类型,需要在你的 WebGPU 程序中开启 shader-f16 扩展(wgpu 中目前已经加入了此扩展,但是 naga 中还没有完全实现对 f16 的支持)。

WGSL 支持自动类型推断。因此,如果在声明变量的同时进行赋值,就不必指定类型:

rust
// 显式指定变量类型声明
-var color: vec4f = vec4f(1.0, 0.0, 0.0, 1.0);
-
-// 省略类型声明,变量类型将在编译时自动推断得出
-var color = vec4f(1.0, 0.0, 0.0, 1.0);

WGSL 中的 var let 关键字与 Swift 语言一样:

结构体

在 WGSL 中,结构体(struct)用于表示 Unoform 及 Storage 缓冲区以及着色器的输入和输出。Unoform 缓冲区与 GLSL 类似,Storage 缓冲区虽然也在 GLSL 中存在等价物,但是 WebGL 2.0 并不支持。

WGSL 结构体字段对齐规则也与 GLSL 几乎一致,想要了解更多细节,可查看 WGSL 规范中的字节对齐规则示例

rust
// GLSL
-layout(set = 0, binding = 0) uniform UniformParams {
-    mat4 mvp_matrix;
-    vec3 tint_color;
-};
-// ...
-gl_Position = mvp_matrix * vec4(position, 1.0);
-
-
-// WGSL
-struct UniformParams {
-    mvp: mat4x4f,
-	tint_color: vec3f,
-};
-@group(0) @binding(0) var<uniform> params: UniformParams;
-// ...
-out.position = params.mvp * vec4f(pos, 1.0);

注意到上面 Unoform 缓冲区在声明及使用上的两个区别了吗?

  1. WGSL 需要先定义结构体然后才能声明绑定,而 GLSL 可以在声明绑定的同时定义(当然也支持先定义);
  2. WGSL 里需要用声明的变量来访问结构体字段,而 GLSL 里是直接使用结构体中的字段;

WGSL 的输入和输出结构体比较独特,在 GLSL 中没有对应物。入口函数接受输入结构,返回输出结构,并且结构体的所有字段都有 location(X) 属性注释。 如果只有单个输入或输出,那使用结构体就是可选的。

这种明确定义输入和输出的方式,使得 WGSL 的代码逻辑更加清晰,明显优于在 GLSL 中给魔法变量赋值的方式。

下边是一个顶点着色器的输出结构体(同时它也是对应的片元着色器的输入结构体):

rust
struct VertexOutput {
-    @location(0) uv: vec2f,
-    @builtin(position) position: vec4f,
-};

WGSL 不再需要像 GLSL 一样,在顶点着色器中定义完输出字段后,再到片元着色器中定义相应的输入字段。

函数语法

WGSL 函数语法与 Rust 一致, 而 GLSL 是类 C 语法。一个简单的 add 函数如下:

rust
// GLSL
-float add(float a, float b) {
-    return a + b;
-}
-
-// WGSL
-fn add(a: f32, b: f32) -> f32 {
-	return a + b;
-}

纹理

采样纹理

WGSL 中采样纹理总是要指定纹素(Texel)的数据类型 texture_2d<T>texture_3d<T>texture_cube<T>texture_cube_array<T>(T 必须是 f32、i32、u32 这三种类型之一),而 GLSL 中是没有纹素类型信息的,只有查看使用此着色器的程序源码才能知道:

rust
// GLSL
-layout(set = 0, binding = 1) uniform texture2D texture_front;
-
-// WGSL
-@group(0) @binding(1) var texture_front: texture_2d<f32>;

Storage 纹理

WGSL 中存储纹理的数据类型为 texture_storage_XX<T, access>, 而 GLSL 中没有明确的存储纹理类型,如果需要当做存储纹理使用,就需要在 layout(...) 中标记出纹素格式:

rust
// GLSL
-layout(set = 0, binding = 2, rgba32f) uniform image2D swap_pic;
-
-// WGSL
-@group(0) @binding(2) var swap_pic: texture_storage_2d<rgba32float, write>;

在目前的 WebGPU 标准中, 存储纹理的 access 只能为 write(只写), wgpu 能在 native 中支持 read_write(可读可写)。

更多 WGSL 语法细节

三元运算符

GLSL 支持三元运算符 ? : , WGSL 并不直接支持,但提供了内置函数 select(falseValue,trueValue,condition)

rust
// GLSL
-int n = isTrue ? 1 : 0;
-
-// WGSL
-let n: i32 = select(0, 1, isTrue);

花括号

WGSL 中的 if else 语法不能省略大括号(与 Rust 及 Swift 语言一样):

rust
// GLSL
-if (gray > 0.2) n = 65600;
-
-// WGSL
-if (gray > 0.2) { n = 65600; }

求模运算

GLSL 中我们使用 mod 函数做求模运算,WGSL 中有一个长得类似的函数 modf, 但它的功能是将传入参数分割为小数与整数两部分。在 WGSL 中需要使用 % 运算符来求模, 且 mod% 的工作方式还略有不同, mod 内部使用的是 floor (x - y * floor(x / y)), 而 % 内部使用的是 trunc (x - y * trunc(x / y)):

rust
// GLSL
-float n = mod(x, y);
-
-// WGSL
-let n = x % y;

着色器预处理

听到过很多人抱怨 WGSL 不提供预处理器,但其实所有的着色器语言都不自己提供预处理,只是我们可能已经习惯了使用已经封装好预处理逻辑的框架。

其实自己写一个预处理逻辑也是非常简单的事,有两种实现预处理的机制:

  1. 着色器被调用时实时预处理(对运行时性能会产生负影响);
  2. 利用 build.rs 在程序编译阶段预处理,并磁盘上生成预处理后的文件;

这两种实现方式的代码逻辑其实是一样的,仅仅是预处理的时机不同。

下边是一个需要预处理的实现了边缘检测的片元着色器:

rust
///#include "common/group0+vs.wgsl"
-
-///#include "func/edge_detection.wgsl"
-
-@fragment
-fn fs_main(vertex: VertexOutput) -> @location(0) vec4f {
-    let color = textureSample(tex, tex_sampler, vertex.uv);
-    return vec4f(edge_detection(length(color.rgb), 0.125));
-}

///#include 后面的路径分指向的是 commonfunc 目录下已经实现好的通用顶点着色器与边缘检测函数,我们现在按第 2 种机制实现一个简单的预处理来自动将顶点着色器及边缘检测函数包含进来:

rust
const WGSL_FOLDER: &'static str = "../wgsl_preprocessed";
-const INCLUDE: &'static str = "///#include ";
-
-fn main() -> Result<(), Box<dyn Error>> {
-    // 这一行告诉 cargo 如果 /wgsl/ 目录中的内容发生了变化,就重新运行脚本
-    println!("cargo:rerun-if-changed=/../wgsl/*");
-
-    // 需要预处理的着色器数组(当然,更好的方式是读取并遍历待处理文件夹)
-    let shader_files = vec!["edge_detection"];
-
-    // 创建预处理后着色器的存放目录
-    std::fs::create_dir_all(WGSL_FOLDER)?;
-    for name in shader_files {
-        let _ = regenerate_shader(name);
-    }
-    Ok(())
-}
-
-fn regenerate_shader(shader_name: &str) -> Result<(), Box<dyn Error>> {
-    let base_dir = env!("CARGO_MANIFEST_DIR");
-    let path = PathBuf::from(&base_dir)
-        .join("../wgsl")
-        .join(format!("{}.wgsl", shader_name));
-    let mut out_path = WGSL_FOLDER.to_string();
-    out_path += &format!("/{}.wgsl", shader_name.replace("/", "_"));
-
-    let code = match read_to_string(&path) {
-        Ok(code) => code,
-        Err(e) => {
-            panic!("无法读取 {:?}: {:?}", path, e)
-        }
-    };
-
-    let mut shader_source = String::new();
-    parse_shader_source(&code, &mut shader_source, &base_dir);
-
-    let mut f = std::fs::File::create(&std::path::Path::new(&base_dir).join(&out_path))?;
-    f.write_all(shader_source.as_bytes())?;
-
-    Ok(())
-}
-
-fn parse_shader_source(source: &str, output: &mut String, base_path: &str) {
-    for line in source.lines() {
-        if line.starts_with(INCLUDE) {
-            // 支持一次 include 多个外部着色器文件,文件路径之间用 , 号分割
-            let imports = line[INCLUDE.len()..].split(',');
-            // 遍历所有待导入的文件,递归处理导入的代码里还包括导入的情况
-            for import in imports {
-                if let Some(include) = get_include_code(import, base_path) {
-                    parse_shader_source(&include, output, base_path);
-                } else {
-                    println!("无法找到要导入的着色器文件: {}", import);
-                }
-            }
-        }
-    }
-}
-
-fn get_include_code(key: &str, base_path: &str) -> Option<String> {
-    let path = PathBuf::from(base_path)
-        .join("../wgsl")
-        .join(key.replace('"', ""));
-    let shader = match read_to_string(&path) {
-        Ok(code) => code,
-        Err(e) => panic!("无法读取 {:?}: {:?}", path, e),
-    };
-    Some(shader)
-}

上面的几十行代码就是一套完整的预处理逻辑,它在每次程序编译时自动检查 wgsl/ 目录下的待处理着色器有没有发生变化,如果有变化,就重新处理并在 wgsl_preprocessed/ 目录下写入一个同名的处理后的着色器。

`,84),e=[o];function r(t,c,D,F,y,C){return n(),a("div",null,e)}const b=s(p,[["render",r]]);export{i as __pageData,b as default}; diff --git a/assets/beginner_wgsl.md.a349b4fd.lean.js b/assets/beginner_wgsl.md.a349b4fd.lean.js deleted file mode 100644 index 641afc60e..000000000 --- a/assets/beginner_wgsl.md.a349b4fd.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const i=JSON.parse('{"title":"WGSL 着色器语言","description":"WebGPU 着色器语言 WGSL 介绍及与 GLSL 的语法对比","frontmatter":{"head":[["meta",{"name":"description","content":"WebGPU 着色器语言 WGSL 介绍及与 GLSL 的语法对比"}],["meta",{"name":"keywords","content":"WGSL GLSL Shader WebGPU wgpu"}]]},"headers":[],"relativePath":"beginner/wgsl.md","filePath":"beginner/wgsl.md","lastUpdated":1701933923000}'),p={name:"beginner/wgsl.md"},o=l("",84),e=[o];function r(t,c,D,F,y,C){return n(),a("div",null,e)}const b=s(p,[["render",r]]);export{i as __pageData,b as default}; diff --git a/assets/bevy_in_android.6d8b91d5.png b/assets/bevy_in_android.gkRUPflr.png similarity index 100% rename from assets/bevy_in_android.6d8b91d5.png rename to assets/bevy_in_android.gkRUPflr.png diff --git a/assets/bevy_in_ios.7ff2933b.png b/assets/bevy_in_ios.xoCIvUfb.png similarity index 100% rename from assets/bevy_in_ios.7ff2933b.png rename to assets/bevy_in_ios.xoCIvUfb.png diff --git a/assets/black_triangles.7cbbe4d6.png b/assets/black_triangles.n_V-Q8yK.png similarity index 100% rename from assets/black_triangles.7cbbe4d6.png rename to assets/black_triangles.n_V-Q8yK.png diff --git a/assets/buffer_data.e88484b4.png b/assets/buffer_data.6SFbM26w.png similarity index 100% rename from assets/buffer_data.e88484b4.png rename to assets/buffer_data.6SFbM26w.png diff --git a/assets/capture.b5f12af0.png b/assets/capture.UPiFxBju.png similarity index 100% rename from assets/capture.b5f12af0.png rename to assets/capture.UPiFxBju.png diff --git a/assets/chunks/framework.adbf3c9e.js b/assets/chunks/framework.adbf3c9e.js 
deleted file mode 100644 index 00a324523..000000000 --- a/assets/chunks/framework.adbf3c9e.js +++ /dev/null @@ -1,2 +0,0 @@ -function Vn(e,t){const n=Object.create(null),s=e.split(",");for(let r=0;r!!n[r.toLowerCase()]:r=>!!n[r]}const te={},ft=[],Ie=()=>{},Ei=()=>!1,Ti=/^on[^a-z]/,Nt=e=>Ti.test(e),qn=e=>e.startsWith("onUpdate:"),ie=Object.assign,zn=(e,t)=>{const n=e.indexOf(t);n>-1&&e.splice(n,1)},Ai=Object.prototype.hasOwnProperty,q=(e,t)=>Ai.call(e,t),L=Array.isArray,at=e=>on(e)==="[object Map]",ar=e=>on(e)==="[object Set]",j=e=>typeof e=="function",se=e=>typeof e=="string",Yn=e=>typeof e=="symbol",ee=e=>e!==null&&typeof e=="object",ur=e=>ee(e)&&j(e.then)&&j(e.catch),dr=Object.prototype.toString,on=e=>dr.call(e),Pi=e=>on(e).slice(8,-1),hr=e=>on(e)==="[object Object]",Jn=e=>se(e)&&e!=="NaN"&&e[0]!=="-"&&""+parseInt(e,10)===e,Et=Vn(",key,ref,ref_for,ref_key,onVnodeBeforeMount,onVnodeMounted,onVnodeBeforeUpdate,onVnodeUpdated,onVnodeBeforeUnmount,onVnodeUnmounted"),ln=e=>{const t=Object.create(null);return n=>t[n]||(t[n]=e(n))},Ii=/-(\w)/g,Me=ln(e=>e.replace(Ii,(t,n)=>n?n.toUpperCase():"")),Oi=/\B([A-Z])/g,rt=ln(e=>e.replace(Oi,"-$1").toLowerCase()),cn=ln(e=>e.charAt(0).toUpperCase()+e.slice(1)),zt=ln(e=>e?`on${cn(e)}`:""),Ot=(e,t)=>!Object.is(e,t),Cn=(e,t)=>{for(let n=0;n{Object.defineProperty(e,t,{configurable:!0,enumerable:!1,value:n})},Ri=e=>{const t=parseFloat(e);return isNaN(t)?e:t},Fi=e=>{const t=se(e)?Number(e):NaN;return isNaN(t)?e:t};let bs;const Fn=()=>bs||(bs=typeof globalThis<"u"?globalThis:typeof self<"u"?self:typeof window<"u"?window:typeof global<"u"?global:{});function Xn(e){if(L(e)){const t={};for(let n=0;n{if(n){const s=n.split(Mi);s.length>1&&(t[s[0].trim()]=s[1].trim())}}),t}function Zn(e){let t="";if(se(e))t=e;else if(L(e))for(let n=0;nse(e)?e:e==null?"":L(e)||ee(e)&&(e.toString===dr||!j(e.toString))?JSON.stringify(e,gr,2):String(e),gr=(e,t)=>t&&t.__v_isRef?gr(e,t.value):at(t)?{[`Map(${t.size})`]:[...t.entries()].reduce((n,[s,r])=>(n[`${s} 
=>`]=r,n),{})}:ar(t)?{[`Set(${t.size})`]:[...t.values()]}:ee(t)&&!L(t)&&!hr(t)?String(t):t;let _e;class Bi{constructor(t=!1){this.detached=t,this._active=!0,this.effects=[],this.cleanups=[],this.parent=_e,!t&&_e&&(this.index=(_e.scopes||(_e.scopes=[])).push(this)-1)}get active(){return this._active}run(t){if(this._active){const n=_e;try{return _e=this,t()}finally{_e=n}}}on(){_e=this}off(){_e=this.parent}stop(t){if(this._active){let n,s;for(n=0,s=this.effects.length;n{const t=new Set(e);return t.w=0,t.n=0,t},mr=e=>(e.w&Ve)>0,_r=e=>(e.n&Ve)>0,Di=({deps:e})=>{if(e.length)for(let t=0;t{const{deps:t}=e;if(t.length){let n=0;for(let s=0;s{(d==="length"||d>=c)&&l.push(a)})}else switch(n!==void 0&&l.push(o.get(n)),t){case"add":L(e)?Jn(n)&&l.push(o.get("length")):(l.push(o.get(nt)),at(e)&&l.push(o.get(Ln)));break;case"delete":L(e)||(l.push(o.get(nt)),at(e)&&l.push(o.get(Ln)));break;case"set":at(e)&&l.push(o.get(nt));break}if(l.length===1)l[0]&&Nn(l[0]);else{const c=[];for(const a of l)a&&c.push(...a);Nn(Qn(c))}}function Nn(e,t){const n=L(e)?e:[...e];for(const s of n)s.computed&&ws(s);for(const s of n)s.computed||ws(s)}function ws(e,t){(e!==Ae||e.allowRecurse)&&(e.scheduler?e.scheduler():e.run())}const ki=Vn("__proto__,__v_isRef,__isVue"),wr=new Set(Object.getOwnPropertyNames(Symbol).filter(e=>e!=="arguments"&&e!=="caller").map(e=>Symbol[e]).filter(Yn)),Wi=es(),Vi=es(!1,!0),qi=es(!0),xs=zi();function zi(){const e={};return["includes","indexOf","lastIndexOf"].forEach(t=>{e[t]=function(...n){const s=Y(this);for(let i=0,o=this.length;i{e[t]=function(...n){bt();const s=Y(this)[t].apply(this,n);return yt(),s}}),e}function Yi(e){const t=Y(this);return pe(t,"has",e),t.hasOwnProperty(e)}function es(e=!1,t=!1){return function(s,r,i){if(r==="__v_isReactive")return!e;if(r==="__v_isReadonly")return e;if(r==="__v_isShallow")return t;if(r==="__v_raw"&&i===(e?t?ao:Tr:t?Er:vr).get(s))return s;const o=L(s);if(!e){if(o&&q(xs,r))return Reflect.get(xs,r,i);if(r==="hasOwnProperty")return Yi}const 
l=Reflect.get(s,r,i);return(Yn(r)?wr.has(r):ki(r))||(e||pe(s,"get",r),t)?l:fe(l)?o&&Jn(r)?l:l.value:ee(l)?e?Ar(l):an(l):l}}const Ji=xr(),Xi=xr(!0);function xr(e=!1){return function(n,s,r,i){let o=n[s];if(gt(o)&&fe(o)&&!fe(r))return!1;if(!e&&(!Qt(r)&&!gt(r)&&(o=Y(o),r=Y(r)),!L(n)&&fe(o)&&!fe(r)))return o.value=r,!0;const l=L(n)&&Jn(s)?Number(s)e,fn=e=>Reflect.getPrototypeOf(e);function Bt(e,t,n=!1,s=!1){e=e.__v_raw;const r=Y(e),i=Y(t);n||(t!==i&&pe(r,"get",t),pe(r,"get",i));const{has:o}=fn(r),l=s?ts:n?rs:Rt;if(o.call(r,t))return l(e.get(t));if(o.call(r,i))return l(e.get(i));e!==r&&e.get(t)}function Ut(e,t=!1){const n=this.__v_raw,s=Y(n),r=Y(e);return t||(e!==r&&pe(s,"has",e),pe(s,"has",r)),e===r?n.has(e):n.has(e)||n.has(r)}function jt(e,t=!1){return e=e.__v_raw,!t&&pe(Y(e),"iterate",nt),Reflect.get(e,"size",e)}function Cs(e){e=Y(e);const t=Y(this);return fn(t).has.call(t,e)||(t.add(e),$e(t,"add",e,e)),this}function vs(e,t){t=Y(t);const n=Y(this),{has:s,get:r}=fn(n);let i=s.call(n,e);i||(e=Y(e),i=s.call(n,e));const o=r.call(n,e);return n.set(e,t),i?Ot(t,o)&&$e(n,"set",e,t):$e(n,"add",e,t),this}function Es(e){const t=Y(this),{has:n,get:s}=fn(t);let r=n.call(t,e);r||(e=Y(e),r=n.call(t,e)),s&&s.call(t,e);const i=t.delete(e);return r&&$e(t,"delete",e,void 0),i}function Ts(){const e=Y(this),t=e.size!==0,n=e.clear();return t&&$e(e,"clear",void 0,void 0),n}function Dt(e,t){return function(s,r){const i=this,o=i.__v_raw,l=Y(o),c=t?ts:e?rs:Rt;return!e&&pe(l,"iterate",nt),o.forEach((a,d)=>s.call(r,c(a),c(d),i))}}function Kt(e,t,n){return function(...s){const r=this.__v_raw,i=Y(r),o=at(i),l=e==="entries"||e===Symbol.iterator&&o,c=e==="keys"&&o,a=r[e](...s),d=n?ts:t?rs:Rt;return!t&&pe(i,"iterate",c?Ln:nt),{next(){const{value:p,done:y}=a.next();return y?{value:p,done:y}:{value:l?[d(p[0]),d(p[1])]:d(p),done:y}},[Symbol.iterator](){return this}}}}function Be(e){return function(...t){return e==="delete"?!1:this}}function no(){const e={get(i){return Bt(this,i)},get size(){return 
jt(this)},has:Ut,add:Cs,set:vs,delete:Es,clear:Ts,forEach:Dt(!1,!1)},t={get(i){return Bt(this,i,!1,!0)},get size(){return jt(this)},has:Ut,add:Cs,set:vs,delete:Es,clear:Ts,forEach:Dt(!1,!0)},n={get(i){return Bt(this,i,!0)},get size(){return jt(this,!0)},has(i){return Ut.call(this,i,!0)},add:Be("add"),set:Be("set"),delete:Be("delete"),clear:Be("clear"),forEach:Dt(!0,!1)},s={get(i){return Bt(this,i,!0,!0)},get size(){return jt(this,!0)},has(i){return Ut.call(this,i,!0)},add:Be("add"),set:Be("set"),delete:Be("delete"),clear:Be("clear"),forEach:Dt(!0,!0)};return["keys","values","entries",Symbol.iterator].forEach(i=>{e[i]=Kt(i,!1,!1),n[i]=Kt(i,!0,!1),t[i]=Kt(i,!1,!0),s[i]=Kt(i,!0,!0)}),[e,n,t,s]}const[so,ro,io,oo]=no();function ns(e,t){const n=t?e?oo:io:e?ro:so;return(s,r,i)=>r==="__v_isReactive"?!e:r==="__v_isReadonly"?e:r==="__v_raw"?s:Reflect.get(q(n,r)&&r in s?n:s,r,i)}const lo={get:ns(!1,!1)},co={get:ns(!1,!0)},fo={get:ns(!0,!1)},vr=new WeakMap,Er=new WeakMap,Tr=new WeakMap,ao=new WeakMap;function uo(e){switch(e){case"Object":case"Array":return 1;case"Map":case"Set":case"WeakMap":case"WeakSet":return 2;default:return 0}}function ho(e){return e.__v_skip||!Object.isExtensible(e)?0:uo(Pi(e))}function an(e){return gt(e)?e:ss(e,!1,Cr,lo,vr)}function po(e){return ss(e,!1,to,co,Er)}function Ar(e){return ss(e,!0,eo,fo,Tr)}function ss(e,t,n,s,r){if(!ee(e)||e.__v_raw&&!(t&&e.__v_isReactive))return e;const i=r.get(e);if(i)return i;const o=ho(e);if(o===0)return e;const l=new Proxy(e,o===2?s:n);return r.set(e,l),l}function ut(e){return gt(e)?ut(e.__v_raw):!!(e&&e.__v_isReactive)}function gt(e){return!!(e&&e.__v_isReadonly)}function Qt(e){return!!(e&&e.__v_isShallow)}function Pr(e){return ut(e)||gt(e)}function Y(e){const t=e&&e.__v_raw;return t?Y(t):e}function Tt(e){return Zt(e,"__v_skip",!0),e}const Rt=e=>ee(e)?an(e):e,rs=e=>ee(e)?Ar(e):e;function Ir(e){ke&&Ae&&(e=Y(e),yr(e.dep||(e.dep=Qn())))}function Or(e,t){e=Y(e);const n=e.dep;n&&Nn(n)}function 
fe(e){return!!(e&&e.__v_isRef===!0)}function Rr(e){return Fr(e,!1)}function go(e){return Fr(e,!0)}function Fr(e,t){return fe(e)?e:new mo(e,t)}class mo{constructor(t,n){this.__v_isShallow=n,this.dep=void 0,this.__v_isRef=!0,this._rawValue=n?t:Y(t),this._value=n?t:Rt(t)}get value(){return Ir(this),this._value}set value(t){const n=this.__v_isShallow||Qt(t)||gt(t);t=n?t:Y(t),Ot(t,this._rawValue)&&(this._rawValue=t,this._value=n?t:Rt(t),Or(this))}}function _o(e){return fe(e)?e.value:e}const bo={get:(e,t,n)=>_o(Reflect.get(e,t,n)),set:(e,t,n,s)=>{const r=e[t];return fe(r)&&!fe(n)?(r.value=n,!0):Reflect.set(e,t,n,s)}};function Sr(e){return ut(e)?e:new Proxy(e,bo)}class yo{constructor(t,n,s,r){this._setter=n,this.dep=void 0,this.__v_isRef=!0,this.__v_isReadonly=!1,this._dirty=!0,this.effect=new Gn(t,()=>{this._dirty||(this._dirty=!0,Or(this))}),this.effect.computed=this,this.effect.active=this._cacheable=!r,this.__v_isReadonly=s}get value(){const t=Y(this);return Ir(t),(t._dirty||!t._cacheable)&&(t._dirty=!1,t._value=t.effect.run()),t._value}set value(t){this._setter(t)}}function wo(e,t,n=!1){let s,r;const i=j(e);return i?(s=e,r=Ie):(s=e.get,r=e.set),new yo(s,r,i||!r,n)}function We(e,t,n,s){let r;try{r=s?e(...s):e()}catch(i){un(i,t,n)}return r}function Ce(e,t,n,s){if(j(e)){const i=We(e,t,n,s);return i&&ur(i)&&i.catch(o=>{un(o,t,n)}),i}const r=[];for(let i=0;i>>1;St(ce[s])Se&&ce.splice(t,1)}function Eo(e){L(e)?dt.push(...e):(!Ne||!Ne.includes(e,e.allowRecurse?Qe+1:Qe))&&dt.push(e),Nr()}function As(e,t=Ft?Se+1:0){for(;tSt(n)-St(s)),Qe=0;Qee.id==null?1/0:e.id,To=(e,t)=>{const n=St(e)-St(t);if(n===0){if(e.pre&&!t.pre)return-1;if(t.pre&&!e.pre)return 1}return n};function $r(e){$n=!1,Ft=!0,ce.sort(To);const t=Ie;try{for(Se=0;Sese(A)?A.trim():A)),p&&(r=n.map(Ri))}let l,c=s[l=zt(t)]||s[l=zt(Me(t))];!c&&i&&(c=s[l=zt(rt(t))]),c&&Ce(c,e,6,r);const a=s[l+"Once"];if(a){if(!e.emitted)e.emitted={};else if(e.emitted[l])return;e.emitted[l]=!0,Ce(a,e,6,r)}}function Hr(e,t,n=!1){const 
s=t.emitsCache,r=s.get(e);if(r!==void 0)return r;const i=e.emits;let o={},l=!1;if(!j(e)){const c=a=>{const d=Hr(a,t,!0);d&&(l=!0,ie(o,d))};!n&&t.mixins.length&&t.mixins.forEach(c),e.extends&&c(e.extends),e.mixins&&e.mixins.forEach(c)}return!i&&!l?(ee(e)&&s.set(e,null),null):(L(i)?i.forEach(c=>o[c]=null):ie(o,i),ee(e)&&s.set(e,o),o)}function dn(e,t){return!e||!Nt(t)?!1:(t=t.slice(2).replace(/Once$/,""),q(e,t[0].toLowerCase()+t.slice(1))||q(e,rt(t))||q(e,t))}let ae=null,hn=null;function en(e){const t=ae;return ae=e,hn=e&&e.type.__scopeId||null,t}function Oc(e){hn=e}function Rc(){hn=null}function Po(e,t=ae,n){if(!t||e._n)return e;const s=(...r)=>{s._d&&Bs(-1);const i=en(t);let o;try{o=e(...r)}finally{en(i),s._d&&Bs(1)}return o};return s._n=!0,s._c=!0,s._d=!0,s}function vn(e){const{type:t,vnode:n,proxy:s,withProxy:r,props:i,propsOptions:[o],slots:l,attrs:c,emit:a,render:d,renderCache:p,data:y,setupState:A,ctx:N,inheritAttrs:R}=e;let U,_;const x=en(e);try{if(n.shapeFlag&4){const P=r||s;U=Te(d.call(P,P,p,i,A,y,N)),_=c}else{const P=t;U=Te(P.length>1?P(i,{attrs:c,slots:l,emit:a}):P(i,null)),_=t.props?c:Io(c)}}catch(P){It.length=0,un(P,e,1),U=le(be)}let $=U;if(_&&R!==!1){const P=Object.keys(_),{shapeFlag:K}=$;P.length&&K&7&&(o&&P.some(qn)&&(_=Oo(_,o)),$=qe($,_))}return n.dirs&&($=qe($),$.dirs=$.dirs?$.dirs.concat(n.dirs):n.dirs),n.transition&&($.transition=n.transition),U=$,en(x),U}const Io=e=>{let t;for(const n in e)(n==="class"||n==="style"||Nt(n))&&((t||(t={}))[n]=e[n]);return t},Oo=(e,t)=>{const n={};for(const s in e)(!qn(s)||!(s.slice(9)in t))&&(n[s]=e[s]);return n};function Ro(e,t,n){const{props:s,children:r,component:i}=e,{props:o,children:l,patchFlag:c}=t,a=i.emitsOptions;if(t.dirs||t.transition)return!0;if(n&&c>=0){if(c&1024)return!0;if(c&16)return s?Ps(s,o,a):!!o;if(c&8){const d=t.dynamicProps;for(let p=0;pe.__isSuspense;function Br(e,t){t&&t.pendingBranch?L(e)?t.effects.push(...e):t.effects.push(e):Eo(e)}function Mo(e,t){return pn(e,null,t)}function 
Fc(e,t){return pn(e,null,{flush:"post"})}const kt={};function Yt(e,t,n){return pn(e,t,n)}function pn(e,t,{immediate:n,deep:s,flush:r,onTrack:i,onTrigger:o}=te){var l;const c=ji()===((l=oe)==null?void 0:l.scope)?oe:null;let a,d=!1,p=!1;if(fe(e)?(a=()=>e.value,d=Qt(e)):ut(e)?(a=()=>e,s=!0):L(e)?(p=!0,d=e.some(P=>ut(P)||Qt(P)),a=()=>e.map(P=>{if(fe(P))return P.value;if(ut(P))return ct(P);if(j(P))return We(P,c,2)})):j(e)?t?a=()=>We(e,c,2):a=()=>{if(!(c&&c.isUnmounted))return y&&y(),Ce(e,c,3,[A])}:a=Ie,t&&s){const P=a;a=()=>ct(P())}let y,A=P=>{y=x.onStop=()=>{We(P,c,4)}},N;if(Lt)if(A=Ie,t?n&&Ce(t,c,3,[a(),p?[]:void 0,A]):a(),r==="sync"){const P=Pl();N=P.__watcherHandles||(P.__watcherHandles=[])}else return Ie;let R=p?new Array(e.length).fill(kt):kt;const U=()=>{if(x.active)if(t){const P=x.run();(s||d||(p?P.some((K,J)=>Ot(K,R[J])):Ot(P,R)))&&(y&&y(),Ce(t,c,3,[P,R===kt?void 0:p&&R[0]===kt?[]:R,A]),R=P)}else x.run()};U.allowRecurse=!!t;let _;r==="sync"?_=U:r==="post"?_=()=>de(U,c&&c.suspense):(U.pre=!0,c&&(U.id=c.uid),_=()=>os(U));const x=new Gn(a,_);t?n?U():R=x.run():r==="post"?de(x.run.bind(x),c&&c.suspense):x.run();const $=()=>{x.stop(),c&&c.scope&&zn(c.scope.effects,x)};return N&&N.push($),$}function Lo(e,t,n){const s=this.proxy,r=se(e)?e.includes(".")?Ur(s,e):()=>s[e]:e.bind(s,s);let i;j(t)?i=t:(i=t.handler,n=t);const o=oe;_t(this);const l=pn(r,i.bind(s),n);return o?_t(o):st(),l}function Ur(e,t){const n=t.split(".");return()=>{let s=e;for(let r=0;r{ct(n,t)});else if(hr(e))for(const n in e)ct(e[n],t);return e}function Fe(e,t,n,s){const r=e.dirs,i=t&&t.dirs;for(let o=0;o{e.isMounted=!0}),Vr(()=>{e.isUnmounting=!0}),e}const ye=[Function,Array],jr={mode:String,appear:Boolean,persisted:Boolean,onBeforeEnter:ye,onEnter:ye,onAfterEnter:ye,onEnterCancelled:ye,onBeforeLeave:ye,onLeave:ye,onAfterLeave:ye,onLeaveCancelled:ye,onBeforeAppear:ye,onAppear:ye,onAfterAppear:ye,onAppearCancelled:ye},$o={name:"BaseTransition",props:jr,setup(e,{slots:t}){const n=fi(),s=No();let 
r;return()=>{const i=t.default&&Kr(t.default(),!0);if(!i||!i.length)return;let o=i[0];if(i.length>1){for(const R of i)if(R.type!==be){o=R;break}}const l=Y(e),{mode:c}=l;if(s.isLeaving)return En(o);const a=Is(o);if(!a)return En(o);const d=Hn(a,l,s,n);Bn(a,d);const p=n.subTree,y=p&&Is(p);let A=!1;const{getTransitionKey:N}=a.type;if(N){const R=N();r===void 0?r=R:R!==r&&(r=R,A=!0)}if(y&&y.type!==be&&(!Ge(a,y)||A)){const R=Hn(y,l,s,n);if(Bn(y,R),c==="out-in")return s.isLeaving=!0,R.afterLeave=()=>{s.isLeaving=!1,n.update.active!==!1&&n.update()},En(o);c==="in-out"&&a.type!==be&&(R.delayLeave=(U,_,x)=>{const $=Dr(s,y);$[String(y.key)]=y,U._leaveCb=()=>{_(),U._leaveCb=void 0,delete d.delayedLeave},d.delayedLeave=x})}return o}}},Ho=$o;function Dr(e,t){const{leavingVNodes:n}=e;let s=n.get(t.type);return s||(s=Object.create(null),n.set(t.type,s)),s}function Hn(e,t,n,s){const{appear:r,mode:i,persisted:o=!1,onBeforeEnter:l,onEnter:c,onAfterEnter:a,onEnterCancelled:d,onBeforeLeave:p,onLeave:y,onAfterLeave:A,onLeaveCancelled:N,onBeforeAppear:R,onAppear:U,onAfterAppear:_,onAppearCancelled:x}=t,$=String(e.key),P=Dr(n,e),K=(T,D)=>{T&&Ce(T,s,9,D)},J=(T,D)=>{const B=D[1];K(T,D),L(T)?T.every(z=>z.length<=1)&&B():T.length<=1&&B()},V={mode:i,persisted:o,beforeEnter(T){let D=l;if(!n.isMounted)if(r)D=R||l;else return;T._leaveCb&&T._leaveCb(!0);const B=P[$];B&&Ge(e,B)&&B.el._leaveCb&&B.el._leaveCb(),K(D,[T])},enter(T){let D=c,B=a,z=d;if(!n.isMounted)if(r)D=U||c,B=_||a,z=x||d;else return;let I=!1;const k=T._enterCb=F=>{I||(I=!0,F?K(z,[T]):K(B,[T]),V.delayedLeave&&V.delayedLeave(),T._enterCb=void 0)};D?J(D,[T,k]):k()},leave(T,D){const B=String(e.key);if(T._enterCb&&T._enterCb(!0),n.isUnmounting)return D();K(p,[T]);let z=!1;const I=T._leaveCb=k=>{z||(z=!0,D(),k?K(N,[T]):K(A,[T]),T._leaveCb=void 0,P[B]===e&&delete P[B])};P[B]=e,y?J(y,[T,I]):I()},clone(T){return Hn(T,t,n,s)}};return V}function En(e){if(gn(e))return e=qe(e),e.children=null,e}function Is(e){return 
gn(e)?e.children?e.children[0]:void 0:e}function Bn(e,t){e.shapeFlag&6&&e.component?Bn(e.component.subTree,t):e.shapeFlag&128?(e.ssContent.transition=t.clone(e.ssContent),e.ssFallback.transition=t.clone(e.ssFallback)):e.transition=t}function Kr(e,t=!1,n){let s=[],r=0;for(let i=0;i1)for(let i=0;iie({name:e.name},t,{setup:e}))():e}const ht=e=>!!e.type.__asyncLoader,gn=e=>e.type.__isKeepAlive;function Bo(e,t){Wr(e,"a",t)}function Uo(e,t){Wr(e,"da",t)}function Wr(e,t,n=oe){const s=e.__wdc||(e.__wdc=()=>{let r=n;for(;r;){if(r.isDeactivated)return;r=r.parent}return e()});if(mn(t,s,n),n){let r=n.parent;for(;r&&r.parent;)gn(r.parent.vnode)&&jo(s,t,n,r),r=r.parent}}function jo(e,t,n,s){const r=mn(t,e,s,!0);bn(()=>{zn(s[t],r)},n)}function mn(e,t,n=oe,s=!1){if(n){const r=n[e]||(n[e]=[]),i=t.__weh||(t.__weh=(...o)=>{if(n.isUnmounted)return;bt(),_t(n);const l=Ce(t,n,e,o);return st(),yt(),l});return s?r.unshift(i):r.push(i),i}}const He=e=>(t,n=oe)=>(!Lt||e==="sp")&&mn(e,(...s)=>t(...s),n),Do=He("bm"),_n=He("m"),Ko=He("bu"),ko=He("u"),Vr=He("bum"),bn=He("um"),Wo=He("sp"),Vo=He("rtg"),qo=He("rtc");function zo(e,t=oe){mn("ec",e,t)}const ls="components";function Sc(e,t){return zr(ls,e,!0,t)||e}const qr=Symbol.for("v-ndc");function Mc(e){return se(e)?zr(ls,e,!1)||e:e||qr}function zr(e,t,n=!0,s=!1){const r=ae||oe;if(r){const i=r.type;if(e===ls){const l=El(i,!1);if(l&&(l===t||l===Me(t)||l===cn(Me(t))))return i}const o=Os(r[e]||i[e],t)||Os(r.appContext[e],t);return!o&&s?i:o}}function Os(e,t){return e&&(e[t]||e[Me(t)]||e[cn(Me(t))])}function Lc(e,t,n,s){let r;const i=n&&n[s];if(L(e)||se(e)){r=new Array(e.length);for(let o=0,l=e.length;ot(o,l,void 0,i&&i[l]));else{const o=Object.keys(e);r=new Array(o.length);for(let l=0,c=o.length;lrn(t)?!(t.type===be||t.type===he&&!Yr(t.children)):!0)?e:null}function $c(e,t){const n={};for(const s in e)n[t&&/[A-Z]/.test(s)?`on:${s}`:zt(s)]=e[s];return n}const 
Un=e=>e?ai(e)?ds(e)||e.proxy:Un(e.parent):null,At=ie(Object.create(null),{$:e=>e,$el:e=>e.vnode.el,$data:e=>e.data,$props:e=>e.props,$attrs:e=>e.attrs,$slots:e=>e.slots,$refs:e=>e.refs,$parent:e=>Un(e.parent),$root:e=>Un(e.root),$emit:e=>e.emit,$options:e=>cs(e),$forceUpdate:e=>e.f||(e.f=()=>os(e.update)),$nextTick:e=>e.n||(e.n=Lr.bind(e.proxy)),$watch:e=>Lo.bind(e)}),Tn=(e,t)=>e!==te&&!e.__isScriptSetup&&q(e,t),Yo={get({_:e},t){const{ctx:n,setupState:s,data:r,props:i,accessCache:o,type:l,appContext:c}=e;let a;if(t[0]!=="$"){const A=o[t];if(A!==void 0)switch(A){case 1:return s[t];case 2:return r[t];case 4:return n[t];case 3:return i[t]}else{if(Tn(s,t))return o[t]=1,s[t];if(r!==te&&q(r,t))return o[t]=2,r[t];if((a=e.propsOptions[0])&&q(a,t))return o[t]=3,i[t];if(n!==te&&q(n,t))return o[t]=4,n[t];jn&&(o[t]=0)}}const d=At[t];let p,y;if(d)return t==="$attrs"&&pe(e,"get",t),d(e);if((p=l.__cssModules)&&(p=p[t]))return p;if(n!==te&&q(n,t))return o[t]=4,n[t];if(y=c.config.globalProperties,q(y,t))return y[t]},set({_:e},t,n){const{data:s,setupState:r,ctx:i}=e;return Tn(r,t)?(r[t]=n,!0):s!==te&&q(s,t)?(s[t]=n,!0):q(e.props,t)||t[0]==="$"&&t.slice(1)in e?!1:(i[t]=n,!0)},has({_:{data:e,setupState:t,accessCache:n,ctx:s,appContext:r,propsOptions:i}},o){let l;return!!n[o]||e!==te&&q(e,o)||Tn(t,o)||(l=i[0])&&q(l,o)||q(s,o)||q(At,o)||q(r.config.globalProperties,o)},defineProperty(e,t,n){return n.get!=null?e._.accessCache[t]=0:q(n,"value")&&this.set(e,t,n.value,null),Reflect.defineProperty(e,t,n)}};function Hc(){return Jo().slots}function Jo(){const e=fi();return e.setupContext||(e.setupContext=di(e))}function Rs(e){return L(e)?e.reduce((t,n)=>(t[n]=null,t),{}):e}let jn=!0;function Xo(e){const 
t=cs(e),n=e.proxy,s=e.ctx;jn=!1,t.beforeCreate&&Fs(t.beforeCreate,e,"bc");const{data:r,computed:i,methods:o,watch:l,provide:c,inject:a,created:d,beforeMount:p,mounted:y,beforeUpdate:A,updated:N,activated:R,deactivated:U,beforeDestroy:_,beforeUnmount:x,destroyed:$,unmounted:P,render:K,renderTracked:J,renderTriggered:V,errorCaptured:T,serverPrefetch:D,expose:B,inheritAttrs:z,components:I,directives:k,filters:F}=t;if(a&&Zo(a,s,null),o)for(const ne in o){const Q=o[ne];j(Q)&&(s[ne]=Q.bind(n))}if(r){const ne=r.call(n,n);ee(ne)&&(e.data=an(ne))}if(jn=!0,i)for(const ne in i){const Q=i[ne],ze=j(Q)?Q.bind(n,n):j(Q.get)?Q.get.bind(n,n):Ie,$t=!j(Q)&&j(Q.set)?Q.set.bind(n):Ie,Ye=Ee({get:ze,set:$t});Object.defineProperty(s,ne,{enumerable:!0,configurable:!0,get:()=>Ye.value,set:Oe=>Ye.value=Oe})}if(l)for(const ne in l)Jr(l[ne],s,n,ne);if(c){const ne=j(c)?c.call(n):c;Reflect.ownKeys(ne).forEach(Q=>{sl(Q,ne[Q])})}d&&Fs(d,e,"c");function X(ne,Q){L(Q)?Q.forEach(ze=>ne(ze.bind(n))):Q&&ne(Q.bind(n))}if(X(Do,p),X(_n,y),X(Ko,A),X(ko,N),X(Bo,R),X(Uo,U),X(zo,T),X(qo,J),X(Vo,V),X(Vr,x),X(bn,P),X(Wo,D),L(B))if(B.length){const ne=e.exposed||(e.exposed={});B.forEach(Q=>{Object.defineProperty(ne,Q,{get:()=>n[Q],set:ze=>n[Q]=ze})})}else e.exposed||(e.exposed={});K&&e.render===Ie&&(e.render=K),z!=null&&(e.inheritAttrs=z),I&&(e.components=I),k&&(e.directives=k)}function Zo(e,t,n=Ie){L(e)&&(e=Dn(e));for(const s in e){const r=e[s];let i;ee(r)?"default"in r?i=pt(r.from||s,r.default,!0):i=pt(r.from||s):i=pt(r),fe(i)?Object.defineProperty(t,s,{enumerable:!0,configurable:!0,get:()=>i.value,set:o=>i.value=o}):t[s]=i}}function Fs(e,t,n){Ce(L(e)?e.map(s=>s.bind(t.proxy)):e.bind(t.proxy),t,n)}function Jr(e,t,n,s){const r=s.includes(".")?Ur(n,s):()=>n[s];if(se(e)){const i=t[e];j(i)&&Yt(r,i)}else if(j(e))Yt(r,e.bind(n));else if(ee(e))if(L(e))e.forEach(i=>Jr(i,t,n,s));else{const i=j(e.handler)?e.handler.bind(n):t[e.handler];j(i)&&Yt(r,i,e)}}function cs(e){const 
t=e.type,{mixins:n,extends:s}=t,{mixins:r,optionsCache:i,config:{optionMergeStrategies:o}}=e.appContext,l=i.get(t);let c;return l?c=l:!r.length&&!n&&!s?c=t:(c={},r.length&&r.forEach(a=>tn(c,a,o,!0)),tn(c,t,o)),ee(t)&&i.set(t,c),c}function tn(e,t,n,s=!1){const{mixins:r,extends:i}=t;i&&tn(e,i,n,!0),r&&r.forEach(o=>tn(e,o,n,!0));for(const o in t)if(!(s&&o==="expose")){const l=Qo[o]||n&&n[o];e[o]=l?l(e[o],t[o]):t[o]}return e}const Qo={data:Ss,props:Ms,emits:Ms,methods:vt,computed:vt,beforeCreate:ue,created:ue,beforeMount:ue,mounted:ue,beforeUpdate:ue,updated:ue,beforeDestroy:ue,beforeUnmount:ue,destroyed:ue,unmounted:ue,activated:ue,deactivated:ue,errorCaptured:ue,serverPrefetch:ue,components:vt,directives:vt,watch:el,provide:Ss,inject:Go};function Ss(e,t){return t?e?function(){return ie(j(e)?e.call(this,this):e,j(t)?t.call(this,this):t)}:t:e}function Go(e,t){return vt(Dn(e),Dn(t))}function Dn(e){if(L(e)){const t={};for(let n=0;n1)return n&&j(t)?t.call(s&&s.proxy):t}}function rl(e,t,n,s=!1){const r={},i={};Zt(i,yn,1),e.propsDefaults=Object.create(null),Zr(e,t,r,i);for(const o in e.propsOptions[0])o in r||(r[o]=void 0);n?e.props=s?r:po(r):e.type.props?e.props=r:e.props=i,e.attrs=i}function il(e,t,n,s){const{props:r,attrs:i,vnode:{patchFlag:o}}=e,l=Y(r),[c]=e.propsOptions;let a=!1;if((s||o>0)&&!(o&16)){if(o&8){const d=e.vnode.dynamicProps;for(let p=0;p{c=!0;const[y,A]=Qr(p,t,!0);ie(o,y),A&&l.push(...A)};!n&&t.mixins.length&&t.mixins.forEach(d),e.extends&&d(e.extends),e.mixins&&e.mixins.forEach(d)}if(!i&&!c)return ee(e)&&s.set(e,ft),ft;if(L(i))for(let d=0;d-1,A[1]=R<0||N-1||q(A,"default"))&&l.push(p)}}}const a=[o,l];return ee(e)&&s.set(e,a),a}function Ls(e){return e[0]!=="$"}function Ns(e){const t=e&&e.toString().match(/^\s*(function|class) (\w+)/);return t?t[2]:e===null?"null":""}function $s(e,t){return Ns(e)===Ns(t)}function Hs(e,t){return L(t)?t.findIndex(n=>$s(n,e)):j(t)&&$s(t,e)?0:-1}const 
Gr=e=>e[0]==="_"||e==="$stable",fs=e=>L(e)?e.map(Te):[Te(e)],ol=(e,t,n)=>{if(t._n)return t;const s=Po((...r)=>fs(t(...r)),n);return s._c=!1,s},ei=(e,t,n)=>{const s=e._ctx;for(const r in e){if(Gr(r))continue;const i=e[r];if(j(i))t[r]=ol(r,i,s);else if(i!=null){const o=fs(i);t[r]=()=>o}}},ti=(e,t)=>{const n=fs(t);e.slots.default=()=>n},ll=(e,t)=>{if(e.vnode.shapeFlag&32){const n=t._;n?(e.slots=Y(t),Zt(t,"_",n)):ei(t,e.slots={})}else e.slots={},t&&ti(e,t);Zt(e.slots,yn,1)},cl=(e,t,n)=>{const{vnode:s,slots:r}=e;let i=!0,o=te;if(s.shapeFlag&32){const l=t._;l?n&&l===1?i=!1:(ie(r,t),!n&&l===1&&delete r._):(i=!t.$stable,ei(t,r)),o=t}else t&&(ti(e,t),o={default:1});if(i)for(const l in r)!Gr(l)&&!(l in o)&&delete r[l]};function sn(e,t,n,s,r=!1){if(L(e)){e.forEach((y,A)=>sn(y,t&&(L(t)?t[A]:t),n,s,r));return}if(ht(s)&&!r)return;const i=s.shapeFlag&4?ds(s.component)||s.component.proxy:s.el,o=r?null:i,{i:l,r:c}=e,a=t&&t.r,d=l.refs===te?l.refs={}:l.refs,p=l.setupState;if(a!=null&&a!==c&&(se(a)?(d[a]=null,q(p,a)&&(p[a]=null)):fe(a)&&(a.value=null)),j(c))We(c,l,12,[o,d]);else{const y=se(c),A=fe(c);if(y||A){const N=()=>{if(e.f){const R=y?q(p,c)?p[c]:d[c]:c.value;r?L(R)&&zn(R,i):L(R)?R.includes(i)||R.push(i):y?(d[c]=[i],q(p,c)&&(p[c]=d[c])):(c.value=[i],e.k&&(d[e.k]=c.value))}else y?(d[c]=o,q(p,c)&&(p[c]=o)):A&&(c.value=o,e.k&&(d[e.k]=o))};o?(N.id=-1,de(N,n)):N()}}}let Ue=!1;const Wt=e=>/svg/.test(e.namespaceURI)&&e.tagName!=="foreignObject",Vt=e=>e.nodeType===8;function fl(e){const{mt:t,p:n,o:{patchProp:s,createText:r,nextSibling:i,parentNode:o,remove:l,insert:c,createComment:a}}=e,d=(_,x)=>{if(!x.hasChildNodes()){n(null,_,x),Gt(),x._vnode=_;return}Ue=!1,p(x.firstChild,_,null,null,null),Gt(),x._vnode=_,Ue&&console.error("Hydration completed but contains mismatches.")},p=(_,x,$,P,K,J=!1)=>{const V=Vt(_)&&_.data==="[",T=()=>R(_,x,$,P,K,V),{type:D,ref:B,shapeFlag:z,patchFlag:I}=x;let k=_.nodeType;x.el=_,I===-2&&(J=!1,x.dynamicChildren=null);let F=null;switch(D){case 
mt:k!==3?x.children===""?(c(x.el=r(""),o(_),_),F=_):F=T():(_.data!==x.children&&(Ue=!0,_.data=x.children),F=i(_));break;case be:k!==8||V?F=T():F=i(_);break;case Pt:if(V&&(_=i(_),k=_.nodeType),k===1||k===3){F=_;const ge=!x.children.length;for(let X=0;X{J=J||!!x.dynamicChildren;const{type:V,props:T,patchFlag:D,shapeFlag:B,dirs:z}=x,I=V==="input"&&z||V==="option";if(I||D!==-1){if(z&&Fe(x,null,$,"created"),T)if(I||!J||D&48)for(const F in T)(I&&F.endsWith("value")||Nt(F)&&!Et(F))&&s(_,F,null,T[F],!1,void 0,$);else T.onClick&&s(_,"onClick",null,T.onClick,!1,void 0,$);let k;if((k=T&&T.onVnodeBeforeMount)&&we(k,$,x),z&&Fe(x,null,$,"beforeMount"),((k=T&&T.onVnodeMounted)||z)&&Br(()=>{k&&we(k,$,x),z&&Fe(x,null,$,"mounted")},P),B&16&&!(T&&(T.innerHTML||T.textContent))){let F=A(_.firstChild,x,_,$,P,K,J);for(;F;){Ue=!0;const ge=F;F=F.nextSibling,l(ge)}}else B&8&&_.textContent!==x.children&&(Ue=!0,_.textContent=x.children)}return _.nextSibling},A=(_,x,$,P,K,J,V)=>{V=V||!!x.dynamicChildren;const T=x.children,D=T.length;for(let B=0;B{const{slotScopeIds:V}=x;V&&(K=K?K.concat(V):V);const T=o(_),D=A(i(_),x,T,$,P,K,J);return D&&Vt(D)&&D.data==="]"?i(x.anchor=D):(Ue=!0,c(x.anchor=a("]"),T,D),D)},R=(_,x,$,P,K,J)=>{if(Ue=!0,x.el=null,J){const D=U(_);for(;;){const B=i(_);if(B&&B!==D)l(B);else break}}const V=i(_),T=o(_);return l(_),n(null,x,T,V,$,P,Wt(T),K),V},U=_=>{let x=0;for(;_;)if(_=i(_),_&&Vt(_)&&(_.data==="["&&x++,_.data==="]")){if(x===0)return i(_);x--}return _};return[d,p]}const de=Br;function al(e){return ul(e,fl)}function ul(e,t){const n=Fn();n.__VUE__=!0;const{insert:s,remove:r,patchProp:i,createElement:o,createText:l,createComment:c,setText:a,setElementText:d,parentNode:p,nextSibling:y,setScopeId:A=Ie,insertStaticContent:N}=e,R=(f,u,h,m=null,g=null,C=null,E=!1,w=null,v=!!u.dynamicChildren)=>{if(f===u)return;f&&!Ge(f,u)&&(m=Ht(f),Oe(f,g,C,!0),f=null),u.patchFlag===-2&&(v=!1,u.dynamicChildren=null);const{type:b,ref:S,shapeFlag:O}=u;switch(b){case mt:U(f,u,h,m);break;case 
be:_(f,u,h,m);break;case Pt:f==null&&x(u,h,m,E);break;case he:I(f,u,h,m,g,C,E,w,v);break;default:O&1?K(f,u,h,m,g,C,E,w,v):O&6?k(f,u,h,m,g,C,E,w,v):(O&64||O&128)&&b.process(f,u,h,m,g,C,E,w,v,it)}S!=null&&g&&sn(S,f&&f.ref,C,u||f,!u)},U=(f,u,h,m)=>{if(f==null)s(u.el=l(u.children),h,m);else{const g=u.el=f.el;u.children!==f.children&&a(g,u.children)}},_=(f,u,h,m)=>{f==null?s(u.el=c(u.children||""),h,m):u.el=f.el},x=(f,u,h,m)=>{[f.el,f.anchor]=N(f.children,u,h,m,f.el,f.anchor)},$=({el:f,anchor:u},h,m)=>{let g;for(;f&&f!==u;)g=y(f),s(f,h,m),f=g;s(u,h,m)},P=({el:f,anchor:u})=>{let h;for(;f&&f!==u;)h=y(f),r(f),f=h;r(u)},K=(f,u,h,m,g,C,E,w,v)=>{E=E||u.type==="svg",f==null?J(u,h,m,g,C,E,w,v):D(f,u,g,C,E,w,v)},J=(f,u,h,m,g,C,E,w)=>{let v,b;const{type:S,props:O,shapeFlag:M,transition:H,dirs:W}=f;if(v=f.el=o(f.type,C,O&&O.is,O),M&8?d(v,f.children):M&16&&T(f.children,v,null,m,g,C&&S!=="foreignObject",E,w),W&&Fe(f,null,m,"created"),V(v,f,f.scopeId,E,m),O){for(const Z in O)Z!=="value"&&!Et(Z)&&i(v,Z,null,O[Z],C,f.children,m,g,Le);"value"in O&&i(v,"value",null,O.value),(b=O.onVnodeBeforeMount)&&we(b,m,f)}W&&Fe(f,null,m,"beforeMount");const G=(!g||g&&!g.pendingBranch)&&H&&!H.persisted;G&&H.beforeEnter(v),s(v,u,h),((b=O&&O.onVnodeMounted)||G||W)&&de(()=>{b&&we(b,m,f),G&&H.enter(v),W&&Fe(f,null,m,"mounted")},g)},V=(f,u,h,m,g)=>{if(h&&A(f,h),m)for(let C=0;C{for(let b=v;b{const w=u.el=f.el;let{patchFlag:v,dynamicChildren:b,dirs:S}=u;v|=f.patchFlag&16;const O=f.props||te,M=u.props||te;let H;h&&Je(h,!1),(H=M.onVnodeBeforeUpdate)&&we(H,h,u,f),S&&Fe(u,f,h,"beforeUpdate"),h&&Je(h,!0);const W=g&&u.type!=="foreignObject";if(b?B(f.dynamicChildren,b,w,h,m,W,C):E||Q(f,u,w,null,h,m,W,C,!1),v>0){if(v&16)z(w,u,O,M,h,m,g);else if(v&2&&O.class!==M.class&&i(w,"class",null,M.class,g),v&4&&i(w,"style",O.style,M.style,g),v&8){const G=u.dynamicProps;for(let Z=0;Z{H&&we(H,h,u,f),S&&Fe(u,f,h,"updated")},m)},B=(f,u,h,m,g,C,E)=>{for(let w=0;w{if(h!==m){if(h!==te)for(const w in h)!Et(w)&&!(w in 
m)&&i(f,w,h[w],null,E,u.children,g,C,Le);for(const w in m){if(Et(w))continue;const v=m[w],b=h[w];v!==b&&w!=="value"&&i(f,w,b,v,E,u.children,g,C,Le)}"value"in m&&i(f,"value",h.value,m.value)}},I=(f,u,h,m,g,C,E,w,v)=>{const b=u.el=f?f.el:l(""),S=u.anchor=f?f.anchor:l("");let{patchFlag:O,dynamicChildren:M,slotScopeIds:H}=u;H&&(w=w?w.concat(H):H),f==null?(s(b,h,m),s(S,h,m),T(u.children,h,S,g,C,E,w,v)):O>0&&O&64&&M&&f.dynamicChildren?(B(f.dynamicChildren,M,h,g,C,E,w),(u.key!=null||g&&u===g.subTree)&&ni(f,u,!0)):Q(f,u,h,S,g,C,E,w,v)},k=(f,u,h,m,g,C,E,w,v)=>{u.slotScopeIds=w,f==null?u.shapeFlag&512?g.ctx.activate(u,h,m,E,v):F(u,h,m,g,C,E,v):ge(f,u,v)},F=(f,u,h,m,g,C,E)=>{const w=f.component=wl(f,m,g);if(gn(f)&&(w.ctx.renderer=it),xl(w),w.asyncDep){if(g&&g.registerDep(w,X),!f.el){const v=w.subTree=le(be);_(null,v,u,h)}return}X(w,f,u,h,g,C,E)},ge=(f,u,h)=>{const m=u.component=f.component;if(Ro(f,u,h))if(m.asyncDep&&!m.asyncResolved){ne(m,u,h);return}else m.next=u,vo(m.update),m.update();else u.el=f.el,m.vnode=u},X=(f,u,h,m,g,C,E)=>{const w=()=>{if(f.isMounted){let{next:S,bu:O,u:M,parent:H,vnode:W}=f,G=S,Z;Je(f,!1),S?(S.el=W.el,ne(f,S,E)):S=W,O&&Cn(O),(Z=S.props&&S.props.onVnodeBeforeUpdate)&&we(Z,H,S,W),Je(f,!0);const re=vn(f),ve=f.subTree;f.subTree=re,R(ve,re,p(ve.el),Ht(ve),f,g,C),S.el=re.el,G===null&&Fo(f,re.el),M&&de(M,g),(Z=S.props&&S.props.onVnodeUpdated)&&de(()=>we(Z,H,S,W),g)}else{let S;const{el:O,props:M}=u,{bm:H,m:W,parent:G}=f,Z=ht(u);if(Je(f,!1),H&&Cn(H),!Z&&(S=M&&M.onVnodeBeforeMount)&&we(S,G,u),Je(f,!0),O&&xn){const re=()=>{f.subTree=vn(f),xn(O,f.subTree,f,g,null)};Z?u.type.__asyncLoader().then(()=>!f.isUnmounted&&re()):re()}else{const re=f.subTree=vn(f);R(null,re,h,m,f,g,C),u.el=re.el}if(W&&de(W,g),!Z&&(S=M&&M.onVnodeMounted)){const re=u;de(()=>we(S,G,re),g)}(u.shapeFlag&256||G&&ht(G.vnode)&&G.vnode.shapeFlag&256)&&f.a&&de(f.a,g),f.isMounted=!0,u=h=m=null}},v=f.effect=new 
Gn(w,()=>os(b),f.scope),b=f.update=()=>v.run();b.id=f.uid,Je(f,!0),b()},ne=(f,u,h)=>{u.component=f;const m=f.vnode.props;f.vnode=u,f.next=null,il(f,u.props,m,h),cl(f,u.children,h),bt(),As(),yt()},Q=(f,u,h,m,g,C,E,w,v=!1)=>{const b=f&&f.children,S=f?f.shapeFlag:0,O=u.children,{patchFlag:M,shapeFlag:H}=u;if(M>0){if(M&128){$t(b,O,h,m,g,C,E,w,v);return}else if(M&256){ze(b,O,h,m,g,C,E,w,v);return}}H&8?(S&16&&Le(b,g,C),O!==b&&d(h,O)):S&16?H&16?$t(b,O,h,m,g,C,E,w,v):Le(b,g,C,!0):(S&8&&d(h,""),H&16&&T(O,h,m,g,C,E,w,v))},ze=(f,u,h,m,g,C,E,w,v)=>{f=f||ft,u=u||ft;const b=f.length,S=u.length,O=Math.min(b,S);let M;for(M=0;MS?Le(f,g,C,!0,!1,O):T(u,h,m,g,C,E,w,v,O)},$t=(f,u,h,m,g,C,E,w,v)=>{let b=0;const S=u.length;let O=f.length-1,M=S-1;for(;b<=O&&b<=M;){const H=f[b],W=u[b]=v?Ke(u[b]):Te(u[b]);if(Ge(H,W))R(H,W,h,null,g,C,E,w,v);else break;b++}for(;b<=O&&b<=M;){const H=f[O],W=u[M]=v?Ke(u[M]):Te(u[M]);if(Ge(H,W))R(H,W,h,null,g,C,E,w,v);else break;O--,M--}if(b>O){if(b<=M){const H=M+1,W=HM)for(;b<=O;)Oe(f[b],g,C,!0),b++;else{const H=b,W=b,G=new Map;for(b=W;b<=M;b++){const me=u[b]=v?Ke(u[b]):Te(u[b]);me.key!=null&&G.set(me.key,b)}let Z,re=0;const ve=M-W+1;let ot=!1,gs=0;const wt=new Array(ve);for(b=0;b=ve){Oe(me,g,C,!0);continue}let Re;if(me.key!=null)Re=G.get(me.key);else for(Z=W;Z<=M;Z++)if(wt[Z-W]===0&&Ge(me,u[Z])){Re=Z;break}Re===void 0?Oe(me,g,C,!0):(wt[Re-W]=b+1,Re>=gs?gs=Re:ot=!0,R(me,u[Re],h,null,g,C,E,w,v),re++)}const ms=ot?dl(wt):ft;for(Z=ms.length-1,b=ve-1;b>=0;b--){const me=W+b,Re=u[me],_s=me+1{const{el:C,type:E,transition:w,children:v,shapeFlag:b}=f;if(b&6){Ye(f.component.subTree,u,h,m);return}if(b&128){f.suspense.move(u,h,m);return}if(b&64){E.move(f,u,h,it);return}if(E===he){s(C,u,h);for(let O=0;Ow.enter(C),g);else{const{leave:O,delayLeave:M,afterLeave:H}=w,W=()=>s(C,u,h),G=()=>{O(C,()=>{W(),H&&H()})};M?M(C,W,G):G()}else 
s(C,u,h)},Oe=(f,u,h,m=!1,g=!1)=>{const{type:C,props:E,ref:w,children:v,dynamicChildren:b,shapeFlag:S,patchFlag:O,dirs:M}=f;if(w!=null&&sn(w,null,h,f,!0),S&256){u.ctx.deactivate(f);return}const H=S&1&&M,W=!ht(f);let G;if(W&&(G=E&&E.onVnodeBeforeUnmount)&&we(G,u,f),S&6)vi(f.component,h,m);else{if(S&128){f.suspense.unmount(h,m);return}H&&Fe(f,null,u,"beforeUnmount"),S&64?f.type.remove(f,u,h,g,it,m):b&&(C!==he||O>0&&O&64)?Le(b,u,h,!1,!0):(C===he&&O&384||!g&&S&16)&&Le(v,u,h),m&&hs(f)}(W&&(G=E&&E.onVnodeUnmounted)||H)&&de(()=>{G&&we(G,u,f),H&&Fe(f,null,u,"unmounted")},h)},hs=f=>{const{type:u,el:h,anchor:m,transition:g}=f;if(u===he){Ci(h,m);return}if(u===Pt){P(f);return}const C=()=>{r(h),g&&!g.persisted&&g.afterLeave&&g.afterLeave()};if(f.shapeFlag&1&&g&&!g.persisted){const{leave:E,delayLeave:w}=g,v=()=>E(h,C);w?w(f.el,C,v):v()}else C()},Ci=(f,u)=>{let h;for(;f!==u;)h=y(f),r(f),f=h;r(u)},vi=(f,u,h)=>{const{bum:m,scope:g,update:C,subTree:E,um:w}=f;m&&Cn(m),g.stop(),C&&(C.active=!1,Oe(E,f,u,h)),w&&de(w,u),de(()=>{f.isUnmounted=!0},u),u&&u.pendingBranch&&!u.isUnmounted&&f.asyncDep&&!f.asyncResolved&&f.suspenseId===u.pendingId&&(u.deps--,u.deps===0&&u.resolve())},Le=(f,u,h,m=!1,g=!1,C=0)=>{for(let E=C;Ef.shapeFlag&6?Ht(f.component.subTree):f.shapeFlag&128?f.suspense.next():y(f.anchor||f.el),ps=(f,u,h)=>{f==null?u._vnode&&Oe(u._vnode,null,null,!0):R(u._vnode||null,f,u,null,null,null,h),As(),Gt(),u._vnode=f},it={p:R,um:Oe,m:Ye,r:hs,mt:F,mc:T,pc:Q,pbc:B,n:Ht,o:e};let wn,xn;return t&&([wn,xn]=t(it)),{render:ps,hydrate:wn,createApp:nl(ps,wn)}}function Je({effect:e,update:t},n){e.allowRecurse=t.allowRecurse=n}function ni(e,t,n=!1){const s=e.children,r=t.children;if(L(s)&&L(r))for(let i=0;i>1,e[n[l]]0&&(t[s]=n[i-1]),n[i]=s)}}for(i=n.length,o=n[i-1];i-- >0;)n[i]=o,o=t[o];return n}const hl=e=>e.__isTeleport,he=Symbol.for("v-fgt"),mt=Symbol.for("v-txt"),be=Symbol.for("v-cmt"),Pt=Symbol.for("v-stc"),It=[];let Pe=null;function si(e=!1){It.push(Pe=e?null:[])}function 
pl(){It.pop(),Pe=It[It.length-1]||null}let Mt=1;function Bs(e){Mt+=e}function ri(e){return e.dynamicChildren=Mt>0?Pe||ft:null,pl(),Mt>0&&Pe&&Pe.push(e),e}function Bc(e,t,n,s,r,i){return ri(li(e,t,n,s,r,i,!0))}function ii(e,t,n,s,r){return ri(le(e,t,n,s,r,!0))}function rn(e){return e?e.__v_isVNode===!0:!1}function Ge(e,t){return e.type===t.type&&e.key===t.key}const yn="__vInternal",oi=({key:e})=>e??null,Jt=({ref:e,ref_key:t,ref_for:n})=>(typeof e=="number"&&(e=""+e),e!=null?se(e)||fe(e)||j(e)?{i:ae,r:e,k:t,f:!!n}:e:null);function li(e,t=null,n=null,s=0,r=null,i=e===he?0:1,o=!1,l=!1){const c={__v_isVNode:!0,__v_skip:!0,type:e,props:t,key:t&&oi(t),ref:t&&Jt(t),scopeId:hn,slotScopeIds:null,children:n,component:null,suspense:null,ssContent:null,ssFallback:null,dirs:null,transition:null,el:null,anchor:null,target:null,targetAnchor:null,staticCount:0,shapeFlag:i,patchFlag:s,dynamicProps:r,dynamicChildren:null,appContext:null,ctx:ae};return l?(as(c,n),i&128&&e.normalize(c)):n&&(c.shapeFlag|=se(n)?8:16),Mt>0&&!o&&Pe&&(c.patchFlag>0||i&6)&&c.patchFlag!==32&&Pe.push(c),c}const le=gl;function gl(e,t=null,n=null,s=0,r=null,i=!1){if((!e||e===qr)&&(e=be),rn(e)){const l=qe(e,t,!0);return n&&as(l,n),Mt>0&&!i&&Pe&&(l.shapeFlag&6?Pe[Pe.indexOf(e)]=l:Pe.push(l)),l.patchFlag|=-2,l}if(Tl(e)&&(e=e.__vccOpts),t){t=ml(t);let{class:l,style:c}=t;l&&!se(l)&&(t.class=Zn(l)),ee(c)&&(Pr(c)&&!L(c)&&(c=ie({},c)),t.style=Xn(c))}const o=se(e)?1:So(e)?128:hl(e)?64:ee(e)?4:j(e)?2:0;return li(e,t,n,s,r,o,i,!0)}function ml(e){return e?Pr(e)||yn in e?ie({},e):e:null}function 
qe(e,t,n=!1){const{props:s,ref:r,patchFlag:i,children:o}=e,l=t?_l(s||{},t):s;return{__v_isVNode:!0,__v_skip:!0,type:e.type,props:l,key:l&&oi(l),ref:t&&t.ref?n&&r?L(r)?r.concat(Jt(t)):[r,Jt(t)]:Jt(t):r,scopeId:e.scopeId,slotScopeIds:e.slotScopeIds,children:o,target:e.target,targetAnchor:e.targetAnchor,staticCount:e.staticCount,shapeFlag:e.shapeFlag,patchFlag:t&&e.type!==he?i===-1?16:i|16:i,dynamicProps:e.dynamicProps,dynamicChildren:e.dynamicChildren,appContext:e.appContext,dirs:e.dirs,transition:e.transition,component:e.component,suspense:e.suspense,ssContent:e.ssContent&&qe(e.ssContent),ssFallback:e.ssFallback&&qe(e.ssFallback),el:e.el,anchor:e.anchor,ctx:e.ctx,ce:e.ce}}function ci(e=" ",t=0){return le(mt,null,e,t)}function Uc(e,t){const n=le(Pt,null,e);return n.staticCount=t,n}function jc(e="",t=!1){return t?(si(),ii(be,null,e)):le(be,null,e)}function Te(e){return e==null||typeof e=="boolean"?le(be):L(e)?le(he,null,e.slice()):typeof e=="object"?Ke(e):le(mt,null,String(e))}function Ke(e){return e.el===null&&e.patchFlag!==-1||e.memo?e:qe(e)}function as(e,t){let n=0;const{shapeFlag:s}=e;if(t==null)t=null;else if(L(t))n=16;else if(typeof t=="object")if(s&65){const r=t.default;r&&(r._c&&(r._d=!1),as(e,r()),r._c&&(r._d=!0));return}else{n=32;const r=t._;!r&&!(yn in t)?t._ctx=ae:r===3&&ae&&(ae.slots._===1?t._=1:(t._=2,e.patchFlag|=1024))}else j(t)?(t={default:t,_ctx:ae},n=32):(t=String(t),s&64?(n=16,t=[ci(t)]):n=8);e.children=t,e.shapeFlag|=n}function _l(...e){const t={};for(let n=0;noe||ae;let us,lt,Us="__VUE_INSTANCE_SETTERS__";(lt=Fn()[Us])||(lt=Fn()[Us]=[]),lt.push(e=>oe=e),us=e=>{lt.length>1?lt.forEach(t=>t(e)):lt[0](e)};const _t=e=>{us(e),e.scope.on()},st=()=>{oe&&oe.scope.off(),us(null)};function ai(e){return e.vnode.shapeFlag&4}let Lt=!1;function xl(e,t=!1){Lt=t;const{props:n,children:s}=e.vnode,r=ai(e);rl(e,n,r,t),ll(e,s);const i=r?Cl(e,t):void 0;return Lt=!1,i}function Cl(e,t){const n=e.type;e.accessCache=Object.create(null),e.proxy=Tt(new 
Proxy(e.ctx,Yo));const{setup:s}=n;if(s){const r=e.setupContext=s.length>1?di(e):null;_t(e),bt();const i=We(s,e,0,[e.props,r]);if(yt(),st(),ur(i)){if(i.then(st,st),t)return i.then(o=>{js(e,o,t)}).catch(o=>{un(o,e,0)});e.asyncDep=i}else js(e,i,t)}else ui(e,t)}function js(e,t,n){j(t)?e.type.__ssrInlineRender?e.ssrRender=t:e.render=t:ee(t)&&(e.setupState=Sr(t)),ui(e,n)}let Ds;function ui(e,t,n){const s=e.type;if(!e.render){if(!t&&Ds&&!s.render){const r=s.template||cs(e).template;if(r){const{isCustomElement:i,compilerOptions:o}=e.appContext.config,{delimiters:l,compilerOptions:c}=s,a=ie(ie({isCustomElement:i,delimiters:l},o),c);s.render=Ds(r,a)}}e.render=s.render||Ie}_t(e),bt(),Xo(e),yt(),st()}function vl(e){return e.attrsProxy||(e.attrsProxy=new Proxy(e.attrs,{get(t,n){return pe(e,"get","$attrs"),t[n]}}))}function di(e){const t=n=>{e.exposed=n||{}};return{get attrs(){return vl(e)},slots:e.slots,emit:e.emit,expose:t}}function ds(e){if(e.exposed)return e.exposeProxy||(e.exposeProxy=new Proxy(Sr(Tt(e.exposed)),{get(t,n){if(n in t)return t[n];if(n in At)return At[n](e)},has(t,n){return n in t||n in At}}))}function El(e,t=!0){return j(e)?e.displayName||e.name:e.name||t&&e.__name}function Tl(e){return j(e)&&"__vccOpts"in e}const Ee=(e,t)=>wo(e,t,Lt);function kn(e,t,n){const s=arguments.length;return s===2?ee(t)&&!L(t)?rn(t)?le(e,null,[t]):le(e,t):le(e,null,t):(s>3?n=Array.prototype.slice.call(arguments,2):s===3&&rn(n)&&(n=[n]),le(e,t,n))}const Al=Symbol.for("v-scx"),Pl=()=>pt(Al),Il="3.3.4",Ol="http://www.w3.org/2000/svg",et=typeof document<"u"?document:null,Ks=et&&et.createElement("template"),Rl={insert:(e,t,n)=>{t.insertBefore(e,n||null)},remove:e=>{const t=e.parentNode;t&&t.removeChild(e)},createElement:(e,t,n,s)=>{const r=t?et.createElementNS(Ol,e):et.createElement(e,n?{is:n}:void 0);return 
e==="select"&&s&&s.multiple!=null&&r.setAttribute("multiple",s.multiple),r},createText:e=>et.createTextNode(e),createComment:e=>et.createComment(e),setText:(e,t)=>{e.nodeValue=t},setElementText:(e,t)=>{e.textContent=t},parentNode:e=>e.parentNode,nextSibling:e=>e.nextSibling,querySelector:e=>et.querySelector(e),setScopeId(e,t){e.setAttribute(t,"")},insertStaticContent(e,t,n,s,r,i){const o=n?n.previousSibling:t.lastChild;if(r&&(r===i||r.nextSibling))for(;t.insertBefore(r.cloneNode(!0),n),!(r===i||!(r=r.nextSibling)););else{Ks.innerHTML=s?`${e}`:e;const l=Ks.content;if(s){const c=l.firstChild;for(;c.firstChild;)l.appendChild(c.firstChild);l.removeChild(c)}t.insertBefore(l,n)}return[o?o.nextSibling:t.firstChild,n?n.previousSibling:t.lastChild]}};function Fl(e,t,n){const s=e._vtc;s&&(t=(t?[t,...s]:[...s]).join(" ")),t==null?e.removeAttribute("class"):n?e.setAttribute("class",t):e.className=t}function Sl(e,t,n){const s=e.style,r=se(n);if(n&&!r){if(t&&!se(t))for(const i in t)n[i]==null&&Wn(s,i,"");for(const i in n)Wn(s,i,n[i])}else{const i=s.display;r?t!==n&&(s.cssText=n):t&&e.removeAttribute("style"),"_vod"in e&&(s.display=i)}}const ks=/\s*!important$/;function Wn(e,t,n){if(L(n))n.forEach(s=>Wn(e,t,s));else if(n==null&&(n=""),t.startsWith("--"))e.setProperty(t,n);else{const s=Ml(e,t);ks.test(n)?e.setProperty(rt(s),n.replace(ks,""),"important"):e[s]=n}}const Ws=["Webkit","Moz","ms"],An={};function Ml(e,t){const n=An[t];if(n)return n;let s=Me(t);if(s!=="filter"&&s in e)return An[t]=s;s=cn(s);for(let r=0;rPn||(jl.then(()=>Pn=0),Pn=Date.now());function Kl(e,t){const n=s=>{if(!s._vts)s._vts=Date.now();else if(s._vts<=n.attached)return;Ce(kl(s,n.value),t,5,[s])};return n.value=e,n.attached=Dl(),n}function kl(e,t){if(L(t)){const n=e.stopImmediatePropagation;return e.stopImmediatePropagation=()=>{n.call(e),e._stopped=!0},t.map(s=>r=>!r._stopped&&s&&s(r))}else return t}const 
zs=/^on[a-z]/,Wl=(e,t,n,s,r=!1,i,o,l,c)=>{t==="class"?Fl(e,s,r):t==="style"?Sl(e,n,s):Nt(t)?qn(t)||Bl(e,t,n,s,o):(t[0]==="."?(t=t.slice(1),!0):t[0]==="^"?(t=t.slice(1),!1):Vl(e,t,s,r))?Nl(e,t,s,i,o,l,c):(t==="true-value"?e._trueValue=s:t==="false-value"&&(e._falseValue=s),Ll(e,t,s,r))};function Vl(e,t,n,s){return s?!!(t==="innerHTML"||t==="textContent"||t in e&&zs.test(t)&&j(n)):t==="spellcheck"||t==="draggable"||t==="translate"||t==="form"||t==="list"&&e.tagName==="INPUT"||t==="type"&&e.tagName==="TEXTAREA"||zs.test(t)&&se(n)?!1:t in e}const je="transition",xt="animation",hi=(e,{slots:t})=>kn(Ho,ql(e),t);hi.displayName="Transition";const pi={name:String,type:String,css:{type:Boolean,default:!0},duration:[String,Number,Object],enterFromClass:String,enterActiveClass:String,enterToClass:String,appearFromClass:String,appearActiveClass:String,appearToClass:String,leaveFromClass:String,leaveActiveClass:String,leaveToClass:String};hi.props=ie({},jr,pi);const Xe=(e,t=[])=>{L(e)?e.forEach(n=>n(...t)):e&&e(...t)},Ys=e=>e?L(e)?e.some(t=>t.length>1):e.length>1:!1;function ql(e){const t={};for(const I in e)I in pi||(t[I]=e[I]);if(e.css===!1)return t;const{name:n="v",type:s,duration:r,enterFromClass:i=`${n}-enter-from`,enterActiveClass:o=`${n}-enter-active`,enterToClass:l=`${n}-enter-to`,appearFromClass:c=i,appearActiveClass:a=o,appearToClass:d=l,leaveFromClass:p=`${n}-leave-from`,leaveActiveClass:y=`${n}-leave-active`,leaveToClass:A=`${n}-leave-to`}=e,N=zl(r),R=N&&N[0],U=N&&N[1],{onBeforeEnter:_,onEnter:x,onEnterCancelled:$,onLeave:P,onLeaveCancelled:K,onBeforeAppear:J=_,onAppear:V=x,onAppearCancelled:T=$}=t,D=(I,k,F)=>{Ze(I,k?d:l),Ze(I,k?a:o),F&&F()},B=(I,k)=>{I._isLeaving=!1,Ze(I,p),Ze(I,A),Ze(I,y),k&&k()},z=I=>(k,F)=>{const ge=I?V:x,X=()=>D(k,I,F);Xe(ge,[k,X]),Js(()=>{Ze(k,I?c:i),De(k,I?d:l),Ys(ge)||Xs(k,s,R,X)})};return 
ie(t,{onBeforeEnter(I){Xe(_,[I]),De(I,i),De(I,o)},onBeforeAppear(I){Xe(J,[I]),De(I,c),De(I,a)},onEnter:z(!1),onAppear:z(!0),onLeave(I,k){I._isLeaving=!0;const F=()=>B(I,k);De(I,p),Xl(),De(I,y),Js(()=>{I._isLeaving&&(Ze(I,p),De(I,A),Ys(P)||Xs(I,s,U,F))}),Xe(P,[I,F])},onEnterCancelled(I){D(I,!1),Xe($,[I])},onAppearCancelled(I){D(I,!0),Xe(T,[I])},onLeaveCancelled(I){B(I),Xe(K,[I])}})}function zl(e){if(e==null)return null;if(ee(e))return[In(e.enter),In(e.leave)];{const t=In(e);return[t,t]}}function In(e){return Fi(e)}function De(e,t){t.split(/\s+/).forEach(n=>n&&e.classList.add(n)),(e._vtc||(e._vtc=new Set)).add(t)}function Ze(e,t){t.split(/\s+/).forEach(s=>s&&e.classList.remove(s));const{_vtc:n}=e;n&&(n.delete(t),n.size||(e._vtc=void 0))}function Js(e){requestAnimationFrame(()=>{requestAnimationFrame(e)})}let Yl=0;function Xs(e,t,n,s){const r=e._endId=++Yl,i=()=>{r===e._endId&&s()};if(n)return setTimeout(i,n);const{type:o,timeout:l,propCount:c}=Jl(e,t);if(!o)return s();const a=o+"end";let d=0;const p=()=>{e.removeEventListener(a,y),i()},y=A=>{A.target===e&&++d>=c&&p()};setTimeout(()=>{d(n[N]||"").split(", "),r=s(`${je}Delay`),i=s(`${je}Duration`),o=Zs(r,i),l=s(`${xt}Delay`),c=s(`${xt}Duration`),a=Zs(l,c);let d=null,p=0,y=0;t===je?o>0&&(d=je,p=o,y=i.length):t===xt?a>0&&(d=xt,p=a,y=c.length):(p=Math.max(o,a),d=p>0?o>a?je:xt:null,y=d?d===je?i.length:c.length:0);const A=d===je&&/\b(transform|all)(,|$)/.test(s(`${je}Property`).toString());return{type:d,timeout:p,propCount:y,hasTransform:A}}function Zs(e,t){for(;e.lengthQs(n)+Qs(e[s])))}function Qs(e){return Number(e.slice(0,-1).replace(",","."))*1e3}function Xl(){return document.body.offsetHeight}const Zl=["ctrl","shift","alt","meta"],Ql={stop:e=>e.stopPropagation(),prevent:e=>e.preventDefault(),self:e=>e.target!==e.currentTarget,ctrl:e=>!e.ctrlKey,shift:e=>!e.shiftKey,alt:e=>!e.altKey,meta:e=>!e.metaKey,left:e=>"button"in e&&e.button!==0,middle:e=>"button"in e&&e.button!==1,right:e=>"button"in 
e&&e.button!==2,exact:(e,t)=>Zl.some(n=>e[`${n}Key`]&&!t.includes(n))},Dc=(e,t)=>(n,...s)=>{for(let r=0;rn=>{if(!("key"in n))return;const s=rt(n.key);if(t.some(r=>r===s||Gl[r]===s))return e(n)},ec=ie({patchProp:Wl},Rl);let On,Gs=!1;function tc(){return On=Gs?On:al(ec),Gs=!0,On}const kc=(...e)=>{const t=tc().createApp(...e),{mount:n}=t;return t.mount=s=>{const r=nc(s);if(r)return n(r,!0,r instanceof SVGElement)},t};function nc(e){return se(e)?document.querySelector(e):e}const Wc=(e,t)=>{const n=e.__vccOpts||e;for(const[s,r]of t)n[s]=r;return n},sc="modulepreload",rc=function(e){return"/learn-wgpu-zh/"+e},er={},Vc=function(t,n,s){if(!n||n.length===0)return t();const r=document.getElementsByTagName("link");return Promise.all(n.map(i=>{if(i=rc(i),i in er)return;er[i]=!0;const o=i.endsWith(".css"),l=o?'[rel="stylesheet"]':"";if(!!s)for(let d=r.length-1;d>=0;d--){const p=r[d];if(p.href===i&&(!o||p.rel==="stylesheet"))return}else if(document.querySelector(`link[href="${i}"]${l}`))return;const a=document.createElement("link");if(a.rel=o?"stylesheet":sc,o||(a.as="script",a.crossOrigin=""),a.href=i,document.head.appendChild(a),o)return new Promise((d,p)=>{a.addEventListener("load",d),a.addEventListener("error",()=>p(new Error(`Unable to preload CSS for ${i}`)))})})).then(()=>t()).catch(i=>{const o=new Event("vite:preloadError",{cancelable:!0});if(o.payload=i,window.dispatchEvent(o),!o.defaultPrevented)throw i})},ic=window.__VP_SITE_DATA__,gi=/^[a-z]+:/i,qc=/^pathname:\/\//,zc="vitepress-theme-appearance",mi=/#.*$/,oc=/(index)?\.(md|html)$/,xe=typeof document<"u",_i={relativePath:"",filePath:"",title:"404",description:"Not Found",headers:[],frontmatter:{sidebar:!1,layout:"page"},lastUpdated:0,isNotFound:!0};function lc(e,t,n=!1){if(t===void 0)return!1;if(e=tr(`/${e}`),n)return new RegExp(t).test(e);if(tr(t)!==e)return!1;const s=t.match(mi);return s?(xe?location.hash:"")===s[0]:!0}function tr(e){return decodeURI(e).replace(mi,"").replace(oc,"")}function cc(e){return 
gi.test(e)}function fc(e,t){var s,r,i,o,l,c,a;const n=Object.keys(e.locales).find(d=>d!=="root"&&!cc(d)&&lc(t,`/${d}/`,!0))||"root";return Object.assign({},e,{localeIndex:n,lang:((s=e.locales[n])==null?void 0:s.lang)??e.lang,dir:((r=e.locales[n])==null?void 0:r.dir)??e.dir,title:((i=e.locales[n])==null?void 0:i.title)??e.title,titleTemplate:((o=e.locales[n])==null?void 0:o.titleTemplate)??e.titleTemplate,description:((l=e.locales[n])==null?void 0:l.description)??e.description,head:yi(e.head,((c=e.locales[n])==null?void 0:c.head)??[]),themeConfig:{...e.themeConfig,...(a=e.locales[n])==null?void 0:a.themeConfig}})}function bi(e,t){const n=t.title||e.title,s=t.titleTemplate??e.titleTemplate;if(typeof s=="string"&&s.includes(":title"))return s.replace(/:title/g,n);const r=ac(e.title,s);return`${n}${r}`}function ac(e,t){return t===!1?"":t===!0||t===void 0?` | ${e}`:e===t?"":` | ${t}`}function uc(e,t){const[n,s]=t;if(n!=="meta")return!1;const r=Object.entries(s)[0];return r==null?!1:e.some(([i,o])=>i===n&&o[r[0]]===r[1])}function yi(e,t){return[...e.filter(n=>!uc(t,n)),...t]}const dc=/[\u0000-\u001F"#$&*+,:;<=>?[\]^`{|}\u007F]/g,hc=/^[a-z]:/i;function nr(e){const t=hc.exec(e),n=t?t[0]:"";return n+e.slice(n.length).replace(dc,"_").replace(/(^|\/)_+(?=[^/]*$)/,"$1")}const pc=Symbol(),tt=go(ic);function Yc(e){const t=Ee(()=>fc(tt.value,e.data.relativePath));return{site:t,theme:Ee(()=>t.value.themeConfig),page:Ee(()=>e.data),frontmatter:Ee(()=>e.data.frontmatter),params:Ee(()=>e.data.params),lang:Ee(()=>t.value.lang),dir:Ee(()=>t.value.dir),localeIndex:Ee(()=>t.value.localeIndex||"root"),title:Ee(()=>bi(t.value,e.data)),description:Ee(()=>e.data.description||t.value.description),isDark:Rr(!1)}}function gc(){const e=pt(pc);if(!e)throw new Error("vitepress data not properly injected in app");return e}function mc(e,t){return`${e}${t}`.replace(/\/+/g,"/")}function sr(e){return gi.test(e)||e.startsWith(".")?e:mc(tt.value.base,e)}function _c(e){let 
t=e.replace(/\.html$/,"");if(t=decodeURIComponent(t),t=t.replace(/\/$/,"/index"),xe){const n="/learn-wgpu-zh/";t=nr(t.slice(n.length).replace(/\//g,"_")||"index")+".md";let s=__VP_HASH_MAP__[t.toLowerCase()];if(s||(t=t.endsWith("_index.md")?t.slice(0,-9)+".md":t.slice(0,-3)+"_index.md",s=__VP_HASH_MAP__[t.toLowerCase()]),!s)return null;t=`${n}assets/${t}.${s}.js`}else t=`./${nr(t.slice(1).replace(/\//g,"_"))}.md.js`;return t}let Xt=[];function Jc(e){Xt.push(e),bn(()=>{Xt=Xt.filter(t=>t!==e)})}const bc=Symbol(),rr="http://a.com",yc=()=>({path:"/",component:null,data:_i});function Xc(e,t){const n=an(yc()),s={route:n,go:r};async function r(l=xe?location.href:"/"){var a,d;if(await((a=s.onBeforeRouteChange)==null?void 0:a.call(s,l))===!1)return;const c=new URL(l,rr);tt.value.cleanUrls||!c.pathname.endsWith("/")&&!c.pathname.endsWith(".html")&&(c.pathname+=".html",l=c.pathname+c.search+c.hash),lr(l),await o(l),await((d=s.onAfterRouteChanged)==null?void 0:d.call(s,l))}let i=null;async function o(l,c=0,a=!1){var y;if(await((y=s.onBeforePageLoad)==null?void 0:y.call(s,l))===!1)return;const d=new URL(l,rr),p=i=d.pathname;try{let A=await e(p);if(!A)throw new Error(`Page not found: ${p}`);if(i===p){i=null;const{default:N,__pageData:R}=A;if(!N)throw new Error(`Invalid route component: ${N}`);n.path=xe?p:sr(p),n.component=Tt(N),n.data=Tt(R),xe&&Lr(()=>{let U=tt.value.base+R.relativePath.replace(/(?:(^|\/)index)?\.md$/,"$1");if(!tt.value.cleanUrls&&!U.endsWith("/")&&(U+=".html"),U!==d.pathname&&(d.pathname=U,l=U+d.search+d.hash,history.replaceState(null,"",l)),d.hash&&!c){let _=null;try{_=document.getElementById(decodeURIComponent(d.hash).slice(1))}catch(x){console.warn(x)}if(_){ir(_,d.hash);return}}window.scrollTo(0,c)})}}catch(A){if(!/fetch|Page not found/.test(A.message)&&!/^\/404(\.html|\/)?$/.test(l)&&console.error(A),!a)try{const N=await fetch(tt.value.base+"hashmap.json");window.__VP_HASH_MAP__=await N.json(),await 
o(l,c,!0);return}catch{}i===p&&(i=null,n.path=xe?p:sr(p),n.component=t?Tt(t):null,n.data=_i)}}return xe&&(window.addEventListener("click",l=>{if(l.target.closest("button"))return;const a=l.target.closest("a");if(a&&!a.closest(".vp-raw")&&(a instanceof SVGElement||!a.download)){const{target:d}=a,{href:p,origin:y,pathname:A,hash:N,search:R}=new URL(a.href instanceof SVGAnimatedString?a.href.animVal:a.href,a.baseURI),U=window.location,_=A.match(/\.\w+$/);!l.ctrlKey&&!l.shiftKey&&!l.altKey&&!l.metaKey&&!d&&y===U.origin&&!(_&&_[0]!==".html")&&(l.preventDefault(),A===U.pathname&&R===U.search?(N!==U.hash&&(history.pushState(null,"",N),window.dispatchEvent(new Event("hashchange"))),N?ir(a,N,a.classList.contains("header-anchor")):(lr(p),window.scrollTo(0,0))):r(p))}},{capture:!0}),window.addEventListener("popstate",l=>{o(location.href,l.state&&l.state.scrollPosition||0)}),window.addEventListener("hashchange",l=>{l.preventDefault()})),s}function wc(){const e=pt(bc);if(!e)throw new Error("useRouter() is called without provider.");return e}function wi(){return wc().route}function ir(e,t,n=!1){let s=null;try{s=e.classList.contains("header-anchor")?e:document.getElementById(decodeURIComponent(t).slice(1))}catch(r){console.warn(r)}if(s){let a=function(){!n||Math.abs(c-window.scrollY)>window.innerHeight?window.scrollTo(0,c):window.scrollTo({left:0,top:c,behavior:"smooth"})},r=tt.value.scrollOffset,i=0,o=24;if(typeof r=="object"&&"padding"in r&&(o=r.padding,r=r.selector),typeof r=="number")i=r;else if(typeof r=="string")i=or(r,o);else if(Array.isArray(r))for(const d of r){const p=or(d,o);if(p){i=p;break}}const l=parseInt(window.getComputedStyle(s).paddingTop,10),c=window.scrollY+s.getBoundingClientRect().top-i+l;requestAnimationFrame(a)}}function or(e,t){const n=document.querySelector(e);if(!n)return 0;const s=n.getBoundingClientRect().bottom;return s<0?0:s+t}function 
lr(e){xe&&e!==location.href&&(history.replaceState({scrollPosition:window.scrollY},document.title),history.pushState(null,"",e))}const cr=()=>Xt.forEach(e=>e()),Zc=kr({name:"VitePressContent",props:{as:{type:[Object,String],default:"div"}},setup(e){const t=wi(),{site:n}=gc();return()=>kn(e.as,n.value.contentProps??{style:{position:"relative"}},[t.component?kn(t.component,{onVnodeMounted:cr,onVnodeUpdated:cr}):"404 Page Not Found"])}}),Qc=kr({setup(e,{slots:t}){const n=Rr(!1);return _n(()=>{n.value=!0}),()=>n.value&&t.default?t.default():null}});function Gc(){xe&&window.addEventListener("click",e=>{var n;const t=e.target;if(t.matches(".vp-code-group input")){const s=(n=t.parentElement)==null?void 0:n.parentElement;if(!s)return;const r=Array.from(s.querySelectorAll("input")).indexOf(t);if(r<0)return;const i=s.querySelector(".blocks");if(!i)return;const o=Array.from(i.children).find(a=>a.classList.contains("active"));if(!o)return;const l=i.children[r];if(!l||o===l)return;o.classList.remove("active"),l.classList.add("active");const c=s==null?void 0:s.querySelector(`label[for="${t.id}"]`);c==null||c.scrollIntoView({block:"nearest"})}})}function ef(){if(xe){const e=new WeakMap;window.addEventListener("click",t=>{var s;const n=t.target;if(n.matches('div[class*="language-"] > button.copy')){const r=n.parentElement,i=(s=n.nextElementSibling)==null?void 0:s.nextElementSibling;if(!r||!i)return;const o=/language-(shellscript|shell|bash|sh|zsh)/.test(r.className);let l="";i.querySelectorAll("span.line:not(.diff.remove)").forEach(c=>l+=(c.textContent||"")+` -`),l=l.slice(0,-1),o&&(l=l.replace(/^ *(\$|>) /gm,"").trim()),xc(l).then(()=>{n.classList.add("copied"),clearTimeout(e.get(n));const c=setTimeout(()=>{n.classList.remove("copied"),n.blur(),e.delete(n)},2e3);e.set(n,c)})}})}}async function xc(e){try{return navigator.clipboard.writeText(e)}catch{const 
t=document.createElement("textarea"),n=document.activeElement;t.value=e,t.setAttribute("readonly",""),t.style.contain="strict",t.style.position="absolute",t.style.left="-9999px",t.style.fontSize="12pt";const s=document.getSelection(),r=s?s.rangeCount>0&&s.getRangeAt(0):null;document.body.appendChild(t),t.select(),t.selectionStart=0,t.selectionEnd=e.length,document.execCommand("copy"),document.body.removeChild(t),r&&(s.removeAllRanges(),s.addRange(r)),n&&n.focus()}}function tf(e,t){let n=[],s=!0;const r=i=>{if(s){s=!1;return}n.forEach(o=>document.head.removeChild(o)),n=[],i.forEach(o=>{const l=fr(o);document.head.appendChild(l),n.push(l)})};Mo(()=>{const i=e.data,o=t.value,l=i&&i.description,c=i&&i.frontmatter.head||[];document.title=bi(o,i);const a=l||o.description;let d=document.querySelector("meta[name=description]");d?d.setAttribute("content",a):fr(["meta",{name:"description",content:a}]),r(yi(o.head,vc(c)))})}function fr([e,t,n]){const s=document.createElement(e);for(const r in t)s.setAttribute(r,t[r]);return n&&(s.innerHTML=n),s}function Cc(e){return e[0]==="meta"&&e[1]&&e[1].name==="description"}function vc(e){return e.filter(t=>!Cc(t))}const Rn=new Set,xi=()=>document.createElement("link"),Ec=e=>{const t=xi();t.rel="prefetch",t.href=e,document.head.appendChild(t)},Tc=e=>{const t=new XMLHttpRequest;t.open("GET",e,t.withCredentials=!0),t.send()};let qt;const Ac=xe&&(qt=xi())&&qt.relList&&qt.relList.supports&&qt.relList.supports("prefetch")?Ec:Tc;function nf(){if(!xe||!window.IntersectionObserver)return;let e;if((e=navigator.connection)&&(e.saveData||/2g/.test(e.effectiveType)))return;const t=window.requestIdleCallback||setTimeout;let n=null;const s=()=>{n&&n.disconnect(),n=new IntersectionObserver(i=>{i.forEach(o=>{if(o.isIntersecting){const l=o.target;n.unobserve(l);const{pathname:c}=l;if(!Rn.has(c)){Rn.add(c);const a=_c(c);a&&Ac(a)}}})}),t(()=>{document.querySelectorAll("#app a").forEach(i=>{const{hostname:o,pathname:l}=new URL(i.href instanceof 
SVGAnimatedString?i.href.animVal:i.href,i.baseURI),c=l.match(/\.\w+$/);c&&c[0]!==".html"||i.target!=="_blank"&&o===location.hostname&&(l!==location.pathname?n.observe(i):Rn.add(l))})})};_n(s);const r=wi();Yt(()=>r.path,s),bn(()=>{n&&n.disconnect()})}export{Hc as $,xe as A,bn as B,Fc as C,ko as D,Sc as E,he as F,Lc as G,go as H,Jc as I,le as J,Mc as K,gi as L,wi as M,_l as N,pt as O,qc as P,Xn as Q,Lr as R,Uc as S,hi as T,zc as U,Ar as V,sl as W,Kc as X,$c as Y,Dc as Z,Wc as _,ci as a,Vc as a0,tf as a1,bc as a2,Yc as a3,pc as a4,Zc as a5,Qc as a6,tt as a7,kc as a8,Xc as a9,_c as aa,nf as ab,ef as ac,Gc as ad,kn as ae,ii as b,Bc as c,kr as d,jc as e,sr as f,Ee as g,Rr as h,cc as i,_n as j,li as k,_o as l,Rc as m,Zn as n,si as o,Oc as p,ji as q,Nc as r,Ic as s,Pc as t,gc as u,Yt as v,Po as w,Mo as x,fi as y,lc as z}; diff --git a/assets/chunks/framework.bMtwhlie.js b/assets/chunks/framework.bMtwhlie.js new file mode 100644 index 000000000..a6b2ce769 --- /dev/null +++ b/assets/chunks/framework.bMtwhlie.js @@ -0,0 +1,2 @@ +function ci(e,t){const n=Object.create(null),i=e.split(",");for(let s=0;s!!n[s.toLowerCase()]:s=>!!n[s]}const te={},ht=[],Ie=()=>{},er=()=>!1,Kt=e=>e.charCodeAt(0)===111&&e.charCodeAt(1)===110&&(e.charCodeAt(2)>122||e.charCodeAt(2)<97),fi=e=>e.startsWith("onUpdate:"),re=Object.assign,ui=(e,t)=>{const n=e.indexOf(t);n>-1&&e.splice(n,1)},tr=Object.prototype.hasOwnProperty,X=(e,t)=>tr.call(e,t),$=Array.isArray,gt=e=>yn(e)==="[object Map]",Ps=e=>yn(e)==="[object Set]",K=e=>typeof e=="function",ie=e=>typeof e=="string",Et=e=>typeof e=="symbol",ee=e=>e!==null&&typeof e=="object",Ms=e=>(ee(e)||K(e))&&K(e.then)&&K(e.catch),Is=Object.prototype.toString,yn=e=>Is.call(e),nr=e=>yn(e).slice(8,-1),Fs=e=>yn(e)==="[object Object]",pi=e=>ie(e)&&e!=="NaN"&&e[0]!=="-"&&""+parseInt(e,10)===e,Mt=ci(",key,ref,ref_for,ref_key,onVnodeBeforeMount,onVnodeMounted,onVnodeBeforeUpdate,onVnodeUpdated,onVnodeBeforeUnmount,onVnodeUnmounted"),bn=e=>{const t=Object.create(null);return 
n=>t[n]||(t[n]=e(n))},ir=/-(\w)/g,Fe=bn(e=>e.replace(ir,(t,n)=>n?n.toUpperCase():"")),sr=/\B([A-Z])/g,ft=bn(e=>e.replace(sr,"-$1").toLowerCase()),_n=bn(e=>e.charAt(0).toUpperCase()+e.slice(1)),rn=bn(e=>e?`on${_n(e)}`:""),at=(e,t)=>!Object.is(e,t),kn=(e,t)=>{for(let n=0;n{Object.defineProperty(e,t,{configurable:!0,enumerable:!1,value:n})},or=e=>{const t=parseFloat(e);return isNaN(t)?e:t},rr=e=>{const t=ie(e)?Number(e):NaN;return isNaN(t)?e:t};let Hi;const Jn=()=>Hi||(Hi=typeof globalThis<"u"?globalThis:typeof self<"u"?self:typeof window<"u"?window:typeof global<"u"?global:{});function di(e){if($(e)){const t={};for(let n=0;n{if(n){const i=n.split(ar);i.length>1&&(t[i[0].trim()]=i[1].trim())}}),t}function mi(e){let t="";if(ie(e))t=e;else if($(e))for(let n=0;nie(e)?e:e==null?"":$(e)||ee(e)&&(e.toString===Is||!K(e.toString))?JSON.stringify(e,Ns,2):String(e),Ns=(e,t)=>t&&t.__v_isRef?Ns(e,t.value):gt(t)?{[`Map(${t.size})`]:[...t.entries()].reduce((n,[i,s],o)=>(n[Hn(i,o)+" =>"]=s,n),{})}:Ps(t)?{[`Set(${t.size})`]:[...t.values()].map(n=>Hn(n))}:Et(t)?Hn(t):ee(t)&&!$(t)&&!Fs(t)?String(t):t,Hn=(e,t="")=>{var n;return Et(e)?`Symbol(${(n=e.description)!=null?n:t})`:e};let ye;class dr{constructor(t=!1){this.detached=t,this._active=!0,this.effects=[],this.cleanups=[],this.parent=ye,!t&&ye&&(this.index=(ye.scopes||(ye.scopes=[])).push(this)-1)}get active(){return this._active}run(t){if(this._active){const n=ye;try{return ye=this,t()}finally{ye=n}}}on(){ye=this}off(){ye=this.parent}stop(t){if(this._active){let n,i;for(n=0,i=this.effects.length;n{const t=new Set(e);return t.w=0,t.n=0,t},Hs=e=>(e.w&Xe)>0,$s=e=>(e.n&Xe)>0,gr=({deps:e})=>{if(e.length)for(let t=0;t{const{deps:t}=e;if(t.length){let n=0;for(let i=0;i{(p==="length"||!Et(p)&&p>=a)&&l.push(f)})}else switch(n!==void 
0&&l.push(r.get(n)),t){case"add":$(e)?pi(n)&&l.push(r.get("length")):(l.push(r.get(ot)),gt(e)&&l.push(r.get(Qn)));break;case"delete":$(e)||(l.push(r.get(ot)),gt(e)&&l.push(r.get(Qn)));break;case"set":gt(e)&&l.push(r.get(ot));break}if(l.length===1)l[0]&&Zn(l[0]);else{const a=[];for(const f of l)f&&a.push(...f);Zn(hi(a))}}function Zn(e,t){const n=$(e)?e:[...e];for(const i of n)i.computed&&Di(i);for(const i of n)i.computed||Di(i)}function Di(e,t){(e!==je||e.allowRecurse)&&(e.scheduler?e.scheduler():e.run())}function vr(e,t){var n;return(n=fn.get(e))==null?void 0:n.get(t)}const yr=ci("__proto__,__v_isRef,__isVue"),Us=new Set(Object.getOwnPropertyNames(Symbol).filter(e=>e!=="arguments"&&e!=="caller").map(e=>Symbol[e]).filter(Et)),Bi=br();function br(){const e={};return["includes","indexOf","lastIndexOf"].forEach(t=>{e[t]=function(...n){const i=Q(this);for(let o=0,r=this.length;o{e[t]=function(...n){Tt();const i=Q(this)[t].apply(this,n);return At(),i}}),e}function _r(e){const t=Q(this);return xe(t,"has",e),t.hasOwnProperty(e)}class Ks{constructor(t=!1,n=!1){this._isReadonly=t,this._shallow=n}get(t,n,i){const s=this._isReadonly,o=this._shallow;if(n==="__v_isReactive")return!s;if(n==="__v_isReadonly")return s;if(n==="__v_isShallow")return o;if(n==="__v_raw")return i===(s?o?Fr:zs:o?Vs:qs).get(t)||Object.getPrototypeOf(t)===Object.getPrototypeOf(i)?t:void 0;const r=$(t);if(!s){if(r&&X(Bi,n))return Reflect.get(Bi,n,i);if(n==="hasOwnProperty")return _r}const l=Reflect.get(t,n,i);return(Et(n)?Us.has(n):yr(n))||(s||xe(t,"get",n),o)?l:pe(l)?r&&pi(n)?l:l.value:ee(l)?s?En(l):Cn(l):l}}class Ws extends Ks{constructor(t=!1){super(!1,t)}set(t,n,i,s){let o=t[n];if(!this._shallow){const a=_t(o);if(!un(i)&&!_t(i)&&(o=Q(o),i=Q(i)),!$(t)&&pe(o)&&!pe(i))return a?!1:(o.value=i,!0)}const r=$(t)&&pi(n)?Number(n)e,wn=e=>Reflect.getPrototypeOf(e);function Vt(e,t,n=!1,i=!1){e=e.__v_raw;const 
s=Q(e),o=Q(t);n||(at(t,o)&&xe(s,"get",t),xe(s,"get",o));const{has:r}=wn(s),l=i?xi:n?bi:kt;if(r.call(s,t))return l(e.get(t));if(r.call(s,o))return l(e.get(o));e!==s&&e.get(t)}function zt(e,t=!1){const n=this.__v_raw,i=Q(n),s=Q(e);return t||(at(e,s)&&xe(i,"has",e),xe(i,"has",s)),e===s?n.has(e):n.has(e)||n.has(s)}function Yt(e,t=!1){return e=e.__v_raw,!t&&xe(Q(e),"iterate",ot),Reflect.get(e,"size",e)}function Ui(e){e=Q(e);const t=Q(this);return wn(t).has.call(t,e)||(t.add(e),He(t,"add",e,e)),this}function Ki(e,t){t=Q(t);const n=Q(this),{has:i,get:s}=wn(n);let o=i.call(n,e);o||(e=Q(e),o=i.call(n,e));const r=s.call(n,e);return n.set(e,t),o?at(t,r)&&He(n,"set",e,t):He(n,"add",e,t),this}function Wi(e){const t=Q(this),{has:n,get:i}=wn(t);let s=n.call(t,e);s||(e=Q(e),s=n.call(t,e)),i&&i.call(t,e);const o=t.delete(e);return s&&He(t,"delete",e,void 0),o}function qi(){const e=Q(this),t=e.size!==0,n=e.clear();return t&&He(e,"clear",void 0,void 0),n}function Jt(e,t){return function(i,s){const o=this,r=o.__v_raw,l=Q(r),a=t?xi:e?bi:kt;return!e&&xe(l,"iterate",ot),r.forEach((f,p)=>i.call(s,a(f),a(p),o))}}function Xt(e,t,n){return function(...i){const s=this.__v_raw,o=Q(s),r=gt(o),l=e==="entries"||e===Symbol.iterator&&r,a=e==="keys"&&r,f=s[e](...i),p=n?xi:t?bi:kt;return!t&&xe(o,"iterate",a?Qn:ot),{next(){const{value:d,done:x}=f.next();return x?{value:d,done:x}:{value:l?[p(d[0]),p(d[1])]:p(d),done:x}},[Symbol.iterator](){return this}}}}function De(e){return function(...t){return e==="delete"?!1:e==="clear"?void 0:this}}function Ar(){const e={get(o){return Vt(this,o)},get size(){return Yt(this)},has:zt,add:Ui,set:Ki,delete:Wi,clear:qi,forEach:Jt(!1,!1)},t={get(o){return Vt(this,o,!1,!0)},get size(){return Yt(this)},has:zt,add:Ui,set:Ki,delete:Wi,clear:qi,forEach:Jt(!1,!0)},n={get(o){return Vt(this,o,!0)},get size(){return Yt(this,!0)},has(o){return zt.call(this,o,!0)},add:De("add"),set:De("set"),delete:De("delete"),clear:De("clear"),forEach:Jt(!0,!1)},i={get(o){return 
Vt(this,o,!0,!0)},get size(){return Yt(this,!0)},has(o){return zt.call(this,o,!0)},add:De("add"),set:De("set"),delete:De("delete"),clear:De("clear"),forEach:Jt(!0,!0)};return["keys","values","entries",Symbol.iterator].forEach(o=>{e[o]=Xt(o,!1,!1),n[o]=Xt(o,!0,!1),t[o]=Xt(o,!1,!0),i[o]=Xt(o,!0,!0)}),[e,n,t,i]}const[jr,Sr,Or,Rr]=Ar();function vi(e,t){const n=t?e?Rr:Or:e?Sr:jr;return(i,s,o)=>s==="__v_isReactive"?!e:s==="__v_isReadonly"?e:s==="__v_raw"?i:Reflect.get(X(n,s)&&s in i?n:i,s,o)}const Pr={get:vi(!1,!1)},Mr={get:vi(!1,!0)},Ir={get:vi(!0,!1)},qs=new WeakMap,Vs=new WeakMap,zs=new WeakMap,Fr=new WeakMap;function Lr(e){switch(e){case"Object":case"Array":return 1;case"Map":case"Set":case"WeakMap":case"WeakSet":return 2;default:return 0}}function Nr(e){return e.__v_skip||!Object.isExtensible(e)?0:Lr(nr(e))}function Cn(e){return _t(e)?e:yi(e,!1,Cr,Pr,qs)}function kr(e){return yi(e,!1,Tr,Mr,Vs)}function En(e){return yi(e,!0,Er,Ir,zs)}function yi(e,t,n,i,s){if(!ee(e)||e.__v_raw&&!(t&&e.__v_isReactive))return e;const o=s.get(e);if(o)return o;const r=Nr(e);if(r===0)return e;const l=new Proxy(e,r===2?i:n);return s.set(e,l),l}function xt(e){return _t(e)?xt(e.__v_raw):!!(e&&e.__v_isReactive)}function _t(e){return!!(e&&e.__v_isReadonly)}function un(e){return!!(e&&e.__v_isShallow)}function Ys(e){return xt(e)||_t(e)}function Q(e){const t=e&&e.__v_raw;return t?Q(t):e}function It(e){return cn(e,"__v_skip",!0),e}const kt=e=>ee(e)?Cn(e):e,bi=e=>ee(e)?En(e):e;function _i(e){Ve&&je&&(e=Q(e),Bs(e.dep||(e.dep=hi())))}function wi(e,t){e=Q(e);const n=e.dep;n&&Zn(n)}function pe(e){return!!(e&&e.__v_isRef===!0)}function ge(e){return Xs(e,!1)}function Js(e){return Xs(e,!0)}function Xs(e,t){return pe(e)?e:new Hr(e,t)}class Hr{constructor(t,n){this.__v_isShallow=n,this.dep=void 0,this.__v_isRef=!0,this._rawValue=n?t:Q(t),this._value=n?t:kt(t)}get value(){return _i(this),this._value}set value(t){const 
n=this.__v_isShallow||un(t)||_t(t);t=n?t:Q(t),at(t,this._rawValue)&&(this._rawValue=t,this._value=n?t:kt(t),wi(this))}}function Ci(e){return pe(e)?e.value:e}const $r={get:(e,t,n)=>Ci(Reflect.get(e,t,n)),set:(e,t,n,i)=>{const s=e[t];return pe(s)&&!pe(n)?(s.value=n,!0):Reflect.set(e,t,n,i)}};function Qs(e){return xt(e)?e:new Proxy(e,$r)}class Dr{constructor(t){this.dep=void 0,this.__v_isRef=!0;const{get:n,set:i}=t(()=>_i(this),()=>wi(this));this._get=n,this._set=i}get value(){return this._get()}set value(t){this._set(t)}}function Br(e){return new Dr(e)}class Ur{constructor(t,n,i){this._object=t,this._key=n,this._defaultValue=i,this.__v_isRef=!0}get value(){const t=this._object[this._key];return t===void 0?this._defaultValue:t}set value(t){this._object[this._key]=t}get dep(){return vr(Q(this._object),this._key)}}class Kr{constructor(t){this._getter=t,this.__v_isRef=!0,this.__v_isReadonly=!0}get value(){return this._getter()}}function Wr(e,t,n){return pe(e)?e:K(e)?new Kr(e):ee(e)&&arguments.length>1?qr(e,t,n):ge(e)}function qr(e,t,n){const i=e[t];return pe(i)?i:new Ur(e,t,n)}class Vr{constructor(t,n,i,s){this._setter=n,this.dep=void 0,this.__v_isRef=!0,this.__v_isReadonly=!1,this._dirty=!0,this.effect=new gi(t,()=>{this._dirty||(this._dirty=!0,wi(this))}),this.effect.computed=this,this.effect.active=this._cacheable=!s,this.__v_isReadonly=i}get value(){const t=Q(this);return _i(t),(t._dirty||!t._cacheable)&&(t._dirty=!1,t._value=t.effect.run()),t._value}set value(t){this._setter(t)}}function zr(e,t,n=!1){let i,s;const o=K(e);return o?(i=e,s=Ie):(i=e.get,s=e.set),new Vr(i,s,o||!s,n)}function ze(e,t,n,i){let s;try{s=i?e(...i):e()}catch(o){Tn(o,t,n)}return s}function Ee(e,t,n,i){if(K(e)){const o=ze(e,t,n,i);return o&&Ms(o)&&o.catch(r=>{Tn(r,t,n)}),o}const s=[];for(let o=0;o>>1,s=fe[i],o=$t(s);oMe&&fe.splice(t,1)}function Qr(e){$(e)?vt.push(...e):(!ke||!ke.includes(e,e.allowRecurse?nt+1:nt))&&vt.push(e),Gs()}function 
Vi(e,t,n=Ht?Me+1:0){for(;n$t(n)-$t(i)),nt=0;nte.id==null?1/0:e.id,Zr=(e,t)=>{const n=$t(e)-$t(t);if(n===0){if(e.pre&&!t.pre)return-1;if(t.pre&&!e.pre)return 1}return n};function eo(e){Gn=!1,Ht=!0,fe.sort(Zr);try{for(Me=0;Meie(E)?E.trim():E)),d&&(s=n.map(or))}let l,a=i[l=rn(t)]||i[l=rn(Fe(t))];!a&&o&&(a=i[l=rn(ft(t))]),a&&Ee(a,e,6,s);const f=i[l+"Once"];if(f){if(!e.emitted)e.emitted={};else if(e.emitted[l])return;e.emitted[l]=!0,Ee(f,e,6,s)}}function to(e,t,n=!1){const i=t.emitsCache,s=i.get(e);if(s!==void 0)return s;const o=e.emits;let r={},l=!1;if(!K(e)){const a=f=>{const p=to(f,t,!0);p&&(l=!0,re(r,p))};!n&&t.mixins.length&&t.mixins.forEach(a),e.extends&&a(e.extends),e.mixins&&e.mixins.forEach(a)}return!o&&!l?(ee(e)&&i.set(e,null),null):($(o)?o.forEach(a=>r[a]=null):re(r,o),ee(e)&&i.set(e,r),r)}function jn(e,t){return!e||!Kt(t)?!1:(t=t.slice(2).replace(/Once$/,""),X(e,t[0].toLowerCase()+t.slice(1))||X(e,ft(t))||X(e,t))}let ue=null,Sn=null;function dn(e){const t=ue;return ue=e,Sn=e&&e.type.__scopeId||null,t}function Ec(e){Sn=e}function Tc(){Sn=null}function el(e,t=ue,n){if(!t||e._n)return e;const i=(...s)=>{i._d&&ss(-1);const o=dn(t);let r;try{r=e(...s)}finally{dn(o),i._d&&ss(1)}return r};return i._n=!0,i._c=!0,i._d=!0,i}function $n(e){const{type:t,vnode:n,proxy:i,withProxy:s,props:o,propsOptions:[r],slots:l,attrs:a,emit:f,render:p,renderCache:d,data:x,setupState:E,ctx:I,inheritAttrs:R}=e;let H,q;const V=dn(e);try{if(n.shapeFlag&4){const m=s||i,P=m;H=Ae(p.call(P,m,d,o,E,x,I)),q=a}else{const m=t;H=Ae(m.length>1?m(o,{attrs:a,slots:l,emit:f}):m(o,null)),q=t.props?a:tl(a)}}catch(m){Nt.length=0,Tn(m,e,1),H=ce(be)}let g=H;if(q&&R!==!1){const m=Object.keys(q),{shapeFlag:P}=g;m.length&&P&7&&(r&&m.some(fi)&&(q=nl(q,r)),g=Qe(g,q))}return n.dirs&&(g=Qe(g),g.dirs=g.dirs?g.dirs.concat(n.dirs):n.dirs),n.transition&&(g.transition=n.transition),H=g,dn(V),H}const tl=e=>{let t;for(const n in e)(n==="class"||n==="style"||Kt(n))&&((t||(t={}))[n]=e[n]);return t},nl=(e,t)=>{const 
n={};for(const i in e)(!fi(i)||!(i.slice(9)in t))&&(n[i]=e[i]);return n};function il(e,t,n){const{props:i,children:s,component:o}=e,{props:r,children:l,patchFlag:a}=t,f=o.emitsOptions;if(t.dirs||t.transition)return!0;if(n&&a>=0){if(a&1024)return!0;if(a&16)return i?zi(i,r,f):!!r;if(a&8){const p=t.dynamicProps;for(let d=0;de.__isSuspense;function so(e,t){t&&t.pendingBranch?$(e)?t.effects.push(...e):t.effects.push(e):Qr(e)}function oo(e,t){return On(e,null,t)}function Sc(e,t){return On(e,null,{flush:"post"})}const Qt={};function Ye(e,t,n){return On(e,t,n)}function On(e,t,{immediate:n,deep:i,flush:s,onTrack:o,onTrigger:r}=te){var l;const a=ks()===((l=le)==null?void 0:l.scope)?le:null;let f,p=!1,d=!1;if(pe(e)?(f=()=>e.value,p=un(e)):xt(e)?(f=()=>e,i=!0):$(e)?(d=!0,p=e.some(m=>xt(m)||un(m)),f=()=>e.map(m=>{if(pe(m))return m.value;if(xt(m))return mt(m);if(K(m))return ze(m,a,2)})):K(e)?t?f=()=>ze(e,a,2):f=()=>{if(!(a&&a.isUnmounted))return x&&x(),Ee(e,a,3,[E])}:f=Ie,t&&i){const m=f;f=()=>mt(m())}let x,E=m=>{x=V.onStop=()=>{ze(m,a,4),x=V.onStop=void 0}},I;if(Bt)if(E=Ie,t?n&&Ee(t,a,3,[f(),d?[]:void 0,E]):f(),s==="sync"){const m=Gl();I=m.__watcherHandles||(m.__watcherHandles=[])}else return Ie;let R=d?new Array(e.length).fill(Qt):Qt;const H=()=>{if(V.active)if(t){const m=V.run();(i||p||(d?m.some((P,U)=>at(P,R[U])):at(m,R)))&&(x&&x(),Ee(t,a,3,[m,R===Qt?void 0:d&&R[0]===Qt?[]:R,E]),R=m)}else V.run()};H.allowRecurse=!!t;let q;s==="sync"?q=H:s==="post"?q=()=>me(H,a&&a.suspense):(H.pre=!0,a&&(H.id=a.uid),q=()=>Ti(H));const V=new gi(f,q);t?n?H():R=V.run():s==="post"?me(V.run.bind(V),a&&a.suspense):V.run();const g=()=>{V.stop(),a&&a.scope&&ui(a.scope.effects,V)};return I&&I.push(g),g}function rl(e,t,n){const i=this.proxy,s=ie(e)?e.includes(".")?ro(i,e):()=>i[e]:e.bind(i,i);let o;K(t)?o=t:(o=t.handler,n=t);const r=le;Ct(this);const l=On(s,o.bind(i),n);return r?Ct(r):rt(),l}function ro(e,t){const n=t.split(".");return()=>{let i=e;for(let s=0;s{mt(n,t)});else if(Fs(e))for(const n in 
e)mt(e[n],t);return e}function Pe(e,t,n,i){const s=e.dirs,o=t&&t.dirs;for(let r=0;r{e.isMounted=!0}),po(()=>{e.isUnmounting=!0}),e}const _e=[Function,Array],lo={mode:String,appear:Boolean,persisted:Boolean,onBeforeEnter:_e,onEnter:_e,onAfterEnter:_e,onEnterCancelled:_e,onBeforeLeave:_e,onLeave:_e,onAfterLeave:_e,onLeaveCancelled:_e,onBeforeAppear:_e,onAppear:_e,onAfterAppear:_e,onAppearCancelled:_e},al={name:"BaseTransition",props:lo,setup(e,{slots:t}){const n=Fn(),i=ll();let s;return()=>{const o=t.default&&co(t.default(),!0);if(!o||!o.length)return;let r=o[0];if(o.length>1){for(const R of o)if(R.type!==be){r=R;break}}const l=Q(e),{mode:a}=l;if(i.isLeaving)return Dn(r);const f=Ji(r);if(!f)return Dn(r);const p=ei(f,l,i,n);ti(f,p);const d=n.subTree,x=d&&Ji(d);let E=!1;const{getTransitionKey:I}=f.type;if(I){const R=I();s===void 0?s=R:R!==s&&(s=R,E=!0)}if(x&&x.type!==be&&(!it(f,x)||E)){const R=ei(x,l,i,n);if(ti(x,R),a==="out-in")return i.isLeaving=!0,R.afterLeave=()=>{i.isLeaving=!1,n.update.active!==!1&&n.update()},Dn(r);a==="in-out"&&f.type!==be&&(R.delayLeave=(H,q,V)=>{const g=ao(i,x);g[String(x.key)]=x,H[We]=()=>{q(),H[We]=void 0,delete p.delayedLeave},p.delayedLeave=V})}return r}}},cl=al;function ao(e,t){const{leavingVNodes:n}=e;let i=n.get(t.type);return i||(i=Object.create(null),n.set(t.type,i)),i}function ei(e,t,n,i){const{appear:s,mode:o,persisted:r=!1,onBeforeEnter:l,onEnter:a,onAfterEnter:f,onEnterCancelled:p,onBeforeLeave:d,onLeave:x,onAfterLeave:E,onLeaveCancelled:I,onBeforeAppear:R,onAppear:H,onAfterAppear:q,onAppearCancelled:V}=t,g=String(e.key),m=ao(n,e),P=(O,T)=>{O&&Ee(O,i,9,T)},U=(O,T)=>{const A=T[1];P(O,T),$(O)?O.every(W=>W.length<=1)&&A():O.length<=1&&A()},D={mode:o,persisted:r,beforeEnter(O){let T=l;if(!n.isMounted)if(s)T=R||l;else return;O[We]&&O[We](!0);const A=m[g];A&&it(e,A)&&A.el[We]&&A.el[We](),P(T,[O])},enter(O){let T=a,A=f,W=p;if(!n.isMounted)if(s)T=H||a,A=q||f,W=V||p;else return;let j=!1;const 
z=O[Zt]=oe=>{j||(j=!0,oe?P(W,[O]):P(A,[O]),D.delayedLeave&&D.delayedLeave(),O[Zt]=void 0)};T?U(T,[O,z]):z()},leave(O,T){const A=String(e.key);if(O[Zt]&&O[Zt](!0),n.isUnmounting)return T();P(d,[O]);let W=!1;const j=O[We]=z=>{W||(W=!0,T(),z?P(I,[O]):P(E,[O]),O[We]=void 0,m[A]===e&&delete m[A])};m[A]=e,x?U(x,[O,j]):j()},clone(O){return ei(O,t,n,i)}};return D}function Dn(e){if(Rn(e))return e=Qe(e),e.children=null,e}function Ji(e){return Rn(e)?e.children?e.children[0]:void 0:e}function ti(e,t){e.shapeFlag&6&&e.component?ti(e.component.subTree,t):e.shapeFlag&128?(e.ssContent.transition=t.clone(e.ssContent),e.ssFallback.transition=t.clone(e.ssFallback)):e.transition=t}function co(e,t=!1,n){let i=[],s=0;for(let o=0;o1)for(let o=0;o!!e.type.__asyncLoader,Rn=e=>e.type.__isKeepAlive;function fl(e,t){uo(e,"a",t)}function ul(e,t){uo(e,"da",t)}function uo(e,t,n=le){const i=e.__wdc||(e.__wdc=()=>{let s=n;for(;s;){if(s.isDeactivated)return;s=s.parent}return e()});if(Pn(t,i,n),n){let s=n.parent;for(;s&&s.parent;)Rn(s.parent.vnode)&&pl(i,t,n,s),s=s.parent}}function pl(e,t,n,i){const s=Pn(t,e,i,!0);Mn(()=>{ui(i[t],s)},n)}function Pn(e,t,n=le,i=!1){if(n){const s=n[e]||(n[e]=[]),o=t.__weh||(t.__weh=(...r)=>{if(n.isUnmounted)return;Tt(),Ct(n);const l=Ee(t,n,e,r);return rt(),At(),l});return i?s.unshift(o):s.push(o),o}}const $e=e=>(t,n=le)=>(!Bt||e==="sp")&&Pn(e,(...i)=>t(...i),n),dl=$e("bm"),jt=$e("m"),ml=$e("bu"),hl=$e("u"),po=$e("bum"),Mn=$e("um"),gl=$e("sp"),xl=$e("rtg"),vl=$e("rtc");function yl(e,t=le){Pn("ec",e,t)}function Oc(e,t,n,i){let s;const o=n&&n[i];if($(e)||ie(e)){s=new Array(e.length);for(let r=0,l=e.length;rt(r,l,void 0,o&&o[l]));else{const r=Object.keys(e);s=new Array(r.length);for(let l=0,a=r.length;lxn(t)?!(t.type===be||t.type===he&&!mo(t.children)):!0)?e:null}function Pc(e,t){const n={};for(const i in e)n[t&&/[A-Z]/.test(i)?`on:${i}`:rn(i)]=e[i];return n}const 
ni=e=>e?Ro(e)?Pi(e)||e.proxy:ni(e.parent):null,Ft=re(Object.create(null),{$:e=>e,$el:e=>e.vnode.el,$data:e=>e.data,$props:e=>e.props,$attrs:e=>e.attrs,$slots:e=>e.slots,$refs:e=>e.refs,$parent:e=>ni(e.parent),$root:e=>ni(e.root),$emit:e=>e.emit,$options:e=>ji(e),$forceUpdate:e=>e.f||(e.f=()=>Ti(e.update)),$nextTick:e=>e.n||(e.n=An.bind(e.proxy)),$watch:e=>rl.bind(e)}),Bn=(e,t)=>e!==te&&!e.__isScriptSetup&&X(e,t),bl={get({_:e},t){const{ctx:n,setupState:i,data:s,props:o,accessCache:r,type:l,appContext:a}=e;let f;if(t[0]!=="$"){const E=r[t];if(E!==void 0)switch(E){case 1:return i[t];case 2:return s[t];case 4:return n[t];case 3:return o[t]}else{if(Bn(i,t))return r[t]=1,i[t];if(s!==te&&X(s,t))return r[t]=2,s[t];if((f=e.propsOptions[0])&&X(f,t))return r[t]=3,o[t];if(n!==te&&X(n,t))return r[t]=4,n[t];ii&&(r[t]=0)}}const p=Ft[t];let d,x;if(p)return t==="$attrs"&&xe(e,"get",t),p(e);if((d=l.__cssModules)&&(d=d[t]))return d;if(n!==te&&X(n,t))return r[t]=4,n[t];if(x=a.config.globalProperties,X(x,t))return x[t]},set({_:e},t,n){const{data:i,setupState:s,ctx:o}=e;return Bn(s,t)?(s[t]=n,!0):i!==te&&X(i,t)?(i[t]=n,!0):X(e.props,t)||t[0]==="$"&&t.slice(1)in e?!1:(o[t]=n,!0)},has({_:{data:e,setupState:t,accessCache:n,ctx:i,appContext:s,propsOptions:o}},r){let l;return!!n[r]||e!==te&&X(e,r)||Bn(t,r)||(l=o[0])&&X(l,r)||X(i,r)||X(Ft,r)||X(s.config.globalProperties,r)},defineProperty(e,t,n){return n.get!=null?e._.accessCache[t]=0:X(n,"value")&&this.set(e,t,n.value,null),Reflect.defineProperty(e,t,n)}};function Mc(){return _l().slots}function _l(){const e=Fn();return e.setupContext||(e.setupContext=Mo(e))}function Xi(e){return $(e)?e.reduce((t,n)=>(t[n]=null,t),{}):e}let ii=!0;function wl(e){const 
t=ji(e),n=e.proxy,i=e.ctx;ii=!1,t.beforeCreate&&Qi(t.beforeCreate,e,"bc");const{data:s,computed:o,methods:r,watch:l,provide:a,inject:f,created:p,beforeMount:d,mounted:x,beforeUpdate:E,updated:I,activated:R,deactivated:H,beforeDestroy:q,beforeUnmount:V,destroyed:g,unmounted:m,render:P,renderTracked:U,renderTriggered:D,errorCaptured:O,serverPrefetch:T,expose:A,inheritAttrs:W,components:j,directives:z,filters:oe}=t;if(f&&Cl(f,i,null),r)for(const J in r){const F=r[J];K(F)&&(i[J]=F.bind(n))}if(s){const J=s.call(n,n);ee(J)&&(e.data=Cn(J))}if(ii=!0,o)for(const J in o){const F=o[J],Le=K(F)?F.bind(n,n):K(F.get)?F.get.bind(n,n):Ie,Wt=!K(F)&&K(F.set)?F.set.bind(n):Ie,Ze=ne({get:Le,set:Wt});Object.defineProperty(i,J,{enumerable:!0,configurable:!0,get:()=>Ze.value,set:Oe=>Ze.value=Oe})}if(l)for(const J in l)ho(l[J],i,n,J);if(a){const J=K(a)?a.call(n):a;Reflect.ownKeys(J).forEach(F=>{Ol(F,J[F])})}p&&Qi(p,e,"c");function k(J,F){$(F)?F.forEach(Le=>J(Le.bind(n))):F&&J(F.bind(n))}if(k(dl,d),k(jt,x),k(ml,E),k(hl,I),k(fl,R),k(ul,H),k(yl,O),k(vl,U),k(xl,D),k(po,V),k(Mn,m),k(gl,T),$(A))if(A.length){const J=e.exposed||(e.exposed={});A.forEach(F=>{Object.defineProperty(J,F,{get:()=>n[F],set:Le=>n[F]=Le})})}else e.exposed||(e.exposed={});P&&e.render===Ie&&(e.render=P),W!=null&&(e.inheritAttrs=W),j&&(e.components=j),z&&(e.directives=z)}function Cl(e,t,n=Ie){$(e)&&(e=si(e));for(const i in e){const s=e[i];let o;ee(s)?"default"in s?o=bt(s.from||i,s.default,!0):o=bt(s.from||i):o=bt(s),pe(o)?Object.defineProperty(t,i,{enumerable:!0,configurable:!0,get:()=>o.value,set:r=>o.value=r}):t[i]=o}}function Qi(e,t,n){Ee($(e)?e.map(i=>i.bind(t.proxy)):e.bind(t.proxy),t,n)}function ho(e,t,n,i){const s=i.includes(".")?ro(n,i):()=>n[i];if(ie(e)){const o=t[e];K(o)&&Ye(s,o)}else if(K(e))Ye(s,e.bind(n));else if(ee(e))if($(e))e.forEach(o=>ho(o,t,n,i));else{const o=K(e.handler)?e.handler.bind(n):t[e.handler];K(o)&&Ye(s,o,e)}}function ji(e){const 
t=e.type,{mixins:n,extends:i}=t,{mixins:s,optionsCache:o,config:{optionMergeStrategies:r}}=e.appContext,l=o.get(t);let a;return l?a=l:!s.length&&!n&&!i?a=t:(a={},s.length&&s.forEach(f=>mn(a,f,r,!0)),mn(a,t,r)),ee(t)&&o.set(t,a),a}function mn(e,t,n,i=!1){const{mixins:s,extends:o}=t;o&&mn(e,o,n,!0),s&&s.forEach(r=>mn(e,r,n,!0));for(const r in t)if(!(i&&r==="expose")){const l=El[r]||n&&n[r];e[r]=l?l(e[r],t[r]):t[r]}return e}const El={data:Zi,props:Gi,emits:Gi,methods:Pt,computed:Pt,beforeCreate:de,created:de,beforeMount:de,mounted:de,beforeUpdate:de,updated:de,beforeDestroy:de,beforeUnmount:de,destroyed:de,unmounted:de,activated:de,deactivated:de,errorCaptured:de,serverPrefetch:de,components:Pt,directives:Pt,watch:Al,provide:Zi,inject:Tl};function Zi(e,t){return t?e?function(){return re(K(e)?e.call(this,this):e,K(t)?t.call(this,this):t)}:t:e}function Tl(e,t){return Pt(si(e),si(t))}function si(e){if($(e)){const t={};for(let n=0;n1)return n&&K(t)?t.call(i&&i.proxy):t}}function Rl(e,t,n,i=!1){const s={},o={};cn(o,In,1),e.propsDefaults=Object.create(null),xo(e,t,s,o);for(const r in e.propsOptions[0])r in s||(s[r]=void 0);n?e.props=i?s:kr(s):e.type.props?e.props=s:e.props=o,e.attrs=o}function Pl(e,t,n,i){const{props:s,attrs:o,vnode:{patchFlag:r}}=e,l=Q(s),[a]=e.propsOptions;let f=!1;if((i||r>0)&&!(r&16)){if(r&8){const p=e.vnode.dynamicProps;for(let d=0;d{a=!0;const[x,E]=vo(d,t,!0);re(r,x),E&&l.push(...E)};!n&&t.mixins.length&&t.mixins.forEach(p),e.extends&&p(e.extends),e.mixins&&e.mixins.forEach(p)}if(!o&&!a)return ee(e)&&i.set(e,ht),ht;if($(o))for(let p=0;p-1,E[1]=R<0||I-1||X(E,"default"))&&l.push(d)}}}const f=[r,l];return ee(e)&&i.set(e,f),f}function es(e){return e[0]!=="$"}function ts(e){const t=e&&e.toString().match(/^\s*(function|class) (\w+)/);return t?t[2]:e===null?"null":""}function ns(e,t){return ts(e)===ts(t)}function is(e,t){return $(t)?t.findIndex(n=>ns(n,e)):K(t)&&ns(t,e)?0:-1}const 
yo=e=>e[0]==="_"||e==="$stable",Si=e=>$(e)?e.map(Ae):[Ae(e)],Ml=(e,t,n)=>{if(t._n)return t;const i=el((...s)=>Si(t(...s)),n);return i._c=!1,i},bo=(e,t,n)=>{const i=e._ctx;for(const s in e){if(yo(s))continue;const o=e[s];if(K(o))t[s]=Ml(s,o,i);else if(o!=null){const r=Si(o);t[s]=()=>r}}},_o=(e,t)=>{const n=Si(t);e.slots.default=()=>n},Il=(e,t)=>{if(e.vnode.shapeFlag&32){const n=t._;n?(e.slots=Q(t),cn(t,"_",n)):bo(t,e.slots={})}else e.slots={},t&&_o(e,t);cn(e.slots,In,1)},Fl=(e,t,n)=>{const{vnode:i,slots:s}=e;let o=!0,r=te;if(i.shapeFlag&32){const l=t._;l?n&&l===1?o=!1:(re(s,t),!n&&l===1&&delete s._):(o=!t.$stable,bo(t,s)),r=t}else t&&(_o(e,t),r={default:1});if(o)for(const l in s)!yo(l)&&r[l]==null&&delete s[l]};function gn(e,t,n,i,s=!1){if($(e)){e.forEach((x,E)=>gn(x,t&&($(t)?t[E]:t),n,i,s));return}if(yt(i)&&!s)return;const o=i.shapeFlag&4?Pi(i.component)||i.component.proxy:i.el,r=s?null:o,{i:l,r:a}=e,f=t&&t.r,p=l.refs===te?l.refs={}:l.refs,d=l.setupState;if(f!=null&&f!==a&&(ie(f)?(p[f]=null,X(d,f)&&(d[f]=null)):pe(f)&&(f.value=null)),K(a))ze(a,l,12,[r,p]);else{const x=ie(a),E=pe(a);if(x||E){const I=()=>{if(e.f){const R=x?X(d,a)?d[a]:p[a]:a.value;s?$(R)&&ui(R,o):$(R)?R.includes(o)||R.push(o):x?(p[a]=[o],X(d,a)&&(d[a]=p[a])):(a.value=[o],e.k&&(p[e.k]=a.value))}else x?(p[a]=r,X(d,a)&&(d[a]=r)):E&&(a.value=r,e.k&&(p[e.k]=r))};r?(I.id=-1,me(I,n)):I()}}}let Be=!1;const Gt=e=>/svg/.test(e.namespaceURI)&&e.tagName!=="foreignObject",en=e=>e.nodeType===8;function Ll(e){const{mt:t,p:n,o:{patchProp:i,createText:s,nextSibling:o,parentNode:r,remove:l,insert:a,createComment:f}}=e,p=(g,m)=>{if(!m.hasChildNodes()){n(null,g,m),pn(),m._vnode=g;return}Be=!1,d(m.firstChild,g,null,null,null),pn(),m._vnode=g,Be&&console.error("Hydration completed but contains mismatches.")},d=(g,m,P,U,D,O=!1)=>{const T=en(g)&&g.data==="[",A=()=>R(g,m,P,U,D,T),{type:W,ref:j,shapeFlag:z,patchFlag:oe}=m;let ae=g.nodeType;m.el=g,oe===-2&&(O=!1,m.dynamicChildren=null);let k=null;switch(W){case 
wt:ae!==3?m.children===""?(a(m.el=s(""),r(g),g),k=g):k=A():(g.data!==m.children&&(Be=!0,g.data=m.children),k=o(g));break;case be:V(g)?(k=o(g),q(m.el=g.content.firstChild,g,P)):ae!==8||T?k=A():k=o(g);break;case Lt:if(T&&(g=o(g),ae=g.nodeType),ae===1||ae===3){k=g;const J=!m.children.length;for(let F=0;F{O=O||!!m.dynamicChildren;const{type:T,props:A,patchFlag:W,shapeFlag:j,dirs:z,transition:oe}=m,ae=T==="input"||T==="option";if(ae||W!==-1){z&&Pe(m,null,P,"created");let k=!1;if(V(g)){k=wo(U,oe)&&P&&P.vnode.props&&P.vnode.props.appear;const F=g.content.firstChild;k&&oe.beforeEnter(F),q(F,g,P),m.el=g=F}if(A)if(ae||!O||W&48)for(const F in A)(ae&&(F.endsWith("value")||F==="indeterminate")||Kt(F)&&!Mt(F)||F[0]===".")&&i(g,F,null,A[F],!1,void 0,P);else A.onClick&&i(g,"onClick",null,A.onClick,!1,void 0,P);let J;if((J=A&&A.onVnodeBeforeMount)&&we(J,P,m),z&&Pe(m,null,P,"beforeMount"),((J=A&&A.onVnodeMounted)||z||k)&&so(()=>{J&&we(J,P,m),k&&oe.enter(g),z&&Pe(m,null,P,"mounted")},U),j&16&&!(A&&(A.innerHTML||A.textContent))){let F=E(g.firstChild,m,g,P,U,D,O);for(;F;){Be=!0;const Le=F;F=F.nextSibling,l(Le)}}else j&8&&g.textContent!==m.children&&(Be=!0,g.textContent=m.children)}return g.nextSibling},E=(g,m,P,U,D,O,T)=>{T=T||!!m.dynamicChildren;const A=m.children,W=A.length;for(let j=0;j{const{slotScopeIds:T}=m;T&&(D=D?D.concat(T):T);const A=r(g),W=E(o(g),m,A,P,U,D,O);return W&&en(W)&&W.data==="]"?o(m.anchor=W):(Be=!0,a(m.anchor=f("]"),A,W),W)},R=(g,m,P,U,D,O)=>{if(Be=!0,m.el=null,O){const W=H(g);for(;;){const j=o(g);if(j&&j!==W)l(j);else break}}const T=o(g),A=r(g);return l(g),n(null,m,A,T,P,U,Gt(A),D),T},H=(g,m="[",P="]")=>{let U=0;for(;g;)if(g=o(g),g&&en(g)&&(g.data===m&&U++,g.data===P)){if(U===0)return o(g);U--}return g},q=(g,m,P)=>{const U=m.parentNode;U&&U.replaceChild(g,m);let D=P;for(;D;)D.vnode.el===m&&(D.vnode.el=D.subTree.el=g),D=D.parent},V=g=>g.nodeType===1&&g.tagName.toLowerCase()==="template";return[p,d]}const me=so;function Nl(e){return kl(e,Ll)}function kl(e,t){const 
n=Jn();n.__VUE__=!0;const{insert:i,remove:s,patchProp:o,createElement:r,createText:l,createComment:a,setText:f,setElementText:p,parentNode:d,nextSibling:x,setScopeId:E=Ie,insertStaticContent:I}=e,R=(c,u,h,v=null,y=null,w=null,S=!1,_=null,C=!!u.dynamicChildren)=>{if(c===u)return;c&&!it(c,u)&&(v=qt(c),Oe(c,y,w,!0),c=null),u.patchFlag===-2&&(C=!1,u.dynamicChildren=null);const{type:b,ref:L,shapeFlag:M}=u;switch(b){case wt:H(c,u,h,v);break;case be:q(c,u,h,v);break;case Lt:c==null&&V(u,h,v,S);break;case he:j(c,u,h,v,y,w,S,_,C);break;default:M&1?P(c,u,h,v,y,w,S,_,C):M&6?z(c,u,h,v,y,w,S,_,C):(M&64||M&128)&&b.process(c,u,h,v,y,w,S,_,C,ut)}L!=null&&y&&gn(L,c&&c.ref,w,u||c,!u)},H=(c,u,h,v)=>{if(c==null)i(u.el=l(u.children),h,v);else{const y=u.el=c.el;u.children!==c.children&&f(y,u.children)}},q=(c,u,h,v)=>{c==null?i(u.el=a(u.children||""),h,v):u.el=c.el},V=(c,u,h,v)=>{[c.el,c.anchor]=I(c.children,u,h,v,c.el,c.anchor)},g=({el:c,anchor:u},h,v)=>{let y;for(;c&&c!==u;)y=x(c),i(c,h,v),c=y;i(u,h,v)},m=({el:c,anchor:u})=>{let h;for(;c&&c!==u;)h=x(c),s(c),c=h;s(u)},P=(c,u,h,v,y,w,S,_,C)=>{S=S||u.type==="svg",c==null?U(u,h,v,y,w,S,_,C):T(c,u,y,w,S,_,C)},U=(c,u,h,v,y,w,S,_)=>{let C,b;const{type:L,props:M,shapeFlag:N,transition:B,dirs:Y}=c;if(C=c.el=r(c.type,w,M&&M.is,M),N&8?p(C,c.children):N&16&&O(c.children,C,null,v,y,w&&L!=="foreignObject",S,_),Y&&Pe(c,null,v,"created"),D(C,c,c.scopeId,S,v),M){for(const Z in M)Z!=="value"&&!Mt(Z)&&o(C,Z,null,M[Z],w,c.children,v,y,Ne);"value"in M&&o(C,"value",null,M.value),(b=M.onVnodeBeforeMount)&&we(b,v,c)}Y&&Pe(c,null,v,"beforeMount");const G=wo(y,B);G&&B.beforeEnter(C),i(C,u,h),((b=M&&M.onVnodeMounted)||G||Y)&&me(()=>{b&&we(b,v,c),G&&B.enter(C),Y&&Pe(c,null,v,"mounted")},y)},D=(c,u,h,v,y)=>{if(h&&E(c,h),v)for(let w=0;w{for(let b=C;b{const _=u.el=c.el;let{patchFlag:C,dynamicChildren:b,dirs:L}=u;C|=c.patchFlag&16;const M=c.props||te,N=u.props||te;let B;h&&Ge(h,!1),(B=N.onVnodeBeforeUpdate)&&we(B,h,u,c),L&&Pe(u,c,h,"beforeUpdate"),h&&Ge(h,!0);const 
Y=y&&u.type!=="foreignObject";if(b?A(c.dynamicChildren,b,_,h,v,Y,w):S||F(c,u,_,null,h,v,Y,w,!1),C>0){if(C&16)W(_,u,M,N,h,v,y);else if(C&2&&M.class!==N.class&&o(_,"class",null,N.class,y),C&4&&o(_,"style",M.style,N.style,y),C&8){const G=u.dynamicProps;for(let Z=0;Z{B&&we(B,h,u,c),L&&Pe(u,c,h,"updated")},v)},A=(c,u,h,v,y,w,S)=>{for(let _=0;_{if(h!==v){if(h!==te)for(const _ in h)!Mt(_)&&!(_ in v)&&o(c,_,h[_],null,S,u.children,y,w,Ne);for(const _ in v){if(Mt(_))continue;const C=v[_],b=h[_];C!==b&&_!=="value"&&o(c,_,b,C,S,u.children,y,w,Ne)}"value"in v&&o(c,"value",h.value,v.value)}},j=(c,u,h,v,y,w,S,_,C)=>{const b=u.el=c?c.el:l(""),L=u.anchor=c?c.anchor:l("");let{patchFlag:M,dynamicChildren:N,slotScopeIds:B}=u;B&&(_=_?_.concat(B):B),c==null?(i(b,h,v),i(L,h,v),O(u.children,h,L,y,w,S,_,C)):M>0&&M&64&&N&&c.dynamicChildren?(A(c.dynamicChildren,N,h,y,w,S,_),(u.key!=null||y&&u===y.subTree)&&Co(c,u,!0)):F(c,u,h,L,y,w,S,_,C)},z=(c,u,h,v,y,w,S,_,C)=>{u.slotScopeIds=_,c==null?u.shapeFlag&512?y.ctx.activate(u,h,v,S,C):oe(u,h,v,y,w,S,C):ae(c,u,C)},oe=(c,u,h,v,y,w,S)=>{const _=c.component=Vl(c,v,y);if(Rn(c)&&(_.ctx.renderer=ut),zl(_),_.asyncDep){if(y&&y.registerDep(_,k),!c.el){const C=_.subTree=ce(be);q(null,C,u,h)}return}k(_,c,u,h,y,w,S)},ae=(c,u,h)=>{const v=u.component=c.component;if(il(c,u,h))if(v.asyncDep&&!v.asyncResolved){J(v,u,h);return}else v.next=u,Xr(v.update),v.update();else u.el=c.el,v.vnode=u},k=(c,u,h,v,y,w,S)=>{const _=()=>{if(c.isMounted){let{next:L,bu:M,u:N,parent:B,vnode:Y}=c,G=L,Z;Ge(c,!1),L?(L.el=Y.el,J(c,L,S)):L=Y,M&&kn(M),(Z=L.props&&L.props.onVnodeBeforeUpdate)&&we(Z,B,L,Y),Ge(c,!0);const se=$n(c),Te=c.subTree;c.subTree=se,R(Te,se,d(Te.el),qt(Te),c,y,w),L.el=se.el,G===null&&sl(c,se.el),N&&me(N,y),(Z=L.props&&L.props.onVnodeUpdated)&&me(()=>we(Z,B,L,Y),y)}else{let L;const{el:M,props:N}=u,{bm:B,m:Y,parent:G}=c,Z=yt(u);if(Ge(c,!1),B&&kn(B),!Z&&(L=N&&N.onVnodeBeforeMount)&&we(L,G,u),Ge(c,!0),M&&Nn){const 
se=()=>{c.subTree=$n(c),Nn(M,c.subTree,c,y,null)};Z?u.type.__asyncLoader().then(()=>!c.isUnmounted&&se()):se()}else{const se=c.subTree=$n(c);R(null,se,h,v,c,y,w),u.el=se.el}if(Y&&me(Y,y),!Z&&(L=N&&N.onVnodeMounted)){const se=u;me(()=>we(L,G,se),y)}(u.shapeFlag&256||G&&yt(G.vnode)&&G.vnode.shapeFlag&256)&&c.a&&me(c.a,y),c.isMounted=!0,u=h=v=null}},C=c.effect=new gi(_,()=>Ti(b),c.scope),b=c.update=()=>C.run();b.id=c.uid,Ge(c,!0),b()},J=(c,u,h)=>{u.component=c;const v=c.vnode.props;c.vnode=u,c.next=null,Pl(c,u.props,v,h),Fl(c,u.children,h),Tt(),Vi(c),At()},F=(c,u,h,v,y,w,S,_,C=!1)=>{const b=c&&c.children,L=c?c.shapeFlag:0,M=u.children,{patchFlag:N,shapeFlag:B}=u;if(N>0){if(N&128){Wt(b,M,h,v,y,w,S,_,C);return}else if(N&256){Le(b,M,h,v,y,w,S,_,C);return}}B&8?(L&16&&Ne(b,y,w),M!==b&&p(h,M)):L&16?B&16?Wt(b,M,h,v,y,w,S,_,C):Ne(b,y,w,!0):(L&8&&p(h,""),B&16&&O(M,h,v,y,w,S,_,C))},Le=(c,u,h,v,y,w,S,_,C)=>{c=c||ht,u=u||ht;const b=c.length,L=u.length,M=Math.min(b,L);let N;for(N=0;NL?Ne(c,y,w,!0,!1,M):O(u,h,v,y,w,S,_,C,M)},Wt=(c,u,h,v,y,w,S,_,C)=>{let b=0;const L=u.length;let M=c.length-1,N=L-1;for(;b<=M&&b<=N;){const B=c[b],Y=u[b]=C?qe(u[b]):Ae(u[b]);if(it(B,Y))R(B,Y,h,null,y,w,S,_,C);else break;b++}for(;b<=M&&b<=N;){const B=c[M],Y=u[N]=C?qe(u[N]):Ae(u[N]);if(it(B,Y))R(B,Y,h,null,y,w,S,_,C);else break;M--,N--}if(b>M){if(b<=N){const B=N+1,Y=BN)for(;b<=M;)Oe(c[b],y,w,!0),b++;else{const B=b,Y=b,G=new Map;for(b=Y;b<=N;b++){const ve=u[b]=C?qe(u[b]):Ae(u[b]);ve.key!=null&&G.set(ve.key,b)}let Z,se=0;const Te=N-Y+1;let pt=!1,Li=0;const St=new Array(Te);for(b=0;b=Te){Oe(ve,y,w,!0);continue}let Re;if(ve.key!=null)Re=G.get(ve.key);else for(Z=Y;Z<=N;Z++)if(St[Z-Y]===0&&it(ve,u[Z])){Re=Z;break}Re===void 0?Oe(ve,y,w,!0):(St[Re-Y]=b+1,Re>=Li?Li=Re:pt=!0,R(ve,u[Re],h,null,y,w,S,_,C),se++)}const Ni=pt?Hl(St):ht;for(Z=Ni.length-1,b=Te-1;b>=0;b--){const 
ve=Y+b,Re=u[ve],ki=ve+1{const{el:w,type:S,transition:_,children:C,shapeFlag:b}=c;if(b&6){Ze(c.component.subTree,u,h,v);return}if(b&128){c.suspense.move(u,h,v);return}if(b&64){S.move(c,u,h,ut);return}if(S===he){i(w,u,h);for(let M=0;M_.enter(w),y);else{const{leave:M,delayLeave:N,afterLeave:B}=_,Y=()=>i(w,u,h),G=()=>{M(w,()=>{Y(),B&&B()})};N?N(w,Y,G):G()}else i(w,u,h)},Oe=(c,u,h,v=!1,y=!1)=>{const{type:w,props:S,ref:_,children:C,dynamicChildren:b,shapeFlag:L,patchFlag:M,dirs:N}=c;if(_!=null&&gn(_,null,h,c,!0),L&256){u.ctx.deactivate(c);return}const B=L&1&&N,Y=!yt(c);let G;if(Y&&(G=S&&S.onVnodeBeforeUnmount)&&we(G,u,c),L&6)Go(c.component,h,v);else{if(L&128){c.suspense.unmount(h,v);return}B&&Pe(c,null,u,"beforeUnmount"),L&64?c.type.remove(c,u,h,y,ut,v):b&&(w!==he||M>0&&M&64)?Ne(b,u,h,!1,!0):(w===he&&M&384||!y&&L&16)&&Ne(C,u,h),v&&Ii(c)}(Y&&(G=S&&S.onVnodeUnmounted)||B)&&me(()=>{G&&we(G,u,c),B&&Pe(c,null,u,"unmounted")},h)},Ii=c=>{const{type:u,el:h,anchor:v,transition:y}=c;if(u===he){Zo(h,v);return}if(u===Lt){m(c);return}const w=()=>{s(h),y&&!y.persisted&&y.afterLeave&&y.afterLeave()};if(c.shapeFlag&1&&y&&!y.persisted){const{leave:S,delayLeave:_}=y,C=()=>S(h,w);_?_(c.el,w,C):C()}else w()},Zo=(c,u)=>{let h;for(;c!==u;)h=x(c),s(c),c=h;s(u)},Go=(c,u,h)=>{const{bum:v,scope:y,update:w,subTree:S,um:_}=c;v&&kn(v),y.stop(),w&&(w.active=!1,Oe(S,c,u,h)),_&&me(_,u),me(()=>{c.isUnmounted=!0},u),u&&u.pendingBranch&&!u.isUnmounted&&c.asyncDep&&!c.asyncResolved&&c.suspenseId===u.pendingId&&(u.deps--,u.deps===0&&u.resolve())},Ne=(c,u,h,v=!1,y=!1,w=0)=>{for(let S=w;Sc.shapeFlag&6?qt(c.component.subTree):c.shapeFlag&128?c.suspense.next():x(c.anchor||c.el),Fi=(c,u,h)=>{c==null?u._vnode&&Oe(u._vnode,null,null,!0):R(u._vnode||null,c,u,null,null,null,h),Vi(),pn(),u._vnode=c},ut={p:R,um:Oe,m:Ze,r:Ii,mt:oe,mc:O,pc:F,pbc:A,n:qt,o:e};let Ln,Nn;return t&&([Ln,Nn]=t(ut)),{render:Fi,hydrate:Ln,createApp:Sl(Fi,Ln)}}function Ge({effect:e,update:t},n){e.allowRecurse=t.allowRecurse=n}function 
wo(e,t){return(!e||e&&!e.pendingBranch)&&t&&!t.persisted}function Co(e,t,n=!1){const i=e.children,s=t.children;if($(i)&&$(s))for(let o=0;o>1,e[n[l]]0&&(t[i]=n[o-1]),n[o]=i)}}for(o=n.length,r=n[o-1];o-- >0;)n[o]=r,r=t[r];return n}const $l=e=>e.__isTeleport,he=Symbol.for("v-fgt"),wt=Symbol.for("v-txt"),be=Symbol.for("v-cmt"),Lt=Symbol.for("v-stc"),Nt=[];let Se=null;function Eo(e=!1){Nt.push(Se=e?null:[])}function Dl(){Nt.pop(),Se=Nt[Nt.length-1]||null}let Dt=1;function ss(e){Dt+=e}function To(e){return e.dynamicChildren=Dt>0?Se||ht:null,Dl(),Dt>0&&Se&&Se.push(e),e}function Ic(e,t,n,i,s,o){return To(So(e,t,n,i,s,o,!0))}function Ao(e,t,n,i,s){return To(ce(e,t,n,i,s,!0))}function xn(e){return e?e.__v_isVNode===!0:!1}function it(e,t){return e.type===t.type&&e.key===t.key}const In="__vInternal",jo=({key:e})=>e??null,ln=({ref:e,ref_key:t,ref_for:n})=>(typeof e=="number"&&(e=""+e),e!=null?ie(e)||pe(e)||K(e)?{i:ue,r:e,k:t,f:!!n}:e:null);function So(e,t=null,n=null,i=0,s=null,o=e===he?0:1,r=!1,l=!1){const a={__v_isVNode:!0,__v_skip:!0,type:e,props:t,key:t&&jo(t),ref:t&&ln(t),scopeId:Sn,slotScopeIds:null,children:n,component:null,suspense:null,ssContent:null,ssFallback:null,dirs:null,transition:null,el:null,anchor:null,target:null,targetAnchor:null,staticCount:0,shapeFlag:o,patchFlag:i,dynamicProps:s,dynamicChildren:null,appContext:null,ctx:ue};return l?(Oi(a,n),o&128&&e.normalize(a)):n&&(a.shapeFlag|=ie(n)?8:16),Dt>0&&!r&&Se&&(a.patchFlag>0||o&6)&&a.patchFlag!==32&&Se.push(a),a}const ce=Bl;function Bl(e,t=null,n=null,i=0,s=null,o=!1){if((!e||e===no)&&(e=be),xn(e)){const l=Qe(e,t,!0);return n&&Oi(l,n),Dt>0&&!o&&Se&&(l.shapeFlag&6?Se[Se.indexOf(e)]=l:Se.push(l)),l.patchFlag|=-2,l}if(Ql(e)&&(e=e.__vccOpts),t){t=Ul(t);let{class:l,style:a}=t;l&&!ie(l)&&(t.class=mi(l)),ee(a)&&(Ys(a)&&!$(a)&&(a=re({},a)),t.style=di(a))}const r=ie(e)?1:ol(e)?128:$l(e)?64:ee(e)?4:K(e)?2:0;return So(e,t,n,i,s,r,o,!0)}function Ul(e){return e?Ys(e)||In in e?re({},e):e:null}function 
Qe(e,t,n=!1){const{props:i,ref:s,patchFlag:o,children:r}=e,l=t?Kl(i||{},t):i;return{__v_isVNode:!0,__v_skip:!0,type:e.type,props:l,key:l&&jo(l),ref:t&&t.ref?n&&s?$(s)?s.concat(ln(t)):[s,ln(t)]:ln(t):s,scopeId:e.scopeId,slotScopeIds:e.slotScopeIds,children:r,target:e.target,targetAnchor:e.targetAnchor,staticCount:e.staticCount,shapeFlag:e.shapeFlag,patchFlag:t&&e.type!==he?o===-1?16:o|16:o,dynamicProps:e.dynamicProps,dynamicChildren:e.dynamicChildren,appContext:e.appContext,dirs:e.dirs,transition:e.transition,component:e.component,suspense:e.suspense,ssContent:e.ssContent&&Qe(e.ssContent),ssFallback:e.ssFallback&&Qe(e.ssFallback),el:e.el,anchor:e.anchor,ctx:e.ctx,ce:e.ce}}function Oo(e=" ",t=0){return ce(wt,null,e,t)}function Fc(e,t){const n=ce(Lt,null,e);return n.staticCount=t,n}function Lc(e="",t=!1){return t?(Eo(),Ao(be,null,e)):ce(be,null,e)}function Ae(e){return e==null||typeof e=="boolean"?ce(be):$(e)?ce(he,null,e.slice()):typeof e=="object"?qe(e):ce(wt,null,String(e))}function qe(e){return e.el===null&&e.patchFlag!==-1||e.memo?e:Qe(e)}function Oi(e,t){let n=0;const{shapeFlag:i}=e;if(t==null)t=null;else if($(t))n=16;else if(typeof t=="object")if(i&65){const s=t.default;s&&(s._c&&(s._d=!1),Oi(e,s()),s._c&&(s._d=!0));return}else{n=32;const s=t._;!s&&!(In in t)?t._ctx=ue:s===3&&ue&&(ue.slots._===1?t._=1:(t._=2,e.patchFlag|=1024))}else K(t)?(t={default:t,_ctx:ue},n=32):(t=String(t),i&64?(n=16,t=[Oo(t)]):n=8);e.children=t,e.shapeFlag|=n}function Kl(...e){const t={};for(let n=0;nle||ue;let Ri,dt,os="__VUE_INSTANCE_SETTERS__";(dt=Jn()[os])||(dt=Jn()[os]=[]),dt.push(e=>le=e),Ri=e=>{dt.length>1?dt.forEach(t=>t(e)):dt[0](e)};const Ct=e=>{Ri(e),e.scope.on()},rt=()=>{le&&le.scope.off(),Ri(null)};function Ro(e){return e.vnode.shapeFlag&4}let Bt=!1;function zl(e,t=!1){Bt=t;const{props:n,children:i}=e.vnode,s=Ro(e);Rl(e,n,s,t),Il(e,i);const o=s?Yl(e,t):void 0;return Bt=!1,o}function Yl(e,t){const n=e.type;e.accessCache=Object.create(null),e.proxy=It(new 
Proxy(e.ctx,bl));const{setup:i}=n;if(i){const s=e.setupContext=i.length>1?Mo(e):null;Ct(e),Tt();const o=ze(i,e,0,[e.props,s]);if(At(),rt(),Ms(o)){if(o.then(rt,rt),t)return o.then(r=>{rs(e,r,t)}).catch(r=>{Tn(r,e,0)});e.asyncDep=o}else rs(e,o,t)}else Po(e,t)}function rs(e,t,n){K(t)?e.type.__ssrInlineRender?e.ssrRender=t:e.render=t:ee(t)&&(e.setupState=Qs(t)),Po(e,n)}let ls;function Po(e,t,n){const i=e.type;if(!e.render){if(!t&&ls&&!i.render){const s=i.template||ji(e).template;if(s){const{isCustomElement:o,compilerOptions:r}=e.appContext.config,{delimiters:l,compilerOptions:a}=i,f=re(re({isCustomElement:o,delimiters:l},r),a);i.render=ls(s,f)}}e.render=i.render||Ie}{Ct(e),Tt();try{wl(e)}finally{At(),rt()}}}function Jl(e){return e.attrsProxy||(e.attrsProxy=new Proxy(e.attrs,{get(t,n){return xe(e,"get","$attrs"),t[n]}}))}function Mo(e){const t=n=>{e.exposed=n||{}};return{get attrs(){return Jl(e)},slots:e.slots,emit:e.emit,expose:t}}function Pi(e){if(e.exposed)return e.exposeProxy||(e.exposeProxy=new Proxy(Qs(It(e.exposed)),{get(t,n){if(n in t)return t[n];if(n in Ft)return Ft[n](e)},has(t,n){return n in t||n in Ft}}))}function Xl(e,t=!0){return K(e)?e.displayName||e.name:e.name||t&&e.__name}function Ql(e){return K(e)&&"__vccOpts"in e}const ne=(e,t)=>zr(e,t,Bt);function ri(e,t,n){const i=arguments.length;return i===2?ee(t)&&!$(t)?xn(t)?ce(e,null,[t]):ce(e,t):ce(e,null,t):(i>3?n=Array.prototype.slice.call(arguments,2):i===3&&xn(n)&&(n=[n]),ce(e,t,n))}const Zl=Symbol.for("v-scx"),Gl=()=>bt(Zl),ea="3.3.13",ta="http://www.w3.org/2000/svg",st=typeof document<"u"?document:null,as=st&&st.createElement("template"),na={insert:(e,t,n)=>{t.insertBefore(e,n||null)},remove:e=>{const t=e.parentNode;t&&t.removeChild(e)},createElement:(e,t,n,i)=>{const s=t?st.createElementNS(ta,e):st.createElement(e,n?{is:n}:void 0);return 
e==="select"&&i&&i.multiple!=null&&s.setAttribute("multiple",i.multiple),s},createText:e=>st.createTextNode(e),createComment:e=>st.createComment(e),setText:(e,t)=>{e.nodeValue=t},setElementText:(e,t)=>{e.textContent=t},parentNode:e=>e.parentNode,nextSibling:e=>e.nextSibling,querySelector:e=>st.querySelector(e),setScopeId(e,t){e.setAttribute(t,"")},insertStaticContent(e,t,n,i,s,o){const r=n?n.previousSibling:t.lastChild;if(s&&(s===o||s.nextSibling))for(;t.insertBefore(s.cloneNode(!0),n),!(s===o||!(s=s.nextSibling)););else{as.innerHTML=i?`${e}`:e;const l=as.content;if(i){const a=l.firstChild;for(;a.firstChild;)l.appendChild(a.firstChild);l.removeChild(a)}t.insertBefore(l,n)}return[r?r.nextSibling:t.firstChild,n?n.previousSibling:t.lastChild]}},Ue="transition",Ot="animation",Ut=Symbol("_vtc"),Io=(e,{slots:t})=>ri(cl,ia(e),t);Io.displayName="Transition";const Fo={name:String,type:String,css:{type:Boolean,default:!0},duration:[String,Number,Object],enterFromClass:String,enterActiveClass:String,enterToClass:String,appearFromClass:String,appearActiveClass:String,appearToClass:String,leaveFromClass:String,leaveActiveClass:String,leaveToClass:String};Io.props=re({},lo,Fo);const et=(e,t=[])=>{$(e)?e.forEach(n=>n(...t)):e&&e(...t)},cs=e=>e?$(e)?e.some(t=>t.length>1):e.length>1:!1;function ia(e){const t={};for(const j in e)j in Fo||(t[j]=e[j]);if(e.css===!1)return t;const{name:n="v",type:i,duration:s,enterFromClass:o=`${n}-enter-from`,enterActiveClass:r=`${n}-enter-active`,enterToClass:l=`${n}-enter-to`,appearFromClass:a=o,appearActiveClass:f=r,appearToClass:p=l,leaveFromClass:d=`${n}-leave-from`,leaveActiveClass:x=`${n}-leave-active`,leaveToClass:E=`${n}-leave-to`}=e,I=sa(s),R=I&&I[0],H=I&&I[1],{onBeforeEnter:q,onEnter:V,onEnterCancelled:g,onLeave:m,onLeaveCancelled:P,onBeforeAppear:U=q,onAppear:D=V,onAppearCancelled:O=g}=t,T=(j,z,oe)=>{tt(j,z?p:l),tt(j,z?f:r),oe&&oe()},A=(j,z)=>{j._isLeaving=!1,tt(j,d),tt(j,E),tt(j,x),z&&z()},W=j=>(z,oe)=>{const 
ae=j?D:V,k=()=>T(z,j,oe);et(ae,[z,k]),fs(()=>{tt(z,j?a:o),Ke(z,j?p:l),cs(ae)||us(z,i,R,k)})};return re(t,{onBeforeEnter(j){et(q,[j]),Ke(j,o),Ke(j,r)},onBeforeAppear(j){et(U,[j]),Ke(j,a),Ke(j,f)},onEnter:W(!1),onAppear:W(!0),onLeave(j,z){j._isLeaving=!0;const oe=()=>A(j,z);Ke(j,d),la(),Ke(j,x),fs(()=>{j._isLeaving&&(tt(j,d),Ke(j,E),cs(m)||us(j,i,H,oe))}),et(m,[j,oe])},onEnterCancelled(j){T(j,!1),et(g,[j])},onAppearCancelled(j){T(j,!0),et(O,[j])},onLeaveCancelled(j){A(j),et(P,[j])}})}function sa(e){if(e==null)return null;if(ee(e))return[Un(e.enter),Un(e.leave)];{const t=Un(e);return[t,t]}}function Un(e){return rr(e)}function Ke(e,t){t.split(/\s+/).forEach(n=>n&&e.classList.add(n)),(e[Ut]||(e[Ut]=new Set)).add(t)}function tt(e,t){t.split(/\s+/).forEach(i=>i&&e.classList.remove(i));const n=e[Ut];n&&(n.delete(t),n.size||(e[Ut]=void 0))}function fs(e){requestAnimationFrame(()=>{requestAnimationFrame(e)})}let oa=0;function us(e,t,n,i){const s=e._endId=++oa,o=()=>{s===e._endId&&i()};if(n)return setTimeout(o,n);const{type:r,timeout:l,propCount:a}=ra(e,t);if(!r)return i();const f=r+"end";let p=0;const d=()=>{e.removeEventListener(f,x),o()},x=E=>{E.target===e&&++p>=a&&d()};setTimeout(()=>{p(n[I]||"").split(", "),s=i(`${Ue}Delay`),o=i(`${Ue}Duration`),r=ps(s,o),l=i(`${Ot}Delay`),a=i(`${Ot}Duration`),f=ps(l,a);let p=null,d=0,x=0;t===Ue?r>0&&(p=Ue,d=r,x=o.length):t===Ot?f>0&&(p=Ot,d=f,x=a.length):(d=Math.max(r,f),p=d>0?r>f?Ue:Ot:null,x=p?p===Ue?o.length:a.length:0);const E=p===Ue&&/\b(transform|all)(,|$)/.test(i(`${Ue}Property`).toString());return{type:p,timeout:d,propCount:x,hasTransform:E}}function ps(e,t){for(;e.lengthds(n)+ds(e[i])))}function ds(e){return e==="auto"?0:Number(e.slice(0,-1).replace(",","."))*1e3}function la(){return document.body.offsetHeight}function aa(e,t,n){const i=e[Ut];i&&(t=(t?[t,...i]:[...i]).join(" ")),t==null?e.removeAttribute("class"):n?e.setAttribute("class",t):e.className=t}const ca=Symbol("_vod"),fa=Symbol("");function ua(e,t,n){const 
i=e.style,s=ie(n);if(n&&!s){if(t&&!ie(t))for(const o in t)n[o]==null&&li(i,o,"");for(const o in n)li(i,o,n[o])}else{const o=i.display;if(s){if(t!==n){const r=i[fa];r&&(n+=";"+r),i.cssText=n}}else t&&e.removeAttribute("style");ca in e&&(i.display=o)}}const ms=/\s*!important$/;function li(e,t,n){if($(n))n.forEach(i=>li(e,t,i));else if(n==null&&(n=""),t.startsWith("--"))e.setProperty(t,n);else{const i=pa(e,t);ms.test(n)?e.setProperty(ft(i),n.replace(ms,""),"important"):e[i]=n}}const hs=["Webkit","Moz","ms"],Kn={};function pa(e,t){const n=Kn[t];if(n)return n;let i=Fe(t);if(i!=="filter"&&i in e)return Kn[t]=i;i=_n(i);for(let s=0;sWn||(ya.then(()=>Wn=0),Wn=Date.now());function _a(e,t){const n=i=>{if(!i._vts)i._vts=Date.now();else if(i._vts<=n.attached)return;Ee(wa(i,n.value),t,5,[i])};return n.value=e,n.attached=ba(),n}function wa(e,t){if($(t)){const n=e.stopImmediatePropagation;return e.stopImmediatePropagation=()=>{n.call(e),e._stopped=!0},t.map(i=>s=>!s._stopped&&i&&i(s))}else return t}const ys=e=>e.charCodeAt(0)===111&&e.charCodeAt(1)===110&&e.charCodeAt(2)>96&&e.charCodeAt(2)<123,Ca=(e,t,n,i,s=!1,o,r,l,a)=>{t==="class"?aa(e,i,s):t==="style"?ua(e,n,i):Kt(t)?fi(t)||xa(e,t,n,i,r):(t[0]==="."?(t=t.slice(1),!0):t[0]==="^"?(t=t.slice(1),!1):Ea(e,t,i,s))?ma(e,t,i,o,r,l,a):(t==="true-value"?e._trueValue=i:t==="false-value"&&(e._falseValue=i),da(e,t,i,s))};function Ea(e,t,n,i){if(i)return!!(t==="innerHTML"||t==="textContent"||t in e&&ys(t)&&K(n));if(t==="spellcheck"||t==="draggable"||t==="translate"||t==="form"||t==="list"&&e.tagName==="INPUT"||t==="type"&&e.tagName==="TEXTAREA")return!1;if(t==="width"||t==="height"){const s=e.tagName;if(s==="IMG"||s==="VIDEO"||s==="CANVAS"||s==="SOURCE")return!1}return ys(t)&&ie(n)?!1:t in e}const Ta=["ctrl","shift","alt","meta"],Aa={stop:e=>e.stopPropagation(),prevent:e=>e.preventDefault(),self:e=>e.target!==e.currentTarget,ctrl:e=>!e.ctrlKey,shift:e=>!e.shiftKey,alt:e=>!e.altKey,meta:e=>!e.metaKey,left:e=>"button"in 
e&&e.button!==0,middle:e=>"button"in e&&e.button!==1,right:e=>"button"in e&&e.button!==2,exact:(e,t)=>Ta.some(n=>e[`${n}Key`]&&!t.includes(n))},Nc=(e,t)=>{const n=e._withMods||(e._withMods={}),i=t.join(".");return n[i]||(n[i]=(s,...o)=>{for(let r=0;r{const n=e._withKeys||(e._withKeys={}),i=t.join(".");return n[i]||(n[i]=s=>{if(!("key"in s))return;const o=ft(s.key);if(t.some(r=>r===o||ja[r]===o))return e(s)})},Sa=re({patchProp:Ca},na);let qn,bs=!1;function Oa(){return qn=bs?qn:Nl(Sa),bs=!0,qn}const Hc=(...e)=>{const t=Oa().createApp(...e),{mount:n}=t;return t.mount=i=>{const s=Ra(i);if(s)return n(s,!0,s instanceof SVGElement)},t};function Ra(e){return ie(e)?document.querySelector(e):e}const $c=(e,t)=>{const n=e.__vccOpts||e;for(const[i,s]of t)n[i]=s;return n},Dc="/learn-wgpu-zh/res/WebGPU-1.0.png",Bc="/learn-wgpu-zh/res/firefox.png",Uc="/learn-wgpu-zh/res/safari.png",Kc="/learn-wgpu-zh/res/tools.png",Pa="modulepreload",Ma=function(e){return"/learn-wgpu-zh/"+e},_s={},Wc=function(t,n,i){let s=Promise.resolve();if(n&&n.length>0){const o=document.getElementsByTagName("link");s=Promise.all(n.map(r=>{if(r=Ma(r),r in _s)return;_s[r]=!0;const l=r.endsWith(".css"),a=l?'[rel="stylesheet"]':"";if(!!i)for(let d=o.length-1;d>=0;d--){const x=o[d];if(x.href===r&&(!l||x.rel==="stylesheet"))return}else if(document.querySelector(`link[href="${r}"]${a}`))return;const p=document.createElement("link");if(p.rel=l?"stylesheet":Pa,l||(p.as="script",p.crossOrigin=""),p.href=r,document.head.appendChild(p),l)return new Promise((d,x)=>{p.addEventListener("load",d),p.addEventListener("error",()=>x(new Error(`Unable to preload CSS for ${r}`)))})}))}return s.then(()=>t()).catch(o=>{const r=new Event("vite:preloadError",{cancelable:!0});if(r.payload=o,window.dispatchEvent(r),!r.defaultPrevented)throw o})},Ia=window.__VP_SITE_DATA__;function Mi(e){return ks()?(hr(e),!0):!1}function Je(e){return typeof e=="function"?e():Ci(e)}function qc(e,t){const n=(t==null?void 
0:t.computedGetter)===!1?Ci:Je;return function(...i){return ne(()=>e.apply(this,i.map(s=>n(s))))}}const Lo=typeof window<"u"&&typeof document<"u";typeof WorkerGlobalScope<"u"&&globalThis instanceof WorkerGlobalScope;const Fa=Object.prototype.toString,La=e=>Fa.call(e)==="[object Object]",No=()=>{},ws=Na();function Na(){var e,t;return Lo&&((e=window==null?void 0:window.navigator)==null?void 0:e.userAgent)&&(/iP(ad|hone|od)/.test(window.navigator.userAgent)||((t=window==null?void 0:window.navigator)==null?void 0:t.maxTouchPoints)>2&&/iPad|Macintosh/.test(window==null?void 0:window.navigator.userAgent))}function ka(e,t){function n(...i){return new Promise((s,o)=>{Promise.resolve(e(()=>t.apply(this,i),{fn:t,thisArg:this,args:i})).then(s).catch(o)})}return n}const ko=e=>e();function Ha(e=ko){const t=ge(!0);function n(){t.value=!1}function i(){t.value=!0}const s=(...o)=>{t.value&&e(...o)};return{isActive:En(t),pause:n,resume:i,eventFilter:s}}function $a(e){return e||Fn()}function Ho(...e){if(e.length!==1)return Wr(...e);const t=e[0];return typeof t=="function"?En(Br(()=>({get:t,set:No}))):ge(t)}function Da(e,t,n={}){const{eventFilter:i=ko,...s}=n;return Ye(e,ka(i,t),s)}function Ba(e,t,n={}){const{eventFilter:i,...s}=n,{eventFilter:o,pause:r,resume:l,isActive:a}=Ha(i);return{stop:Da(e,t,{...s,eventFilter:o}),pause:r,resume:l,isActive:a}}function $o(e,t=!0,n){const i=$a(n);i?jt(e,i):t?e():An(e)}function Do(e){var t;const n=Je(e);return(t=n==null?void 0:n.$el)!=null?t:n}const ct=Lo?window:void 0;function vn(...e){let t,n,i,s;if(typeof e[0]=="string"||Array.isArray(e[0])?([n,i,s]=e,t=ct):[t,n,i,s]=e,!t)return No;Array.isArray(n)||(n=[n]),Array.isArray(i)||(i=[i]);const o=[],r=()=>{o.forEach(p=>p()),o.length=0},l=(p,d,x,E)=>(p.addEventListener(d,x,E),()=>p.removeEventListener(d,x,E)),a=Ye(()=>[Do(t),Je(s)],([p,d])=>{if(r(),!p)return;const x=La(d)?{...d}:d;o.push(...n.flatMap(E=>i.map(I=>l(p,E,I,x))))},{immediate:!0,flush:"post"}),f=()=>{a(),r()};return Mi(f),f}function 
Ua(){const e=ge(!1);return Fn()&&jt(()=>{e.value=!0}),e}function Ka(e){const t=Ua();return ne(()=>(t.value,!!e()))}function Wa(e,t={}){const{window:n=ct}=t,i=Ka(()=>n&&"matchMedia"in n&&typeof n.matchMedia=="function");let s;const o=ge(!1),r=f=>{o.value=f.matches},l=()=>{s&&("removeEventListener"in s?s.removeEventListener("change",r):s.removeListener(r))},a=oo(()=>{i.value&&(l(),s=n.matchMedia(Je(e)),"addEventListener"in s?s.addEventListener("change",r):s.addListener(r),o.value=s.matches)});return Mi(()=>{a(),l(),s=void 0}),o}const tn=typeof globalThis<"u"?globalThis:typeof window<"u"?window:typeof global<"u"?global:typeof self<"u"?self:{},nn="__vueuse_ssr_handlers__",qa=Va();function Va(){return nn in tn||(tn[nn]=tn[nn]||{}),tn[nn]}function Bo(e,t){return qa[e]||t}function za(e){return e==null?"any":e instanceof Set?"set":e instanceof Map?"map":e instanceof Date?"date":typeof e=="boolean"?"boolean":typeof e=="string"?"string":typeof e=="object"?"object":Number.isNaN(e)?"any":"number"}const Ya={boolean:{read:e=>e==="true",write:e=>String(e)},object:{read:e=>JSON.parse(e),write:e=>JSON.stringify(e)},number:{read:e=>Number.parseFloat(e),write:e=>String(e)},any:{read:e=>e,write:e=>String(e)},string:{read:e=>e,write:e=>String(e)},map:{read:e=>new Map(JSON.parse(e)),write:e=>JSON.stringify(Array.from(e.entries()))},set:{read:e=>new Set(JSON.parse(e)),write:e=>JSON.stringify(Array.from(e))},date:{read:e=>new Date(e),write:e=>e.toISOString()}},Cs="vueuse-storage";function Ja(e,t,n,i={}){var s;const{flush:o="pre",deep:r=!0,listenToStorageChanges:l=!0,writeDefaults:a=!0,mergeDefaults:f=!1,shallow:p,window:d=ct,eventFilter:x,onError:E=T=>{console.error(T)},initOnMounted:I}=i,R=(p?Js:ge)(typeof t=="function"?t():t);if(!n)try{n=Bo("getDefaultStorage",()=>{var T;return(T=ct)==null?void 0:T.localStorage})()}catch(T){E(T)}if(!n)return R;const H=Je(t),q=za(H),V=(s=i.serializer)!=null?s:Ya[q],{pause:g,resume:m}=Ba(R,()=>P(R.value),{flush:o,deep:r,eventFilter:x});return 
d&&l&&$o(()=>{vn(d,"storage",O),vn(d,Cs,D),I&&O()}),I||O(),R;function P(T){try{if(T==null)n.removeItem(e);else{const A=V.write(T),W=n.getItem(e);W!==A&&(n.setItem(e,A),d&&d.dispatchEvent(new CustomEvent(Cs,{detail:{key:e,oldValue:W,newValue:A,storageArea:n}})))}}catch(A){E(A)}}function U(T){const A=T?T.newValue:n.getItem(e);if(A==null)return a&&H!=null&&n.setItem(e,V.write(H)),H;if(!T&&f){const W=V.read(A);return typeof f=="function"?f(W,H):q==="object"&&!Array.isArray(W)?{...H,...W}:W}else return typeof A!="string"?A:V.read(A)}function D(T){O(T.detail)}function O(T){if(!(T&&T.storageArea!==n)){if(T&&T.key==null){R.value=H;return}if(!(T&&T.key!==e)){g();try{(T==null?void 0:T.newValue)!==V.write(R.value)&&(R.value=U(T))}catch(A){E(A)}finally{T?An(m):m()}}}}}function Uo(e){return Wa("(prefers-color-scheme: dark)",e)}function Xa(e={}){const{selector:t="html",attribute:n="class",initialValue:i="auto",window:s=ct,storage:o,storageKey:r="vueuse-color-scheme",listenToStorageChanges:l=!0,storageRef:a,emitAuto:f,disableTransition:p=!0}=e,d={auto:"",light:"light",dark:"dark",...e.modes||{}},x=Uo({window:s}),E=ne(()=>x.value?"dark":"light"),I=a||(r==null?Ho(i):Ja(r,i,o,{window:s,listenToStorageChanges:l})),R=ne(()=>I.value==="auto"?E.value:I.value),H=Bo("updateHTMLAttrs",(m,P,U)=>{const D=typeof m=="string"?s==null?void 0:s.document.querySelector(m):Do(m);if(!D)return;let O;if(p){O=s.document.createElement("style");const T="*,*::before,*::after{-webkit-transition:none!important;-moz-transition:none!important;-o-transition:none!important;-ms-transition:none!important;transition:none!important}";O.appendChild(document.createTextNode(T)),s.document.head.appendChild(O)}if(P==="class"){const T=U.split(/\s/g);Object.values(d).flatMap(A=>(A||"").split(/\s/g)).filter(Boolean).forEach(A=>{T.includes(A)?D.classList.add(A):D.classList.remove(A)})}else D.setAttribute(P,U);p&&(s.getComputedStyle(O).opacity,document.head.removeChild(O))});function q(m){var 
P;H(t,n,(P=d[m])!=null?P:m)}function V(m){e.onChanged?e.onChanged(m,q):q(m)}Ye(R,V,{flush:"post",immediate:!0}),$o(()=>V(R.value));const g=ne({get(){return f?I.value:R.value},set(m){I.value=m}});try{return Object.assign(g,{store:I,system:E,state:R})}catch{return g}}function Qa(e={}){const{valueDark:t="dark",valueLight:n="",window:i=ct}=e,s=Xa({...e,onChanged:(l,a)=>{var f;e.onChanged?(f=e.onChanged)==null||f.call(e,l==="dark",a,l):a(l)},modes:{dark:t,light:n}}),o=ne(()=>s.system?s.system.value:Uo({window:i}).value?"dark":"light");return ne({get(){return s.value==="dark"},set(l){const a=l?"dark":"light";o.value===a?s.value="auto":s.value=a}})}function Vn(e){return typeof Window<"u"&&e instanceof Window?e.document.documentElement:typeof Document<"u"&&e instanceof Document?e.documentElement:e}function Ko(e){const t=window.getComputedStyle(e);if(t.overflowX==="scroll"||t.overflowY==="scroll"||t.overflowX==="auto"&&e.clientWidth1?!0:(t.preventDefault&&t.preventDefault(),!1)}const sn=new WeakMap;function Vc(e,t=!1){const n=ge(t);let i=null,s;Ye(Ho(e),l=>{const a=Vn(Je(l));if(a){const f=a;sn.get(f)||sn.set(f,s),n.value&&(f.style.overflow="hidden")}},{immediate:!0});const o=()=>{const l=Vn(Je(e));!l||n.value||(ws&&(i=vn(l,"touchmove",a=>{Za(a)},{passive:!1})),l.style.overflow="hidden",n.value=!0)},r=()=>{var l;const a=Vn(Je(e));!a||!n.value||(ws&&(i==null||i()),a.style.overflow=(l=sn.get(a))!=null?l:"",sn.delete(a),n.value=!1)};return Mi(r),ne({get(){return n.value},set(l){l?o():r()}})}function zc(e={}){const{window:t=ct,behavior:n="auto"}=e;if(!t)return{x:ge(0),y:ge(0)};const i=ge(t.scrollX),s=ge(t.scrollY),o=ne({get(){return i.value},set(l){scrollTo({left:l,behavior:n})}}),r=ne({get(){return s.value},set(l){scrollTo({top:l,behavior:n})}});return vn(t,"scroll",()=>{i.value=t.scrollX,s.value=t.scrollY},{capture:!1,passive:!0}),{x:o,y:r}}const Wo=/^(?:[a-z]+:|\/\/)/i,Ga="vitepress-theme-appearance",qo=/#.*$/,ec=/(index)?\.(md|html)$/,Ce=typeof 
document<"u",Vo={relativePath:"",filePath:"",title:"404",description:"Not Found",headers:[],frontmatter:{sidebar:!1,layout:"page"},lastUpdated:0,isNotFound:!0};function tc(e,t,n=!1){if(t===void 0)return!1;if(e=Es(`/${e}`),n)return new RegExp(t).test(e);if(Es(t)!==e)return!1;const i=t.match(qo);return i?(Ce?location.hash:"")===i[0]:!0}function Es(e){return decodeURI(e).replace(qo,"").replace(ec,"")}function nc(e){return Wo.test(e)}function ic(e,t){var i,s,o,r,l,a,f;const n=Object.keys(e.locales).find(p=>p!=="root"&&!nc(p)&&tc(t,`/${p}/`,!0))||"root";return Object.assign({},e,{localeIndex:n,lang:((i=e.locales[n])==null?void 0:i.lang)??e.lang,dir:((s=e.locales[n])==null?void 0:s.dir)??e.dir,title:((o=e.locales[n])==null?void 0:o.title)??e.title,titleTemplate:((r=e.locales[n])==null?void 0:r.titleTemplate)??e.titleTemplate,description:((l=e.locales[n])==null?void 0:l.description)??e.description,head:Yo(e.head,((a=e.locales[n])==null?void 0:a.head)??[]),themeConfig:{...e.themeConfig,...(f=e.locales[n])==null?void 0:f.themeConfig}})}function zo(e,t){const n=t.title||e.title,i=t.titleTemplate??e.titleTemplate;if(typeof i=="string"&&i.includes(":title"))return i.replace(/:title/g,n);const s=sc(e.title,i);return`${n}${s}`}function sc(e,t){return t===!1?"":t===!0||t===void 0?` | ${e}`:e===t?"":` | ${t}`}function oc(e,t){const[n,i]=t;if(n!=="meta")return!1;const s=Object.entries(i)[0];return s==null?!1:e.some(([o,r])=>o===n&&r[s[0]]===s[1])}function Yo(e,t){return[...e.filter(n=>!oc(t,n)),...t]}const rc=/[\u0000-\u001F"#$&*+,:;<=>?[\]^`{|}\u007F]/g,lc=/^[a-z]:/i;function Ts(e){const t=lc.exec(e),n=t?t[0]:"";return n+e.slice(n.length).replace(rc,"_").replace(/(^|\/)_+(?=[^/]*$)/,"$1")}const ac=Symbol(),lt=Js(Ia);function Yc(e){const t=ne(()=>ic(lt.value,e.data.relativePath)),n=t.value.appearance,i=n==="force-dark"?ge(!0):n?Qa({storageKey:Ga,initialValue:()=>typeof n=="string"?n:"auto",...typeof 
n=="object"?n:{}}):ge(!1);return{site:t,theme:ne(()=>t.value.themeConfig),page:ne(()=>e.data),frontmatter:ne(()=>e.data.frontmatter),params:ne(()=>e.data.params),lang:ne(()=>t.value.lang),dir:ne(()=>t.value.dir),localeIndex:ne(()=>t.value.localeIndex||"root"),title:ne(()=>zo(t.value,e.data)),description:ne(()=>e.data.description||t.value.description),isDark:i}}function cc(){const e=bt(ac);if(!e)throw new Error("vitepress data not properly injected in app");return e}const fc={ez:"application/andrew-inset",aw:"application/applixware",atom:"application/atom+xml",atomcat:"application/atomcat+xml",atomdeleted:"application/atomdeleted+xml",atomsvc:"application/atomsvc+xml",dwd:"application/atsc-dwd+xml",held:"application/atsc-held+xml",rsat:"application/atsc-rsat+xml",bdoc:"application/bdoc",xcs:"application/calendar+xml",ccxml:"application/ccxml+xml",cdfx:"application/cdfx+xml",cdmia:"application/cdmi-capability",cdmic:"application/cdmi-container",cdmid:"application/cdmi-domain",cdmio:"application/cdmi-object",cdmiq:"application/cdmi-queue",cu:"application/cu-seeme",mpd:"application/dash+xml",davmount:"application/davmount+xml",dbk:"application/docbook+xml",dssc:"application/dssc+der",xdssc:"application/dssc+xml",es:"application/ecmascript",ecma:"application/ecmascript",emma:"application/emma+xml",emotionml:"application/emotionml+xml",epub:"application/epub+zip",exi:"application/exi",fdt:"application/fdt+xml",pfr:"application/font-tdpfr",geojson:"application/geo+json",gml:"application/gml+xml",gpx:"application/gpx+xml",gxf:"application/gxf",gz:"application/gzip",hjson:"application/hjson",stk:"application/hyperstudio",ink:"application/inkml+xml",inkml:"application/inkml+xml",ipfix:"application/ipfix",its:"application/its+xml",jar:"application/java-archive",war:"application/java-archive",ear:"application/java-archive",ser:"application/java-serialized-object",class:"application/java-vm",js:"application/javascript",mjs:"application/javascript",json:"application/json",map:"ap
plication/json",json5:"application/json5",jsonml:"application/jsonml+json",jsonld:"application/ld+json",lgr:"application/lgr+xml",lostxml:"application/lost+xml",hqx:"application/mac-binhex40",cpt:"application/mac-compactpro",mads:"application/mads+xml",webmanifest:"application/manifest+json",mrc:"application/marc",mrcx:"application/marcxml+xml",ma:"application/mathematica",nb:"application/mathematica",mb:"application/mathematica",mathml:"application/mathml+xml",mbox:"application/mbox",mscml:"application/mediaservercontrol+xml",metalink:"application/metalink+xml",meta4:"application/metalink4+xml",mets:"application/mets+xml",maei:"application/mmt-aei+xml",musd:"application/mmt-usd+xml",mods:"application/mods+xml",m21:"application/mp21",mp21:"application/mp21",mp4s:"application/mp4",m4p:"application/mp4",doc:"application/msword",dot:"application/msword",mxf:"application/mxf",nq:"application/n-quads",nt:"application/n-triples",cjs:"application/node",bin:"application/octet-stream",dms:"application/octet-stream",lrf:"application/octet-stream",mar:"application/octet-stream",so:"application/octet-stream",dist:"application/octet-stream",distz:"application/octet-stream",pkg:"application/octet-stream",bpk:"application/octet-stream",dump:"application/octet-stream",elc:"application/octet-stream",deploy:"application/octet-stream",exe:"application/octet-stream",dll:"application/octet-stream",deb:"application/octet-stream",dmg:"application/octet-stream",iso:"application/octet-stream",img:"application/octet-stream",msi:"application/octet-stream",msp:"application/octet-stream",msm:"application/octet-stream",buffer:"application/octet-stream",oda:"application/oda",opf:"application/oebps-package+xml",ogx:"application/ogg",omdoc:"application/omdoc+xml",onetoc:"application/onenote",onetoc2:"application/onenote",onetmp:"application/onenote",onepkg:"application/onenote",oxps:"application/oxps",relo:"application/p2p-overlay+xml",xer:"application/patch-ops-error+xml",pdf:"application/pdf",pgp
:"application/pgp-encrypted",asc:"application/pgp-signature",sig:"application/pgp-signature",prf:"application/pics-rules",p10:"application/pkcs10",p7m:"application/pkcs7-mime",p7c:"application/pkcs7-mime",p7s:"application/pkcs7-signature",p8:"application/pkcs8",ac:"application/pkix-attr-cert",cer:"application/pkix-cert",crl:"application/pkix-crl",pkipath:"application/pkix-pkipath",pki:"application/pkixcmp",pls:"application/pls+xml",ai:"application/postscript",eps:"application/postscript",ps:"application/postscript",provx:"application/provenance+xml",cww:"application/prs.cww",pskcxml:"application/pskc+xml",raml:"application/raml+yaml",rdf:"application/rdf+xml",owl:"application/rdf+xml",rif:"application/reginfo+xml",rnc:"application/relax-ng-compact-syntax",rl:"application/resource-lists+xml",rld:"application/resource-lists-diff+xml",rs:"application/rls-services+xml",rapd:"application/route-apd+xml",sls:"application/route-s-tsid+xml",rusd:"application/route-usd+xml",gbr:"application/rpki-ghostbusters",mft:"application/rpki-manifest",roa:"application/rpki-roa",rsd:"application/rsd+xml",rss:"application/rss+xml",rtf:"application/rtf",sbml:"application/sbml+xml",scq:"application/scvp-cv-request",scs:"application/scvp-cv-response",spq:"application/scvp-vp-request",spp:"application/scvp-vp-response",sdp:"application/sdp",senmlx:"application/senml+xml",sensmlx:"application/sensml+xml",setpay:"application/set-payment-initiation",setreg:"application/set-registration-initiation",shf:"application/shf+xml",siv:"application/sieve",sieve:"application/sieve",smi:"application/smil+xml",smil:"application/smil+xml",rq:"application/sparql-query",srx:"application/sparql-results+xml",gram:"application/srgs",grxml:"application/srgs+xml",sru:"application/sru+xml",ssdl:"application/ssdl+xml",ssml:"application/ssml+xml",swidtag:"application/swid+xml",tei:"application/tei+xml",teicorpus:"application/tei+xml",tfi:"application/thraud+xml",tsd:"application/timestamped-data",toml:"application/tom
l",trig:"application/trig",ttml:"application/ttml+xml",ubj:"application/ubjson",rsheet:"application/urc-ressheet+xml",td:"application/urc-targetdesc+xml",vxml:"application/voicexml+xml",wasm:"application/wasm",wgt:"application/widget",hlp:"application/winhlp",wsdl:"application/wsdl+xml",wspolicy:"application/wspolicy+xml",xaml:"application/xaml+xml",xav:"application/xcap-att+xml",xca:"application/xcap-caps+xml",xdf:"application/xcap-diff+xml",xel:"application/xcap-el+xml",xns:"application/xcap-ns+xml",xenc:"application/xenc+xml",xhtml:"application/xhtml+xml",xht:"application/xhtml+xml",xlf:"application/xliff+xml",xml:"application/xml",xsl:"application/xml",xsd:"application/xml",rng:"application/xml",dtd:"application/xml-dtd",xop:"application/xop+xml",xpl:"application/xproc+xml",xslt:"application/xml",xspf:"application/xspf+xml",mxml:"application/xv+xml",xhvml:"application/xv+xml",xvml:"application/xv+xml",xvm:"application/xv+xml",yang:"application/yang",yin:"application/yin+xml",zip:"application/zip","3gpp":"video/3gpp",adp:"audio/adpcm",amr:"audio/amr",au:"audio/basic",snd:"audio/basic",mid:"audio/midi",midi:"audio/midi",kar:"audio/midi",rmi:"audio/midi",mxmf:"audio/mobile-xmf",mp3:"audio/mpeg",m4a:"audio/mp4",mp4a:"audio/mp4",mpga:"audio/mpeg",mp2:"audio/mpeg",mp2a:"audio/mpeg",m2a:"audio/mpeg",m3a:"audio/mpeg",oga:"audio/ogg",ogg:"audio/ogg",spx:"audio/ogg",opus:"audio/ogg",s3m:"audio/s3m",sil:"audio/silk",wav:"audio/wav",weba:"audio/webm",xm:"audio/xm",ttc:"font/collection",otf:"font/otf",ttf:"font/ttf",woff:"font/woff",woff2:"font/woff2",exr:"image/aces",apng:"image/apng",avif:"image/avif",bmp:"image/bmp",cgm:"image/cgm",drle:"image/dicom-rle",emf:"image/emf",fits:"image/fits",g3:"image/g3fax",gif:"image/gif",heic:"image/heic",heics:"image/heic-sequence",heif:"image/heif",heifs:"image/heif-sequence",hej2:"image/hej2k",hsj2:"image/hsj2",ief:"image/ief",jls:"image/jls",jp2:"image/jp2",jpg2:"image/jp2",jpeg:"image/jpeg",jpg:"image/jpeg",jpe:"image/jpeg",jph:"image
/jph",jhc:"image/jphc",jpm:"image/jpm",jpx:"image/jpx",jpf:"image/jpx",jxr:"image/jxr",jxra:"image/jxra",jxrs:"image/jxrs",jxs:"image/jxs",jxsc:"image/jxsc",jxsi:"image/jxsi",jxss:"image/jxss",ktx:"image/ktx",ktx2:"image/ktx2",png:"image/png",btif:"image/prs.btif",pti:"image/prs.pti",sgi:"image/sgi",svg:"image/svg+xml",svgz:"image/svg+xml",t38:"image/t38",tif:"image/tiff",tiff:"image/tiff",tfx:"image/tiff-fx",webp:"image/webp",wmf:"image/wmf","disposition-notification":"message/disposition-notification",u8msg:"message/global",u8dsn:"message/global-delivery-status",u8mdn:"message/global-disposition-notification",u8hdr:"message/global-headers",eml:"message/rfc822",mime:"message/rfc822","3mf":"model/3mf",gltf:"model/gltf+json",glb:"model/gltf-binary",igs:"model/iges",iges:"model/iges",msh:"model/mesh",mesh:"model/mesh",silo:"model/mesh",mtl:"model/mtl",obj:"model/obj",stpz:"model/step+zip",stpxz:"model/step-xml+zip",stl:"model/stl",wrl:"model/vrml",vrml:"model/vrml",x3db:"model/x3d+fastinfoset",x3dbz:"model/x3d+binary",x3dv:"model/x3d-vrml",x3dvz:"model/x3d+vrml",x3d:"model/x3d+xml",x3dz:"model/x3d+xml",appcache:"text/cache-manifest",manifest:"text/cache-manifest",ics:"text/calendar",ifb:"text/calendar",coffee:"text/coffeescript",litcoffee:"text/coffeescript",css:"text/css",csv:"text/csv",html:"text/html",htm:"text/html",shtml:"text/html",jade:"text/jade",jsx:"text/jsx",less:"text/less",markdown:"text/markdown",md:"text/markdown",mml:"text/mathml",mdx:"text/mdx",n3:"text/n3",txt:"text/plain",text:"text/plain",conf:"text/plain",def:"text/plain",list:"text/plain",log:"text/plain",in:"text/plain",ini:"text/plain",dsc:"text/prs.lines.tag",rtx:"text/richtext",sgml:"text/sgml",sgm:"text/sgml",shex:"text/shex",slim:"text/slim",slm:"text/slim",spdx:"text/spdx",stylus:"text/stylus",styl:"text/stylus",tsv:"text/tab-separated-values",t:"text/troff",tr:"text/troff",roff:"text/troff",man:"text/troff",me:"text/troff",ms:"text/troff",ttl:"text/turtle",uri:"text/uri-list",uris:"text/u
ri-list",urls:"text/uri-list",vcard:"text/vcard",vtt:"text/vtt",yaml:"text/yaml",yml:"text/yaml","3gp":"video/3gpp","3g2":"video/3gpp2",h261:"video/h261",h263:"video/h263",h264:"video/h264",m4s:"video/iso.segment",jpgv:"video/jpeg",jpgm:"image/jpm",mj2:"video/mj2",mjp2:"video/mj2",ts:"video/mp2t",mp4:"video/mp4",mp4v:"video/mp4",mpg4:"video/mp4",mpeg:"video/mpeg",mpg:"video/mpeg",mpe:"video/mpeg",m1v:"video/mpeg",m2v:"video/mpeg",ogv:"video/ogg",qt:"video/quicktime",mov:"video/quicktime",webm:"video/webm"};function uc(e){let t=(""+e).trim().toLowerCase(),n=t.lastIndexOf(".");return fc[~n?t.substring(++n):t]}function pc(e,t){return`${e}${t}`.replace(/\/+/g,"/")}function As(e){return Wo.test(e)||!e.startsWith("/")?e:pc(lt.value.base,e)}function dc(e){let t=e.replace(/\.html$/,"");if(t=decodeURIComponent(t),t=t.replace(/\/$/,"/index"),Ce){const n="/learn-wgpu-zh/";t=Ts(t.slice(n.length).replace(/\//g,"_")||"index")+".md";let i=__VP_HASH_MAP__[t.toLowerCase()];if(i||(t=t.endsWith("_index.md")?t.slice(0,-9)+".md":t.slice(0,-3)+"_index.md",i=__VP_HASH_MAP__[t.toLowerCase()]),!i)return null;t=`${n}assets/${t}.${i}.js`}else t=`./${Ts(t.slice(1).replace(/\//g,"_"))}.md.js`;return t}let an=[];function Jc(e){an.push(e),Mn(()=>{an=an.filter(t=>t!==e)})}const mc=Symbol(),Jo="http://a.com",hc=()=>({path:"/",component:null,data:Vo});function Xc(e,t){const n=Cn(hc()),i={route:n,go:s};async function s(l=Ce?location.href:"/"){var a,f;l=ai(l),await((a=i.onBeforeRouteChange)==null?void 0:a.call(i,l))!==!1&&(Os(l),await r(l),await((f=i.onAfterRouteChanged)==null?void 0:f.call(i,l)))}let o=null;async function r(l,a=0,f=!1){var x;if(await((x=i.onBeforePageLoad)==null?void 0:x.call(i,l))===!1)return;const p=new URL(l,Jo),d=o=p.pathname;try{let E=await e(d);if(!E)throw new Error(`Page not found: ${d}`);if(o===d){o=null;const{default:I,__pageData:R}=E;if(!I)throw new Error(`Invalid route component: ${I}`);n.path=Ce?d:As(d),n.component=It(I),n.data=It(R),Ce&&An(()=>{let 
H=lt.value.base+R.relativePath.replace(/(?:(^|\/)index)?\.md$/,"$1");if(!lt.value.cleanUrls&&!H.endsWith("/")&&(H+=".html"),H!==p.pathname&&(p.pathname=H,l=H+p.search+p.hash,history.replaceState(null,"",l)),p.hash&&!a){let q=null;try{q=document.getElementById(decodeURIComponent(p.hash).slice(1))}catch(V){console.warn(V)}if(q){js(q,p.hash);return}}window.scrollTo(0,a)})}}catch(E){if(!/fetch|Page not found/.test(E.message)&&!/^\/404(\.html|\/)?$/.test(l)&&console.error(E),!f)try{const I=await fetch(lt.value.base+"hashmap.json");window.__VP_HASH_MAP__=await I.json(),await r(l,a,!0);return}catch{}o===d&&(o=null,n.path=Ce?d:As(d),n.component=t?It(t):null,n.data=Vo)}}return Ce&&(window.addEventListener("click",l=>{if(l.target.closest("button"))return;const f=l.target.closest("a");if(f&&!f.closest(".vp-raw")&&(f instanceof SVGElement||!f.download)){const{target:p}=f,{href:d,origin:x,pathname:E,hash:I,search:R}=new URL(f.href instanceof SVGAnimatedString?f.href.animVal:f.href,f.baseURI),H=window.location,q=uc(E);!l.ctrlKey&&!l.shiftKey&&!l.altKey&&!l.metaKey&&!p&&x===H.origin&&(!q||q==="text/html")&&(l.preventDefault(),E===H.pathname&&R===H.search?(I!==H.hash&&(history.pushState(null,"",I),window.dispatchEvent(new Event("hashchange"))),I?js(f,I,f.classList.contains("header-anchor")):(Os(d),window.scrollTo(0,0))):s(d))}},{capture:!0}),window.addEventListener("popstate",async l=>{var a;await r(ai(location.href),l.state&&l.state.scrollPosition||0),(a=i.onAfterRouteChanged)==null||a.call(i,location.href)}),window.addEventListener("hashchange",l=>{l.preventDefault()})),i}function gc(){const e=bt(mc);if(!e)throw new Error("useRouter() is called without provider.");return e}function Xo(){return gc().route}function js(e,t,n=!1){let i=null;try{i=e.classList.contains("header-anchor")?e:document.getElementById(decodeURIComponent(t).slice(1))}catch(s){console.warn(s)}if(i){let 
s=function(){!n||Math.abs(f-window.scrollY)>window.innerHeight?window.scrollTo(0,f):window.scrollTo({left:0,top:f,behavior:"smooth"})},o=lt.value.scrollOffset,r=0,l=24;if(typeof o=="object"&&"padding"in o&&(l=o.padding,o=o.selector),typeof o=="number")r=o;else if(typeof o=="string")r=Ss(o,l);else if(Array.isArray(o))for(const p of o){const d=Ss(p,l);if(d){r=d;break}}const a=parseInt(window.getComputedStyle(i).paddingTop,10),f=window.scrollY+i.getBoundingClientRect().top-r+a;requestAnimationFrame(s)}}function Ss(e,t){const n=document.querySelector(e);if(!n)return 0;const i=n.getBoundingClientRect().bottom;return i<0?0:i+t}function Os(e){Ce&&e!==ai(location.href)&&(history.replaceState({scrollPosition:window.scrollY},document.title),history.pushState(null,"",e))}function ai(e){const t=new URL(e,Jo);return t.pathname=t.pathname.replace(/(^|\/)index(\.html)?$/,"$1"),lt.value.cleanUrls?t.pathname=t.pathname.replace(/\.html$/,""):!t.pathname.endsWith("/")&&!t.pathname.endsWith(".html")&&(t.pathname+=".html"),t.pathname+t.search+t.hash}const zn=()=>an.forEach(e=>e()),Qc=fo({name:"VitePressContent",props:{as:{type:[Object,String],default:"div"}},setup(e){const t=Xo(),{site:n}=cc();return()=>ri(e.as,n.value.contentProps??{style:{position:"relative"}},[t.component?ri(t.component,{onVnodeMounted:zn,onVnodeUpdated:zn,onVnodeUnmounted:zn}):"404 Page Not Found"])}}),Zc="/learn-wgpu-zh/res/wx.jpg",Gc=fo({setup(e,{slots:t}){const n=ge(!1);return jt(()=>{n.value=!0}),()=>n.value&&t.default?t.default():null}});function ef(){Ce&&window.addEventListener("click",e=>{var n;const t=e.target;if(t.matches(".vp-code-group input")){const i=(n=t.parentElement)==null?void 0:n.parentElement;if(!i)return;const s=Array.from(i.querySelectorAll("input")).indexOf(t);if(s<0)return;const o=i.querySelector(".blocks");if(!o)return;const r=Array.from(o.children).find(f=>f.classList.contains("active"));if(!r)return;const 
l=o.children[s];if(!l||r===l)return;r.classList.remove("active"),l.classList.add("active");const a=i==null?void 0:i.querySelector(`label[for="${t.id}"]`);a==null||a.scrollIntoView({block:"nearest"})}})}function tf(){if(Ce){const e=new WeakMap;window.addEventListener("click",t=>{var i;const n=t.target;if(n.matches('div[class*="language-"] > button.copy')){const s=n.parentElement,o=(i=n.nextElementSibling)==null?void 0:i.nextElementSibling;if(!s||!o)return;const r=/language-(shellscript|shell|bash|sh|zsh)/.test(s.className);let l="";o.querySelectorAll("span.line:not(.diff.remove)").forEach(a=>l+=(a.textContent||"")+` +`),l=l.slice(0,-1),r&&(l=l.replace(/^ *(\$|>) /gm,"").trim()),xc(l).then(()=>{n.classList.add("copied"),clearTimeout(e.get(n));const a=setTimeout(()=>{n.classList.remove("copied"),n.blur(),e.delete(n)},2e3);e.set(n,a)})}})}}async function xc(e){try{return navigator.clipboard.writeText(e)}catch{const t=document.createElement("textarea"),n=document.activeElement;t.value=e,t.setAttribute("readonly",""),t.style.contain="strict",t.style.position="absolute",t.style.left="-9999px",t.style.fontSize="12pt";const i=document.getSelection(),s=i?i.rangeCount>0&&i.getRangeAt(0):null;document.body.appendChild(t),t.select(),t.selectionStart=0,t.selectionEnd=e.length,document.execCommand("copy"),document.body.removeChild(t),s&&(i.removeAllRanges(),i.addRange(s)),n&&n.focus()}}function nf(e,t){let n=[],i=!0;const s=o=>{if(i){i=!1;return}const r=o.map(Rs);n.forEach((l,a)=>{const f=r.findIndex(p=>p==null?void 0:p.isEqualNode(l??null));f!==-1?delete r[f]:(l==null||l.remove(),delete n[a])}),r.forEach(l=>l&&document.head.appendChild(l)),n=[...n,...r].filter(Boolean)};oo(()=>{const o=e.data,r=t.value,l=o&&o.description,a=o&&o.frontmatter.head||[],f=zo(r,o);f!==document.title&&(document.title=f);const p=l||r.description;let 
d=document.querySelector("meta[name=description]");d?d.getAttribute("content")!==p&&d.setAttribute("content",p):Rs(["meta",{name:"description",content:p}]),s(Yo(r.head,yc(a)))})}function Rs([e,t,n]){const i=document.createElement(e);for(const s in t)i.setAttribute(s,t[s]);return n&&(i.innerHTML=n),e==="script"&&!t.async&&(i.async=!1),i}function vc(e){return e[0]==="meta"&&e[1]&&e[1].name==="description"}function yc(e){return e.filter(t=>!vc(t))}const Yn=new Set,Qo=()=>document.createElement("link"),bc=e=>{const t=Qo();t.rel="prefetch",t.href=e,document.head.appendChild(t)},_c=e=>{const t=new XMLHttpRequest;t.open("GET",e,t.withCredentials=!0),t.send()};let on;const wc=Ce&&(on=Qo())&&on.relList&&on.relList.supports&&on.relList.supports("prefetch")?bc:_c;function sf(){if(!Ce||!window.IntersectionObserver)return;let e;if((e=navigator.connection)&&(e.saveData||/2g/.test(e.effectiveType)))return;const t=window.requestIdleCallback||setTimeout;let n=null;const i=()=>{n&&n.disconnect(),n=new IntersectionObserver(o=>{o.forEach(r=>{if(r.isIntersecting){const l=r.target;n.unobserve(l);const{pathname:a}=l;if(!Yn.has(a)){Yn.add(a);const f=dc(a);f&&wc(f)}}})}),t(()=>{document.querySelectorAll("#app a").forEach(o=>{const{hostname:r,pathname:l}=new URL(o.href instanceof SVGAnimatedString?o.href.animVal:o.href,o.baseURI),a=l.match(/\.\w+$/);a&&a[0]!==".html"||o.target!=="_blank"&&r===location.hostname&&(l!==location.pathname?n.observe(o):Yn.add(l))})})};jt(i);const s=Xo();Ye(()=>s.path,i),Mn(()=>{n&&n.disconnect()})}export{Nc as $,Mn as A,Sc as B,hl as C,Ac as D,Oc as E,he as F,Js as G,Jc as H,ce as I,jc as J,Wo as K,Xo as L,Kl as M,bt as N,di as O,An as P,zc as Q,Fc as R,En as S,Io as T,qc as U,Wr as V,Vc as W,Ol as X,kc as Y,Pc as Z,$c as _,Oo as a,Mc as a0,Dc as a1,Bc as a2,Uc as a3,Kc as a4,Zc as a5,Wc as a6,nf as a7,mc as a8,Yc as a9,ac as aa,Qc as ab,Gc as ac,lt as ad,Hc as ae,Xc as af,dc as ag,sf as ah,tf as ai,ef as aj,ri as ak,Ao as b,Ic as c,fo as d,Lc as e,As as f,ne as 
g,ge as h,nc as i,jt as j,So as k,uc as l,Ci as m,mi as n,Eo as o,Ec as p,Tc as q,Rc as r,tc as s,Cc as t,cc as u,Ce as v,el as w,Wa as x,Ye as y,oo as z}; diff --git a/assets/chunks/theme.366df15a.js b/assets/chunks/theme.366df15a.js deleted file mode 100644 index 6d2322d44..000000000 --- a/assets/chunks/theme.366df15a.js +++ /dev/null @@ -1,7 +0,0 @@ -import{d as g,o as a,c as i,r as u,n as B,a as x,t as S,_ as m,b,w as p,T as he,e as f,u as et,i as tt,P as nt,f as fe,g as $,h as P,j as H,k as c,l,p as E,m as D,q as st,s as ot,v as R,x as le,y as at,z as q,A as ie,B as me,C as ge,D as rt,E as U,F as T,G as A,H as ye,I as Q,J as h,K as G,L as ze,M as ce,N as te,O as be,Q as Fe,R as lt,S as it,U as xe,V as ct,W as ne,X as ut,Y as dt,Z as _t,$ as vt}from"./framework.adbf3c9e.js";const pt=g({__name:"VPBadge",props:{text:{},type:{}},setup(n){return(e,t)=>(a(),i("span",{class:B(["VPBadge",e.type??"tip"])},[u(e.$slots,"default",{},()=>[x(S(e.text),1)],!0)],2))}});const ht=m(pt,[["__scopeId","data-v-02919808"]]),ft={key:0,class:"VPBackdrop"},mt=g({__name:"VPBackdrop",props:{show:{type:Boolean}},setup(n){return(e,t)=>(a(),b(he,{name:"fade"},{default:p(()=>[e.show?(a(),i("div",ft)):f("",!0)]),_:1}))}});const gt=m(mt,[["__scopeId","data-v-c79a1216"]]),V=et;function yt(n,e){let t,s=!1;return()=>{t&&clearTimeout(t),s?t=setTimeout(n,e):(n(),s=!0,setTimeout(()=>{s=!1},e))}}function _e(n){return/^\//.test(n)?n:`/${n}`}function J(n){if(tt(n))return n.replace(nt,"");const{site:e}=V(),{pathname:t,search:s,hash:o}=new URL(n,"http://a.com"),r=t.endsWith("/")||t.endsWith(".html")?n:n.replace(/(?:(^\.+)\/)?.*$/,`$1${t.replace(/(\.md)?$/,e.value.cleanUrls?"":".html")}${s}${o}`);return fe(r)}function Z({removeCurrent:n=!0,correspondingLink:e=!1}={}){const{site:t,localeIndex:s,page:o,theme:r}=V(),d=$(()=>{var _,y;return{label:(_=t.value.locales[s.value])==null?void 0:_.label,link:((y=t.value.locales[s.value])==null?void 
0:y.link)||(s.value==="root"?"/":`/${s.value}/`)}});return{localeLinks:$(()=>Object.entries(t.value.locales).flatMap(([_,y])=>n&&d.value.label===y.label?[]:{text:y.label,link:bt(y.link||(_==="root"?"/":`/${_}/`),r.value.i18nRouting!==!1&&e,o.value.relativePath.slice(d.value.link.length-1),!t.value.cleanUrls)})),currentLang:d}}function bt(n,e,t,s){return e?n.replace(/\/$/,"")+_e(t.replace(/(^|\/)index\.md$/,"$1").replace(/\.md$/,s?".html":"")):n}const kt=n=>(E("data-v-a172abb3"),n=n(),D(),n),$t={class:"NotFound"},Pt={class:"code"},Vt={class:"title"},wt=kt(()=>c("div",{class:"divider"},null,-1)),St={class:"quote"},Lt={class:"action"},Mt=["href","aria-label"],Tt=g({__name:"NotFound",setup(n){const{site:e,theme:t}=V(),{localeLinks:s}=Z({removeCurrent:!1}),o=P("/");return H(()=>{var d;const r=window.location.pathname.replace(e.value.base,"").replace(/(^.*?\/).*$/,"/$1");s.value.length&&(o.value=((d=s.value.find(({link:v})=>v.startsWith(r)))==null?void 0:d.link)||s.value[0].link)}),(r,d)=>{var v,_,y,w,M;return a(),i("div",$t,[c("p",Pt,S(((v=l(t).notFound)==null?void 0:v.code)??"404"),1),c("h1",Vt,S(((_=l(t).notFound)==null?void 0:_.title)??"PAGE NOT FOUND"),1),wt,c("blockquote",St,S(((y=l(t).notFound)==null?void 0:y.quote)??"But if you don't change your direction, and if you keep looking, you may end up where you are heading."),1),c("div",Lt,[c("a",{class:"link",href:l(fe)(o.value),"aria-label":((w=l(t).notFound)==null?void 0:w.linkLabel)??"go to home"},S(((M=l(t).notFound)==null?void 0:M.linkText)??"Take me home"),9,Mt)])])}}});const Bt=m(Tt,[["__scopeId","data-v-a172abb3"]]);function Oe(n){return st()?(ot(n),!0):!1}function ke(n){return typeof n=="function"?n():l(n)}const Ct=typeof window<"u",It=()=>{};function Nt(n){var e;const t=ke(n);return(e=t==null?void 0:t.$el)!=null?e:t}const $e=Ct?window:void 0;function At(...n){let e,t,s,o;if(typeof n[0]=="string"||Array.isArray(n[0])?([t,s,o]=n,e=$e):[e,t,s,o]=n,!e)return 
It;Array.isArray(t)||(t=[t]),Array.isArray(s)||(s=[s]);const r=[],d=()=>{r.forEach(w=>w()),r.length=0},v=(w,M,C,k)=>(w.addEventListener(M,C,k),()=>w.removeEventListener(M,C,k)),_=R(()=>[Nt(e),ke(o)],([w,M])=>{d(),w&&r.push(...t.flatMap(C=>s.map(k=>v(w,C,k,M))))},{immediate:!0,flush:"post"}),y=()=>{_(),d()};return Oe(y),y}function xt(){const n=P(!1);return at()&&H(()=>{n.value=!0}),n}function Ht(n){const e=xt();return $(()=>(e.value,!!n()))}function ve(n,e={}){const{window:t=$e}=e,s=Ht(()=>t&&"matchMedia"in t&&typeof t.matchMedia=="function");let o;const r=P(!1),d=y=>{r.value=y.matches},v=()=>{o&&("removeEventListener"in o?o.removeEventListener("change",d):o.removeListener(d))},_=le(()=>{s.value&&(v(),o=t.matchMedia(ke(n)),"addEventListener"in o?o.addEventListener("change",d):o.addListener(d),r.value=o.matches)});return Oe(()=>{_(),v(),o=void 0}),r}function Ge({window:n=$e}={}){if(!n)return{x:P(0),y:P(0)};const e=P(n.scrollX),t=P(n.scrollY);return At(n,"scroll",()=>{e.value=n.scrollX,t.value=n.scrollY},{capture:!1,passive:!0}),{x:e,y:t}}function Re(n,e){if(Array.isArray(n))return se(n);if(n==null)return[];e=_e(e);const t=Object.keys(n).sort((o,r)=>r.split("/").length-o.split("/").length).find(o=>e.startsWith(_e(o))),s=t?n[t]:[];return Array.isArray(s)?se(s):se(s.items,s.base)}function Et(n){const e=[];let t=0;for(const s in n){const o=n[s];if(o.items){t=e.push(o);continue}e[t]||e.push({items:[]}),e[t].items.push(o)}return e}function Dt(n){const e=[];function t(s){for(const o of s)o.text&&o.link&&e.push({text:o.text,link:o.link,docFooterText:o.docFooterText}),o.items&&t(o.items)}return t(n),e}function pe(n,e){return Array.isArray(e)?e.some(t=>pe(n,t)):q(n,e.link)?!0:e.items?pe(n,e.items):!1}function se(n,e){return[...n].map(t=>{const s={...t},o=s.base||e;return o&&s.link&&(s.link=o+s.link),s.items&&(s.items=se(s.items,o)),s})}function z(){const{frontmatter:n,page:e,theme:t}=V(),s=ve("(min-width: 960px)"),o=P(!1),r=$(()=>{const 
I=t.value.sidebar,N=e.value.relativePath;return I?Re(I,N):[]}),d=$(()=>n.value.sidebar!==!1&&r.value.length>0&&n.value.layout!=="home"),v=$(()=>_?n.value.aside==null?t.value.aside==="left":n.value.aside==="left":!1),_=$(()=>n.value.layout==="home"?!1:n.value.aside!=null?!!n.value.aside:t.value.aside!==!1),y=$(()=>d.value&&s.value),w=$(()=>d.value?Et(r.value):[]);function M(){o.value=!0}function C(){o.value=!1}function k(){o.value?C():M()}return{isOpen:o,sidebar:r,sidebarGroups:w,hasSidebar:d,hasAside:_,leftAside:v,isSidebarEnabled:y,open:M,close:C,toggle:k}}function zt(n,e){let t;le(()=>{t=n.value?document.activeElement:void 0}),H(()=>{window.addEventListener("keyup",s)}),me(()=>{window.removeEventListener("keyup",s)});function s(o){o.key==="Escape"&&n.value&&(e(),t==null||t.focus())}}const Ue=P(ie?location.hash:"");ie&&window.addEventListener("hashchange",()=>{Ue.value=location.hash});function Ft(n){const{page:e}=V(),t=P(!1),s=$(()=>n.value.collapsed!=null),o=$(()=>!!n.value.link),r=P(!1),d=()=>{r.value=q(e.value.relativePath,n.value.link)};R([e,n,Ue],d),H(d);const v=$(()=>r.value?!0:n.value.items?pe(e.value.relativePath,n.value.items):!1),_=$(()=>!!(n.value.items&&n.value.items.length));le(()=>{t.value=!!(s.value&&n.value.collapsed)}),ge(()=>{(r.value||v.value)&&(t.value=!1)});function y(){s.value&&(t.value=!t.value)}return{collapsed:t,collapsible:s,isLink:o,isActiveLink:r,hasActiveLink:v,hasChildren:_,toggle:y}}function Ot(){const{hasSidebar:n}=z(),e=ve("(min-width: 960px)"),t=ve("(min-width: 1280px)");return{isAsideEnabled:$(()=>!t.value&&!e.value?!1:n.value?t.value:e.value)}}const Gt=71;function Pe(n){return typeof n.outline=="object"&&!Array.isArray(n.outline)&&n.outline.label||n.outlineTitle||"On this page"}function Ve(n){const e=[...document.querySelectorAll(".VPDoc :where(h1,h2,h3,h4,h5,h6)")].filter(t=>t.id&&t.hasChildNodes()).map(t=>{const s=Number(t.tagName[1]);return{title:Rt(t),link:"#"+t.id,level:s}});return Ut(e,n)}function Rt(n){let e="";for(const 
t of n.childNodes)if(t.nodeType===1){if(t.classList.contains("VPBadge")||t.classList.contains("header-anchor"))continue;e+=t.textContent}else t.nodeType===3&&(e+=t.textContent);return e.trim()}function Ut(n,e){if(e===!1)return[];const t=(typeof e=="object"&&!Array.isArray(e)?e.level:e)||2,[s,o]=typeof t=="number"?[t,t]:t==="deep"?[2,6]:t;n=n.filter(d=>d.level>=s&&d.level<=o);const r=[];e:for(let d=0;d=0;_--){const y=n[_];if(y.level{requestAnimationFrame(r),window.addEventListener("scroll",s)}),rt(()=>{d(location.hash)}),me(()=>{window.removeEventListener("scroll",s)});function r(){if(!t.value)return;const v=[].slice.call(n.value.querySelectorAll(".outline-link")),_=[].slice.call(document.querySelectorAll(".content .header-anchor")).filter(k=>v.some(I=>I.hash===k.hash&&k.offsetParent!==null)),y=window.scrollY,w=window.innerHeight,M=document.body.offsetHeight,C=Math.abs(y+w-M)<1;if(_.length&&C){d(_[_.length-1].hash);return}for(let k=0;k<_.length;k++){const I=_[k],N=_[k+1],[L,W]=qt(k,I,N);if(L){d(W);return}}}function d(v){o&&o.classList.remove("active"),v==null?o=null:o=n.value.querySelector(`a[href="${decodeURIComponent(v)}"]`);const _=o;_?(_.classList.add("active"),e.value.style.top=_.offsetTop+33+"px",e.value.style.opacity="1"):(e.value.style.top="33px",e.value.style.opacity="0")}}function He(n){return n.parentElement.offsetTop-Gt}function qt(n,e,t){const s=window.scrollY;return n===0&&s===0?[!0,null]:s{const o=U("VPDocOutlineItem",!0);return a(),i("ul",{class:B(t.root?"root":"nested")},[(a(!0),i(T,null,A(t.headers,({children:r,link:d,title:v})=>(a(),i("li",null,[c("a",{class:"outline-link",href:d,onClick:e,title:v},S(v),9,Wt),r!=null&&r.length?(a(),b(o,{key:0,headers:r},null,8,["headers"])):f("",!0)]))),256))],2)}}});const 
we=m(Kt,[["__scopeId","data-v-d0ee3533"]]),Yt=n=>(E("data-v-ff0f39c8"),n=n(),D(),n),Xt={class:"content"},Qt={class:"outline-title"},Jt={"aria-labelledby":"doc-outline-aria-label"},Zt=Yt(()=>c("span",{class:"visually-hidden",id:"doc-outline-aria-label"}," Table of Contents for current page ",-1)),en=g({__name:"VPDocAsideOutline",setup(n){const{frontmatter:e,theme:t}=V(),s=ye([]);Q(()=>{s.value=Ve(e.value.outline??t.value.outline)});const o=P(),r=P();return jt(o,r),(d,v)=>(a(),i("div",{class:B(["VPDocAsideOutline",{"has-outline":s.value.length>0}]),ref_key:"container",ref:o},[c("div",Xt,[c("div",{class:"outline-marker",ref_key:"marker",ref:r},null,512),c("div",Qt,S(l(Pe)(l(t))),1),c("nav",Jt,[Zt,h(we,{headers:s.value,root:!0},null,8,["headers"])])])],2))}});const tn=m(en,[["__scopeId","data-v-ff0f39c8"]]),nn={class:"VPDocAsideCarbonAds"},sn=g({__name:"VPDocAsideCarbonAds",props:{carbonAds:{}},setup(n){const e=()=>null;return(t,s)=>(a(),i("div",nn,[h(l(e),{"carbon-ads":t.carbonAds},null,8,["carbon-ads"])]))}}),on=n=>(E("data-v-3f215769"),n=n(),D(),n),an={class:"VPDocAside"},rn=on(()=>c("div",{class:"spacer"},null,-1)),ln=g({__name:"VPDocAside",setup(n){const{theme:e}=V();return(t,s)=>(a(),i("div",an,[u(t.$slots,"aside-top",{},void 0,!0),u(t.$slots,"aside-outline-before",{},void 0,!0),h(tn),u(t.$slots,"aside-outline-after",{},void 0,!0),rn,u(t.$slots,"aside-ads-before",{},void 0,!0),l(e).carbonAds?(a(),b(sn,{key:0,"carbon-ads":l(e).carbonAds},null,8,["carbon-ads"])):f("",!0),u(t.$slots,"aside-ads-after",{},void 0,!0),u(t.$slots,"aside-bottom",{},void 0,!0)]))}});const cn=m(ln,[["__scopeId","data-v-3f215769"]]);function un(){const{theme:n,page:e}=V();return $(()=>{const{text:t="Edit this page",pattern:s=""}=n.value.editLink||{};let o;return typeof s=="function"?o=s(e.value):o=s.replace(/:path/g,e.value.filePath),{url:o,text:t}})}function dn(){const{page:n,theme:e,frontmatter:t}=V();return $(()=>{var _,y,w,M,C,k,I,N;const 
s=Re(e.value.sidebar,n.value.relativePath),o=Dt(s),r=o.findIndex(L=>q(n.value.relativePath,L.link)),d=((_=e.value.docFooter)==null?void 0:_.prev)===!1&&!t.value.prev||t.value.prev===!1,v=((y=e.value.docFooter)==null?void 0:y.next)===!1&&!t.value.next||t.value.next===!1;return{prev:d?void 0:{text:(typeof t.value.prev=="string"?t.value.prev:typeof t.value.prev=="object"?t.value.prev.text:void 0)??((w=o[r-1])==null?void 0:w.docFooterText)??((M=o[r-1])==null?void 0:M.text),link:(typeof t.value.prev=="object"?t.value.prev.link:void 0)??((C=o[r-1])==null?void 0:C.link)},next:v?void 0:{text:(typeof t.value.next=="string"?t.value.next:typeof t.value.next=="object"?t.value.next.text:void 0)??((k=o[r+1])==null?void 0:k.docFooterText)??((I=o[r+1])==null?void 0:I.text),link:(typeof t.value.next=="object"?t.value.next.link:void 0)??((N=o[r+1])==null?void 0:N.link)}}})}const _n={},vn={xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24"},pn=c("path",{d:"M18,23H4c-1.7,0-3-1.3-3-3V6c0-1.7,1.3-3,3-3h7c0.6,0,1,0.4,1,1s-0.4,1-1,1H4C3.4,5,3,5.4,3,6v14c0,0.6,0.4,1,1,1h14c0.6,0,1-0.4,1-1v-7c0-0.6,0.4-1,1-1s1,0.4,1,1v7C21,21.7,19.7,23,18,23z"},null,-1),hn=c("path",{d:"M8,17c-0.3,0-0.5-0.1-0.7-0.3C7,16.5,6.9,16.1,7,15.8l1-4c0-0.2,0.1-0.3,0.3-0.5l9.5-9.5c1.2-1.2,3.2-1.2,4.4,0c1.2,1.2,1.2,3.2,0,4.4l-9.5,9.5c-0.1,0.1-0.3,0.2-0.5,0.3l-4,1C8.2,17,8.1,17,8,17zM9.9,12.5l-0.5,2.1l2.1-0.5l9.3-9.3c0.4-0.4,0.4-1.1,0-1.6c-0.4-0.4-1.2-0.4-1.6,0l0,0L9.9,12.5z M18.5,2.5L18.5,2.5L18.5,2.5z"},null,-1),fn=[pn,hn];function mn(n,e){return a(),i("svg",vn,fn)}const gn=m(_n,[["render",mn]]),F=g({__name:"VPLink",props:{tag:{},href:{},noIcon:{type:Boolean},target:{},rel:{}},setup(n){const e=n,t=$(()=>e.tag??(e.href?"a":"span")),s=$(()=>e.href&&ze.test(e.href));return(o,r)=>(a(),b(G(t.value),{class:B(["VPLink",{link:o.href,"vp-external-link-icon":s.value,"no-icon":o.noIcon}]),href:o.href?l(J)(o.href):void 0,target:o.target??(s.value?"_blank":void 0),rel:o.rel??(s.value?"noreferrer":void 
0)},{default:p(()=>[u(o.$slots,"default")]),_:3},8,["class","href","target","rel"]))}}),yn={class:"VPLastUpdated"},bn=["datetime"],kn=g({__name:"VPDocFooterLastUpdated",setup(n){const{theme:e,page:t}=V(),s=$(()=>new Date(t.value.lastUpdated)),o=$(()=>s.value.toISOString()),r=P("");return H(()=>{le(()=>{var d;r.value=new Intl.DateTimeFormat(void 0,((d=e.value.lastUpdated)==null?void 0:d.formatOptions)??{dateStyle:"short",timeStyle:"short"}).format(s.value)})}),(d,v)=>{var _;return a(),i("p",yn,[x(S(((_=l(e).lastUpdated)==null?void 0:_.text)||l(e).lastUpdatedText||"Last updated")+": ",1),c("time",{datetime:o.value},S(r.value),9,bn)])}}});const $n=m(kn,[["__scopeId","data-v-149a99df"]]),Pn={key:0,class:"VPDocFooter"},Vn={key:0,class:"edit-info"},wn={key:0,class:"edit-link"},Sn={key:1,class:"last-updated"},Ln={key:1,class:"prev-next"},Mn={class:"pager"},Tn=["href"],Bn=["innerHTML"],Cn=["innerHTML"],In={class:"pager"},Nn=["href"],An=["innerHTML"],xn=["innerHTML"],Hn=g({__name:"VPDocFooter",setup(n){const{theme:e,page:t,frontmatter:s}=V(),o=un(),r=dn(),d=$(()=>e.value.editLink&&s.value.editLink!==!1),v=$(()=>t.value.lastUpdated&&s.value.lastUpdated!==!1),_=$(()=>d.value||v.value||r.value.prev||r.value.next);return(y,w)=>{var M,C,k,I,N,L;return _.value?(a(),i("footer",Pn,[u(y.$slots,"doc-footer-before",{},void 0,!0),d.value||v.value?(a(),i("div",Vn,[d.value?(a(),i("div",wn,[h(F,{class:"edit-link-button",href:l(o).url,"no-icon":!0},{default:p(()=>[h(gn,{class:"edit-link-icon","aria-label":"edit icon"}),x(" "+S(l(o).text),1)]),_:1},8,["href"])])):f("",!0),v.value?(a(),i("div",Sn,[h($n)])):f("",!0)])):f("",!0),(M=l(r).prev)!=null&&M.link||(C=l(r).next)!=null&&C.link?(a(),i("nav",Ln,[c("div",Mn,[(k=l(r).prev)!=null&&k.link?(a(),i("a",{key:0,class:"pager-link prev",href:l(J)(l(r).prev.link)},[c("span",{class:"desc",innerHTML:((I=l(e).docFooter)==null?void 0:I.prev)||"Previous 
page"},null,8,Bn),c("span",{class:"title",innerHTML:l(r).prev.text},null,8,Cn)],8,Tn)):f("",!0)]),c("div",In,[(N=l(r).next)!=null&&N.link?(a(),i("a",{key:0,class:"pager-link next",href:l(J)(l(r).next.link)},[c("span",{class:"desc",innerHTML:((L=l(e).docFooter)==null?void 0:L.next)||"Next page"},null,8,An),c("span",{class:"title",innerHTML:l(r).next.text},null,8,xn)],8,Nn)):f("",!0)])])):f("",!0)])):f("",!0)}}});const En=m(Hn,[["__scopeId","data-v-37656e44"]]),Dn={},zn={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},Fn=c("path",{d:"M9,19c-0.3,0-0.5-0.1-0.7-0.3c-0.4-0.4-0.4-1,0-1.4l5.3-5.3L8.3,6.7c-0.4-0.4-0.4-1,0-1.4s1-0.4,1.4,0l6,6c0.4,0.4,0.4,1,0,1.4l-6,6C9.5,18.9,9.3,19,9,19z"},null,-1),On=[Fn];function Gn(n,e){return a(),i("svg",zn,On)}const Se=m(Dn,[["render",Gn]]),Rn={key:0,class:"VPDocOutlineDropdown"},Un={key:0,class:"items"},jn=g({__name:"VPDocOutlineDropdown",setup(n){const{frontmatter:e,theme:t}=V(),s=P(!1);Q(()=>{s.value=!1});const o=ye([]);return Q(()=>{o.value=Ve(e.value.outline??t.value.outline)}),(r,d)=>o.value.length>0?(a(),i("div",Rn,[c("button",{onClick:d[0]||(d[0]=v=>s.value=!s.value),class:B({open:s.value})},[x(S(l(Pe)(l(t)))+" ",1),h(Se,{class:"icon"})],2),s.value?(a(),i("div",Un,[h(we,{headers:o.value},null,8,["headers"])])):f("",!0)])):f("",!0)}});const qn=m(jn,[["__scopeId","data-v-0c1fc463"]]),Wn=n=>(E("data-v-6b87e69f"),n=n(),D(),n),Kn={class:"container"},Yn=Wn(()=>c("div",{class:"aside-curtain"},null,-1)),Xn={class:"aside-container"},Qn={class:"aside-content"},Jn={class:"content"},Zn={class:"content-container"},es={class:"main"},ts=g({__name:"VPDoc",setup(n){const{theme:e}=V(),t=ce(),{hasSidebar:s,hasAside:o,leftAside:r}=z(),d=$(()=>t.path.replace(/[./]+/g,"_").replace(/_html$/,""));return(v,_)=>{const y=U("Content");return a(),i("div",{class:B(["VPDoc",{"has-sidebar":l(s),"has-aside":l(o)}])},[u(v.$slots,"doc-top",{},void 
0,!0),c("div",Kn,[l(o)?(a(),i("div",{key:0,class:B(["aside",{"left-aside":l(r)}])},[Yn,c("div",Xn,[c("div",Qn,[h(cn,null,{"aside-top":p(()=>[u(v.$slots,"aside-top",{},void 0,!0)]),"aside-bottom":p(()=>[u(v.$slots,"aside-bottom",{},void 0,!0)]),"aside-outline-before":p(()=>[u(v.$slots,"aside-outline-before",{},void 0,!0)]),"aside-outline-after":p(()=>[u(v.$slots,"aside-outline-after",{},void 0,!0)]),"aside-ads-before":p(()=>[u(v.$slots,"aside-ads-before",{},void 0,!0)]),"aside-ads-after":p(()=>[u(v.$slots,"aside-ads-after",{},void 0,!0)]),_:3})])])],2)):f("",!0),c("div",Jn,[c("div",Zn,[u(v.$slots,"doc-before",{},void 0,!0),h(qn),c("main",es,[h(y,{class:B(["vp-doc",[d.value,l(e).externalLinkIcon&&"external-link-icon-enabled"]])},null,8,["class"])]),h(En,null,{"doc-footer-before":p(()=>[u(v.$slots,"doc-footer-before",{},void 0,!0)]),_:3}),u(v.$slots,"doc-after",{},void 0,!0)])])]),u(v.$slots,"doc-bottom",{},void 0,!0)],2)}}});const ns=m(ts,[["__scopeId","data-v-6b87e69f"]]),ss=g({__name:"VPButton",props:{tag:{},size:{},theme:{},text:{},href:{}},setup(n){const e=n,t=$(()=>[e.size??"medium",e.theme??"brand"]),s=$(()=>e.href&&ze.test(e.href)),o=$(()=>e.tag?e.tag:e.href?"a":"button");return(r,d)=>(a(),b(G(o.value),{class:B(["VPButton",t.value]),href:r.href?l(J)(r.href):void 0,target:s.value?"_blank":void 0,rel:s.value?"noreferrer":void 0},{default:p(()=>[x(S(r.text),1)]),_:1},8,["class","href","target","rel"]))}});const os=m(ss,[["__scopeId","data-v-567ba664"]]),as=["src","alt"],rs=g({inheritAttrs:!1,__name:"VPImage",props:{image:{},alt:{}},setup(n){return(e,t)=>{const s=U("VPImage",!0);return e.image?(a(),i(T,{key:0},[typeof e.image=="string"||"src"in e.image?(a(),i("img",te({key:0,class:"VPImage"},typeof e.image=="string"?e.$attrs:{...e.image,...e.$attrs},{src:l(fe)(typeof e.image=="string"?e.image:e.image.src),alt:e.alt??(typeof 
e.image=="string"?"":e.image.alt||"")}),null,16,as)):(a(),i(T,{key:1},[h(s,te({class:"dark",image:e.image.dark,alt:e.image.alt},e.$attrs),null,16,["image","alt"]),h(s,te({class:"light",image:e.image.light,alt:e.image.alt},e.$attrs),null,16,["image","alt"])],64))],64)):f("",!0)}}});const Le=m(rs,[["__scopeId","data-v-8426fc1a"]]),ls=n=>(E("data-v-da5d1713"),n=n(),D(),n),is={class:"container"},cs={class:"main"},us={key:0,class:"name"},ds=["innerHTML"],_s=["innerHTML"],vs=["innerHTML"],ps={key:0,class:"actions"},hs={key:0,class:"image"},fs={class:"image-container"},ms=ls(()=>c("div",{class:"image-bg"},null,-1)),gs=g({__name:"VPHero",props:{name:{},text:{},tagline:{},image:{},actions:{}},setup(n){const e=be("hero-image-slot-exists");return(t,s)=>(a(),i("div",{class:B(["VPHero",{"has-image":t.image||l(e)}])},[c("div",is,[c("div",cs,[u(t.$slots,"home-hero-info",{},()=>[t.name?(a(),i("h1",us,[c("span",{innerHTML:t.name,class:"clip"},null,8,ds)])):f("",!0),t.text?(a(),i("p",{key:1,innerHTML:t.text,class:"text"},null,8,_s)):f("",!0),t.tagline?(a(),i("p",{key:2,innerHTML:t.tagline,class:"tagline"},null,8,vs)):f("",!0)],!0),t.actions?(a(),i("div",ps,[(a(!0),i(T,null,A(t.actions,o=>(a(),i("div",{key:o.link,class:"action"},[h(os,{tag:"a",size:"medium",theme:o.theme,text:o.text,href:o.link},null,8,["theme","text","href"])]))),128))])):f("",!0)]),t.image||l(e)?(a(),i("div",hs,[c("div",fs,[ms,u(t.$slots,"home-hero-image",{},()=>[t.image?(a(),b(Le,{key:0,class:"image-src",image:t.image},null,8,["image"])):f("",!0)],!0)])])):f("",!0)])],2))}});const 
ys=m(gs,[["__scopeId","data-v-da5d1713"]]),bs=g({__name:"VPHomeHero",setup(n){const{frontmatter:e}=V();return(t,s)=>l(e).hero?(a(),b(ys,{key:0,class:"VPHomeHero",name:l(e).hero.name,text:l(e).hero.text,tagline:l(e).hero.tagline,image:l(e).hero.image,actions:l(e).hero.actions},{"home-hero-info":p(()=>[u(t.$slots,"home-hero-info")]),"home-hero-image":p(()=>[u(t.$slots,"home-hero-image")]),_:3},8,["name","text","tagline","image","actions"])):f("",!0)}}),ks={},$s={xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24"},Ps=c("path",{d:"M19.9,12.4c0.1-0.2,0.1-0.5,0-0.8c-0.1-0.1-0.1-0.2-0.2-0.3l-7-7c-0.4-0.4-1-0.4-1.4,0s-0.4,1,0,1.4l5.3,5.3H5c-0.6,0-1,0.4-1,1s0.4,1,1,1h11.6l-5.3,5.3c-0.4,0.4-0.4,1,0,1.4c0.2,0.2,0.5,0.3,0.7,0.3s0.5-0.1,0.7-0.3l7-7C19.8,12.6,19.9,12.5,19.9,12.4z"},null,-1),Vs=[Ps];function ws(n,e){return a(),i("svg",$s,Vs)}const Ss=m(ks,[["render",ws]]),Ls={class:"box"},Ms=["innerHTML"],Ts=["innerHTML"],Bs=["innerHTML"],Cs={key:3,class:"link-text"},Is={class:"link-text-value"},Ns=g({__name:"VPFeature",props:{icon:{},title:{},details:{},link:{},linkText:{},rel:{}},setup(n){return(e,t)=>(a(),b(F,{class:"VPFeature",href:e.link,rel:e.rel,"no-icon":!0,tag:e.link?"a":"div"},{default:p(()=>[c("article",Ls,[typeof e.icon=="object"?(a(),b(Le,{key:0,image:e.icon,alt:e.icon.alt,height:e.icon.height||48,width:e.icon.width||48},null,8,["image","alt","height","width"])):e.icon?(a(),i("div",{key:1,class:"icon",innerHTML:e.icon},null,8,Ms)):f("",!0),c("h2",{class:"title",innerHTML:e.title},null,8,Ts),e.details?(a(),i("p",{key:2,class:"details",innerHTML:e.details},null,8,Bs)):f("",!0),e.linkText?(a(),i("div",Cs,[c("p",Is,[x(S(e.linkText)+" ",1),h(Ss,{class:"link-text-icon"})])])):f("",!0)])]),_:1},8,["href","rel","tag"]))}});const As=m(Ns,[["__scopeId","data-v-33086751"]]),xs={key:0,class:"VPFeatures"},Hs={class:"container"},Es={class:"items"},Ds=g({__name:"VPFeatures",props:{features:{}},setup(n){const e=n,t=$(()=>{const 
s=e.features.length;if(s){if(s===2)return"grid-2";if(s===3)return"grid-3";if(s%3===0)return"grid-6";if(s>3)return"grid-4"}else return});return(s,o)=>s.features?(a(),i("div",xs,[c("div",Hs,[c("div",Es,[(a(!0),i(T,null,A(s.features,r=>(a(),i("div",{key:r.title,class:B(["item",[t.value]])},[h(As,{icon:r.icon,title:r.title,details:r.details,link:r.link,"link-text":r.linkText,rel:r.rel},null,8,["icon","title","details","link","link-text","rel"])],2))),128))])])])):f("",!0)}});const zs=m(Ds,[["__scopeId","data-v-39646fad"]]),Fs=g({__name:"VPHomeFeatures",setup(n){const{frontmatter:e}=V();return(t,s)=>l(e).features?(a(),b(zs,{key:0,class:"VPHomeFeatures",features:l(e).features},null,8,["features"])):f("",!0)}}),Os={class:"VPHome"},Gs=g({__name:"VPHome",setup(n){return(e,t)=>{const s=U("Content");return a(),i("div",Os,[u(e.$slots,"home-hero-before",{},void 0,!0),h(bs,null,{"home-hero-info":p(()=>[u(e.$slots,"home-hero-info",{},void 0,!0)]),"home-hero-image":p(()=>[u(e.$slots,"home-hero-image",{},void 0,!0)]),_:3}),u(e.$slots,"home-hero-after",{},void 0,!0),u(e.$slots,"home-features-before",{},void 0,!0),h(Fs),u(e.$slots,"home-features-after",{},void 0,!0),h(s)])}}});const Rs=m(Gs,[["__scopeId","data-v-d82743a8"]]),Us={},js={class:"VPPage"};function qs(n,e){const t=U("Content");return a(),i("div",js,[u(n.$slots,"page-top"),h(t),u(n.$slots,"page-bottom")])}const Ws=m(Us,[["render",qs]]),Ks=g({__name:"VPContent",setup(n){const{page:e,frontmatter:t}=V(),{hasSidebar:s}=z();return(o,r)=>(a(),i("div",{class:B(["VPContent",{"has-sidebar":l(s),"is-home":l(t).layout==="home"}]),id:"VPContent"},[l(e).isNotFound?u(o.$slots,"not-found",{key:0},()=>[h(Bt)],!0):l(t).layout==="page"?(a(),b(Ws,{key:1},{"page-top":p(()=>[u(o.$slots,"page-top",{},void 0,!0)]),"page-bottom":p(()=>[u(o.$slots,"page-bottom",{},void 0,!0)]),_:3})):l(t).layout==="home"?(a(),b(Rs,{key:2},{"home-hero-before":p(()=>[u(o.$slots,"home-hero-before",{},void 
0,!0)]),"home-hero-info":p(()=>[u(o.$slots,"home-hero-info",{},void 0,!0)]),"home-hero-image":p(()=>[u(o.$slots,"home-hero-image",{},void 0,!0)]),"home-hero-after":p(()=>[u(o.$slots,"home-hero-after",{},void 0,!0)]),"home-features-before":p(()=>[u(o.$slots,"home-features-before",{},void 0,!0)]),"home-features-after":p(()=>[u(o.$slots,"home-features-after",{},void 0,!0)]),_:3})):l(t).layout&&l(t).layout!=="doc"?(a(),b(G(l(t).layout),{key:3})):(a(),b(ns,{key:4},{"doc-top":p(()=>[u(o.$slots,"doc-top",{},void 0,!0)]),"doc-bottom":p(()=>[u(o.$slots,"doc-bottom",{},void 0,!0)]),"doc-footer-before":p(()=>[u(o.$slots,"doc-footer-before",{},void 0,!0)]),"doc-before":p(()=>[u(o.$slots,"doc-before",{},void 0,!0)]),"doc-after":p(()=>[u(o.$slots,"doc-after",{},void 0,!0)]),"aside-top":p(()=>[u(o.$slots,"aside-top",{},void 0,!0)]),"aside-outline-before":p(()=>[u(o.$slots,"aside-outline-before",{},void 0,!0)]),"aside-outline-after":p(()=>[u(o.$slots,"aside-outline-after",{},void 0,!0)]),"aside-ads-before":p(()=>[u(o.$slots,"aside-ads-before",{},void 0,!0)]),"aside-ads-after":p(()=>[u(o.$slots,"aside-ads-after",{},void 0,!0)]),"aside-bottom":p(()=>[u(o.$slots,"aside-bottom",{},void 0,!0)]),_:3}))],2))}});const Ys=m(Ks,[["__scopeId","data-v-669faec9"]]),Xs={class:"container"},Qs=["innerHTML"],Js=["innerHTML"],Zs=g({__name:"VPFooter",setup(n){const{theme:e,frontmatter:t}=V(),{hasSidebar:s}=z();return(o,r)=>l(e).footer&&l(t).footer!==!1?(a(),i("footer",{key:0,class:B(["VPFooter",{"has-sidebar":l(s)}])},[c("div",Xs,[l(e).footer.message?(a(),i("p",{key:0,class:"message",innerHTML:l(e).footer.message},null,8,Qs)):f("",!0),l(e).footer.copyright?(a(),i("p",{key:1,class:"copyright",innerHTML:l(e).footer.copyright},null,8,Js)):f("",!0)])],2)):f("",!0)}});const eo=m(Zs,[["__scopeId","data-v-e03eb2e1"]]),to={class:"header"},no={class:"outline"},so=g({__name:"VPLocalNavOutlineDropdown",props:{headers:{},navHeight:{}},setup(n){const 
e=n,{theme:t}=V(),s=P(!1),o=P(0),r=P();Q(()=>{s.value=!1});function d(){s.value=!s.value,o.value=window.innerHeight+Math.min(window.scrollY-e.navHeight,0)}function v(y){y.target.classList.contains("outline-link")&&(r.value&&(r.value.style.transition="none"),lt(()=>{s.value=!1}))}function _(){s.value=!1,window.scrollTo({top:0,left:0,behavior:"smooth"})}return(y,w)=>(a(),i("div",{class:"VPLocalNavOutlineDropdown",style:Fe({"--vp-vh":o.value+"px"})},[y.headers.length>0?(a(),i("button",{key:0,onClick:d,class:B({open:s.value})},[x(S(l(Pe)(l(t)))+" ",1),h(Se,{class:"icon"})],2)):(a(),i("button",{key:1,onClick:_},S(l(t).returnToTopLabel||"Return to top"),1)),h(he,{name:"flyout"},{default:p(()=>[s.value?(a(),i("div",{key:0,ref_key:"items",ref:r,class:"items",onClick:v},[c("div",to,[c("a",{class:"top-link",href:"#",onClick:_},S(l(t).returnToTopLabel||"Return to top"),1)]),c("div",no,[h(we,{headers:y.headers},null,8,["headers"])])],512)):f("",!0)]),_:1})],4))}});const oo=m(so,[["__scopeId","data-v-18201f51"]]),ao={},ro={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},lo=c("path",{d:"M17,11H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h14c0.6,0,1,0.4,1,1S17.6,11,17,11z"},null,-1),io=c("path",{d:"M21,7H3C2.4,7,2,6.6,2,6s0.4-1,1-1h18c0.6,0,1,0.4,1,1S21.6,7,21,7z"},null,-1),co=c("path",{d:"M21,15H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h18c0.6,0,1,0.4,1,1S21.6,15,21,15z"},null,-1),uo=c("path",{d:"M17,19H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h14c0.6,0,1,0.4,1,1S17.6,19,17,19z"},null,-1),_o=[lo,io,co,uo];function vo(n,e){return a(),i("svg",ro,_o)}const po=m(ao,[["render",vo]]),ho=["aria-expanded"],fo={class:"menu-text"},mo=g({__name:"VPLocalNav",props:{open:{type:Boolean}},emits:["open-menu"],setup(n){const{theme:e,frontmatter:t}=V(),{hasSidebar:s}=z(),{y:o}=Ge(),r=ye([]),d=P(0);H(()=>{d.value=parseInt(getComputedStyle(document.documentElement).getPropertyValue("--vp-nav-height"))}),Q(()=>{r.value=Ve(t.value.outline??e.value.outline)});const 
v=$(()=>r.value.length===0&&!s.value),_=$(()=>({VPLocalNav:!0,fixed:v.value,"reached-top":o.value>=d.value}));return(y,w)=>l(t).layout!=="home"&&(!v.value||l(o)>=d.value)?(a(),i("div",{key:0,class:B(_.value)},[l(s)?(a(),i("button",{key:0,class:"menu","aria-expanded":y.open,"aria-controls":"VPSidebarNav",onClick:w[0]||(w[0]=M=>y.$emit("open-menu"))},[h(po,{class:"menu-icon"}),c("span",fo,S(l(e).sidebarMenuLabel||"Menu"),1)],8,ho)):f("",!0),h(oo,{headers:r.value,navHeight:d.value},null,8,["headers","navHeight"])],2)):f("",!0)}});const go=m(mo,[["__scopeId","data-v-5cfd5582"]]);function yo(){const n=P(!1);function e(){n.value=!0,window.addEventListener("resize",o)}function t(){n.value=!1,window.removeEventListener("resize",o)}function s(){n.value?t():e()}function o(){window.outerWidth>=768&&t()}const r=ce();return R(()=>r.path,t),{isScreenOpen:n,openScreen:e,closeScreen:t,toggleScreen:s}}const bo={},ko={class:"VPSwitch",type:"button",role:"switch"},$o={class:"check"},Po={key:0,class:"icon"};function Vo(n,e){return a(),i("button",ko,[c("span",$o,[n.$slots.default?(a(),i("span",Po,[u(n.$slots,"default",{},void 0,!0)])):f("",!0)])])}const wo=m(bo,[["render",Vo],["__scopeId","data-v-f3c41672"]]),So={},Lo={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},Mo=it('',9),To=[Mo];function Bo(n,e){return a(),i("svg",Lo,To)}const Co=m(So,[["render",Bo]]),Io={},No={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},Ao=c("path",{d:"M12.1,22c-0.3,0-0.6,0-0.9,0c-5.5-0.5-9.5-5.4-9-10.9c0.4-4.8,4.2-8.6,9-9c0.4,0,0.8,0.2,1,0.5c0.2,0.3,0.2,0.8-0.1,1.1c-2,2.7-1.4,6.4,1.3,8.4c2.1,1.6,5,1.6,7.1,0c0.3-0.2,0.7-0.3,1.1-0.1c0.3,0.2,0.5,0.6,0.5,1c-0.2,2.7-1.5,5.1-3.6,6.8C16.6,21.2,14.4,22,12.1,22zM9.3,4.4c-2.9,1-5,3.6-5.2,6.8c-0.4,4.4,2.8,8.3,7.2,8.7c2.1,0.2,4.2-0.4,5.8-1.8c1.1-0.9,1.9-2.1,2.4-3.4c-2.5,0.9-5.3,0.5-7.5-1.1C9.2,11.4,8.1,7.7,9.3,4.4z"},null,-1),xo=[Ao];function Ho(n,e){return 
a(),i("svg",No,xo)}const Eo=m(Io,[["render",Ho]]),Do=g({__name:"VPSwitchAppearance",setup(n){const{site:e,isDark:t}=V(),s=P(!1),o=ie?r():()=>{};H(()=>{s.value=document.documentElement.classList.contains("dark")});function r(){const d=window.matchMedia("(prefers-color-scheme: dark)"),v=document.documentElement.classList;let _=localStorage.getItem(xe),y=e.value.appearance==="dark"&&_==null||(_==="auto"||_==null?d.matches:_==="dark");d.onchange=C=>{_==="auto"&&M(y=C.matches)};function w(){M(y=!y),_=y?d.matches?"auto":"dark":d.matches?"light":"auto",localStorage.setItem(xe,_)}function M(C){const k=document.createElement("style");k.type="text/css",k.appendChild(document.createTextNode(`:not(.VPSwitchAppearance):not(.VPSwitchAppearance *) { - -webkit-transition: none !important; - -moz-transition: none !important; - -o-transition: none !important; - -ms-transition: none !important; - transition: none !important; -}`)),document.head.appendChild(k),s.value=C,v[C?"add":"remove"]("dark"),window.getComputedStyle(k).opacity,document.head.removeChild(k)}return w}return R(s,d=>{t.value=d}),(d,v)=>(a(),b(wo,{title:"toggle dark mode",class:"VPSwitchAppearance","aria-checked":s.value,onClick:l(o)},{default:p(()=>[h(Co,{class:"sun"}),h(Eo,{class:"moon"})]),_:1},8,["aria-checked","onClick"]))}});const Me=m(Do,[["__scopeId","data-v-82b282f1"]]),zo={key:0,class:"VPNavBarAppearance"},Fo=g({__name:"VPNavBarAppearance",setup(n){const{site:e}=V();return(t,s)=>l(e).appearance?(a(),i("div",zo,[h(Me)])):f("",!0)}});const Oo=m(Fo,[["__scopeId","data-v-f6a63727"]]),Te=P();let je=!1,de=0;function Go(n){const e=P(!1);if(ie){!je&&Ro(),de++;const t=R(Te,s=>{var o,r,d;s===n.el.value||(o=n.el.value)!=null&&o.contains(s)?(e.value=!0,(r=n.onFocus)==null||r.call(n)):(e.value=!1,(d=n.onBlur)==null||d.call(n))});me(()=>{t(),de--,de||Uo()})}return ct(e)}function Ro(){document.addEventListener("focusin",qe),je=!0,Te.value=document.activeElement}function 
Uo(){document.removeEventListener("focusin",qe)}function qe(){Te.value=document.activeElement}const jo={},qo={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},Wo=c("path",{d:"M12,16c-0.3,0-0.5-0.1-0.7-0.3l-6-6c-0.4-0.4-0.4-1,0-1.4s1-0.4,1.4,0l5.3,5.3l5.3-5.3c0.4-0.4,1-0.4,1.4,0s0.4,1,0,1.4l-6,6C12.5,15.9,12.3,16,12,16z"},null,-1),Ko=[Wo];function Yo(n,e){return a(),i("svg",qo,Ko)}const We=m(jo,[["render",Yo]]),Xo={},Qo={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},Jo=c("circle",{cx:"12",cy:"12",r:"2"},null,-1),Zo=c("circle",{cx:"19",cy:"12",r:"2"},null,-1),ea=c("circle",{cx:"5",cy:"12",r:"2"},null,-1),ta=[Jo,Zo,ea];function na(n,e){return a(),i("svg",Qo,ta)}const sa=m(Xo,[["render",na]]),oa={class:"VPMenuLink"},aa=g({__name:"VPMenuLink",props:{item:{}},setup(n){const{page:e}=V();return(t,s)=>(a(),i("div",oa,[h(F,{class:B({active:l(q)(l(e).relativePath,t.item.activeMatch||t.item.link,!!t.item.activeMatch)}),href:t.item.link,target:t.item.target,rel:t.item.rel},{default:p(()=>[x(S(t.item.text),1)]),_:1},8,["class","href","target","rel"])]))}});const ue=m(aa,[["__scopeId","data-v-2f2cfafc"]]),ra={class:"VPMenuGroup"},la={key:0,class:"title"},ia=g({__name:"VPMenuGroup",props:{text:{},items:{}},setup(n){return(e,t)=>(a(),i("div",ra,[e.text?(a(),i("p",la,S(e.text),1)):f("",!0),(a(!0),i(T,null,A(e.items,s=>(a(),i(T,null,["link"in s?(a(),b(ue,{key:0,item:s},null,8,["item"])):f("",!0)],64))),256))]))}});const ca=m(ia,[["__scopeId","data-v-69e747b5"]]),ua={class:"VPMenu"},da={key:0,class:"items"},_a=g({__name:"VPMenu",props:{items:{}},setup(n){return(e,t)=>(a(),i("div",ua,[e.items?(a(),i("div",da,[(a(!0),i(T,null,A(e.items,s=>(a(),i(T,{key:s.text},["link"in s?(a(),b(ue,{key:0,item:s},null,8,["item"])):(a(),b(ca,{key:1,text:s.text,items:s.items},null,8,["text","items"]))],64))),128))])):f("",!0),u(e.$slots,"default",{},void 0,!0)]))}});const 
va=m(_a,[["__scopeId","data-v-e7ea1737"]]),pa=["aria-expanded","aria-label"],ha={key:0,class:"text"},fa=["innerHTML"],ma={class:"menu"},ga=g({__name:"VPFlyout",props:{icon:{},button:{},label:{},items:{}},setup(n){const e=P(!1),t=P();Go({el:t,onBlur:s});function s(){e.value=!1}return(o,r)=>(a(),i("div",{class:"VPFlyout",ref_key:"el",ref:t,onMouseenter:r[1]||(r[1]=d=>e.value=!0),onMouseleave:r[2]||(r[2]=d=>e.value=!1)},[c("button",{type:"button",class:"button","aria-haspopup":"true","aria-expanded":e.value,"aria-label":o.label,onClick:r[0]||(r[0]=d=>e.value=!e.value)},[o.button||o.icon?(a(),i("span",ha,[o.icon?(a(),b(G(o.icon),{key:0,class:"option-icon"})):f("",!0),o.button?(a(),i("span",{key:1,innerHTML:o.button},null,8,fa)):f("",!0),h(We,{class:"text-icon"})])):(a(),b(sa,{key:1,class:"icon"}))],8,pa),c("div",ma,[h(va,{items:o.items},{default:p(()=>[u(o.$slots,"default",{},void 0,!0)]),_:3},8,["items"])])],544))}});const Be=m(ga,[["__scopeId","data-v-a7b5672a"]]),ya={discord:'Discord',facebook:'Facebook',github:'GitHub',instagram:'Instagram',linkedin:'LinkedIn',mastodon:'Mastodon',slack:'Slack',twitter:'Twitter',youtube:'YouTube'},ba=["href","aria-label","innerHTML"],ka=g({__name:"VPSocialLink",props:{icon:{},link:{},ariaLabel:{}},setup(n){const e=n,t=$(()=>typeof e.icon=="object"?e.icon.svg:ya[e.icon]);return(s,o)=>(a(),i("a",{class:"VPSocialLink no-icon",href:s.link,"aria-label":s.ariaLabel??(typeof s.icon=="string"?s.icon:""),target:"_blank",rel:"noopener",innerHTML:t.value},null,8,ba))}});const $a=m(ka,[["__scopeId","data-v-f80f8133"]]),Pa={class:"VPSocialLinks"},Va=g({__name:"VPSocialLinks",props:{links:{}},setup(n){return(e,t)=>(a(),i("div",Pa,[(a(!0),i(T,null,A(e.links,({link:s,icon:o,ariaLabel:r})=>(a(),b($a,{key:s,icon:o,link:s,ariaLabel:r},null,8,["icon","link","ariaLabel"]))),128))]))}});const Ce=m(Va,[["__scopeId","data-v-7bc22406"]]),wa={key:0,class:"group translations"},Sa={class:"trans-title"},La={key:1,class:"group"},Ma={class:"item 
appearance"},Ta={class:"label"},Ba={class:"appearance-action"},Ca={key:2,class:"group"},Ia={class:"item social-links"},Na=g({__name:"VPNavBarExtra",setup(n){const{site:e,theme:t}=V(),{localeLinks:s,currentLang:o}=Z({correspondingLink:!0}),r=$(()=>s.value.length&&o.value.label||e.value.appearance||t.value.socialLinks);return(d,v)=>r.value?(a(),b(Be,{key:0,class:"VPNavBarExtra",label:"extra navigation"},{default:p(()=>[l(s).length&&l(o).label?(a(),i("div",wa,[c("p",Sa,S(l(o).label),1),(a(!0),i(T,null,A(l(s),_=>(a(),b(ue,{key:_.link,item:_},null,8,["item"]))),128))])):f("",!0),l(e).appearance?(a(),i("div",La,[c("div",Ma,[c("p",Ta,S(l(t).darkModeSwitchLabel||"Appearance"),1),c("div",Ba,[h(Me)])])])):f("",!0),l(t).socialLinks?(a(),i("div",Ca,[c("div",Ia,[h(Ce,{class:"social-links-list",links:l(t).socialLinks},null,8,["links"])])])):f("",!0)]),_:1})):f("",!0)}});const Aa=m(Na,[["__scopeId","data-v-40855f84"]]),xa=n=>(E("data-v-e5dd9c1c"),n=n(),D(),n),Ha=["aria-expanded"],Ea=xa(()=>c("span",{class:"container"},[c("span",{class:"top"}),c("span",{class:"middle"}),c("span",{class:"bottom"})],-1)),Da=[Ea],za=g({__name:"VPNavBarHamburger",props:{active:{type:Boolean}},emits:["click"],setup(n){return(e,t)=>(a(),i("button",{type:"button",class:B(["VPNavBarHamburger",{active:e.active}]),"aria-label":"mobile navigation","aria-expanded":e.active,"aria-controls":"VPNavScreen",onClick:t[0]||(t[0]=s=>e.$emit("click"))},Da,10,Ha))}});const Fa=m(za,[["__scopeId","data-v-e5dd9c1c"]]),Oa=["innerHTML"],Ga=g({__name:"VPNavBarMenuLink",props:{item:{}},setup(n){const{page:e}=V();return(t,s)=>(a(),b(F,{class:B({VPNavBarMenuLink:!0,active:l(q)(l(e).relativePath,t.item.activeMatch||t.item.link,!!t.item.activeMatch)}),href:t.item.link,target:t.item.target,rel:t.item.rel,tabindex:"0"},{default:p(()=>[c("span",{innerHTML:t.item.text},null,8,Oa)]),_:1},8,["class","href","target","rel"]))}});const 
Ra=m(Ga,[["__scopeId","data-v-0b525393"]]),Ua=g({__name:"VPNavBarMenuGroup",props:{item:{}},setup(n){const{page:e}=V();return(t,s)=>(a(),b(Be,{class:B({VPNavBarMenuGroup:!0,active:l(q)(l(e).relativePath,t.item.activeMatch,!!t.item.activeMatch)}),button:t.item.text,items:t.item.items},null,8,["class","button","items"]))}}),ja=n=>(E("data-v-7f418b0f"),n=n(),D(),n),qa={key:0,"aria-labelledby":"main-nav-aria-label",class:"VPNavBarMenu"},Wa=ja(()=>c("span",{id:"main-nav-aria-label",class:"visually-hidden"},"Main Navigation",-1)),Ka=g({__name:"VPNavBarMenu",setup(n){const{theme:e}=V();return(t,s)=>l(e).nav?(a(),i("nav",qa,[Wa,(a(!0),i(T,null,A(l(e).nav,o=>(a(),i(T,{key:o.text},["link"in o?(a(),b(Ra,{key:0,item:o},null,8,["item"])):(a(),b(Ua,{key:1,item:o},null,8,["item"]))],64))),128))])):f("",!0)}});const Ya=m(Ka,[["__scopeId","data-v-7f418b0f"]]);const Xa={type:"button",class:"DocSearch DocSearch-Button","aria-label":"Search"},Qa={class:"DocSearch-Button-Container"},Ja=c("svg",{class:"DocSearch-Search-Icon",width:"20",height:"20",viewBox:"0 0 20 20","aria-label":"search icon"},[c("path",{d:"M14.386 14.386l4.0877 4.0877-4.0877-4.0877c-2.9418 2.9419-7.7115 2.9419-10.6533 0-2.9419-2.9418-2.9419-7.7115 0-10.6533 2.9418-2.9419 7.7115-2.9419 10.6533 0 2.9419 2.9418 2.9419 7.7115 0 10.6533z",stroke:"currentColor",fill:"none","fill-rule":"evenodd","stroke-linecap":"round","stroke-linejoin":"round"})],-1),Za={class:"DocSearch-Button-Placeholder"},er=c("span",{class:"DocSearch-Button-Keys"},[c("kbd",{class:"DocSearch-Button-Key"}),c("kbd",{class:"DocSearch-Button-Key"},"K")],-1),Ee=g({__name:"VPNavBarSearchButton",props:{placeholder:{}},setup(n){return(e,t)=>(a(),i("button",Xa,[c("span",Qa,[Ja,c("span",Za,S(e.placeholder),1)]),er]))}});const tr={id:"local-search"},nr={key:1,id:"docsearch"},sr=g({__name:"VPNavBarSearch",setup(n){const e=()=>null,t=()=>null,{theme:s,localeIndex:o}=V(),r=P(!1),d=P(!1),v=$(()=>{var I,N,L,W,ee,K,Ae;const k=((I=s.value.search)==null?void 
0:I.options)??s.value.algolia;return((ee=(W=(L=(N=k==null?void 0:k.locales)==null?void 0:N[o.value])==null?void 0:L.translations)==null?void 0:W.button)==null?void 0:ee.buttonText)||((Ae=(K=k==null?void 0:k.translations)==null?void 0:K.button)==null?void 0:Ae.buttonText)||"Search"});H(()=>{});function _(){r.value||(r.value=!0,setTimeout(y,16))}function y(){const k=new Event("keydown");k.key="k",k.metaKey=!0,window.dispatchEvent(k),setTimeout(()=>{document.querySelector(".DocSearch-Modal")||y()},16)}const w=P(!1),M=P("'Meta'");H(()=>{M.value=/(Mac|iPhone|iPod|iPad)/i.test(navigator.platform)?"'⌘'":"'Ctrl'"});const C="";return(k,I)=>{var N;return a(),i("div",{class:"VPNavBarSearch",style:Fe({"--vp-meta-key":M.value})},[l(C)==="local"?(a(),i(T,{key:0},[w.value?(a(),b(l(e),{key:0,placeholder:v.value,onClose:I[0]||(I[0]=L=>w.value=!1)},null,8,["placeholder"])):f("",!0),c("div",tr,[h(Ee,{placeholder:v.value,onClick:I[1]||(I[1]=L=>w.value=!0)},null,8,["placeholder"])])],64)):l(C)==="algolia"?(a(),i(T,{key:1},[r.value?(a(),b(l(t),{key:0,algolia:((N=l(s).search)==null?void 0:N.options)??l(s).algolia,onVnodeBeforeMount:I[2]||(I[2]=L=>d.value=!0)},null,8,["algolia"])):f("",!0),d.value?f("",!0):(a(),i("div",nr,[h(Ee,{placeholder:v.value,onClick:_},null,8,["placeholder"])]))],64)):f("",!0)],4)}}});const or=g({__name:"VPNavBarSocialLinks",setup(n){const{theme:e}=V();return(t,s)=>l(e).socialLinks?(a(),b(Ce,{key:0,class:"VPNavBarSocialLinks",links:l(e).socialLinks},null,8,["links"])):f("",!0)}});const ar=m(or,[["__scopeId","data-v-0394ad82"]]),rr=["href"],lr=g({__name:"VPNavBarTitle",setup(n){const{site:e,theme:t}=V(),{hasSidebar:s}=z(),{currentLang:o}=Z();return(r,d)=>(a(),i("div",{class:B(["VPNavBarTitle",{"has-sidebar":l(s)}])},[c("a",{class:"title",href:l(t).logoLink??l(J)(l(o).link)},[u(r.$slots,"nav-bar-title-before",{},void 
0,!0),l(t).logo?(a(),b(Le,{key:0,class:"logo",image:l(t).logo},null,8,["image"])):f("",!0),l(t).siteTitle?(a(),i(T,{key:1},[x(S(l(t).siteTitle),1)],64)):l(t).siteTitle===void 0?(a(),i(T,{key:2},[x(S(l(e).title),1)],64)):f("",!0),u(r.$slots,"nav-bar-title-after",{},void 0,!0)],8,rr)],2))}});const ir=m(lr,[["__scopeId","data-v-86d1bed8"]]),cr={},ur={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},dr=c("path",{d:"M0 0h24v24H0z",fill:"none"},null,-1),_r=c("path",{d:" M12.87 15.07l-2.54-2.51.03-.03c1.74-1.94 2.98-4.17 3.71-6.53H17V4h-7V2H8v2H1v1.99h11.17C11.5 7.92 10.44 9.75 9 11.35 8.07 10.32 7.3 9.19 6.69 8h-2c.73 1.63 1.73 3.17 2.98 4.56l-5.09 5.02L4 19l5-5 3.11 3.11.76-2.04zM18.5 10h-2L12 22h2l1.12-3h4.75L21 22h2l-4.5-12zm-2.62 7l1.62-4.33L19.12 17h-3.24z ",class:"css-c4d79v"},null,-1),vr=[dr,_r];function pr(n,e){return a(),i("svg",ur,vr)}const Ke=m(cr,[["render",pr]]),hr={class:"items"},fr={class:"title"},mr=g({__name:"VPNavBarTranslations",setup(n){const{theme:e}=V(),{localeLinks:t,currentLang:s}=Z({correspondingLink:!0});return(o,r)=>l(t).length&&l(s).label?(a(),b(Be,{key:0,class:"VPNavBarTranslations",icon:Ke,label:l(e).langMenuLabel||"Change language"},{default:p(()=>[c("div",hr,[c("p",fr,S(l(s).label),1),(a(!0),i(T,null,A(l(t),d=>(a(),b(ue,{key:d.link,item:d},null,8,["item"]))),128))])]),_:1},8,["label"])):f("",!0)}});const gr=m(mr,[["__scopeId","data-v-74abcbb9"]]),yr=n=>(E("data-v-0937f67c"),n=n(),D(),n),br={class:"container"},kr={class:"title"},$r={class:"content"},Pr=yr(()=>c("div",{class:"curtain"},null,-1)),Vr={class:"content-body"},wr=g({__name:"VPNavBar",props:{isScreenOpen:{type:Boolean}},emits:["toggle-screen"],setup(n){const{y:e}=Ge(),{hasSidebar:t}=z(),{frontmatter:s}=V(),o=P({});return ge(()=>{var r;o.value={"has-sidebar":t.value,top:((r=s.value)==null?void 
0:r.layout)==="home"&&e.value===0}}),(r,d)=>(a(),i("div",{class:B(["VPNavBar",o.value])},[c("div",br,[c("div",kr,[h(ir,null,{"nav-bar-title-before":p(()=>[u(r.$slots,"nav-bar-title-before",{},void 0,!0)]),"nav-bar-title-after":p(()=>[u(r.$slots,"nav-bar-title-after",{},void 0,!0)]),_:3})]),c("div",$r,[Pr,c("div",Vr,[u(r.$slots,"nav-bar-content-before",{},void 0,!0),h(sr,{class:"search"}),h(Ya,{class:"menu"}),h(gr,{class:"translations"}),h(Oo,{class:"appearance"}),h(ar,{class:"social-links"}),h(Aa,{class:"extra"}),u(r.$slots,"nav-bar-content-after",{},void 0,!0),h(Fa,{class:"hamburger",active:r.isScreenOpen,onClick:d[0]||(d[0]=v=>r.$emit("toggle-screen"))},null,8,["active"])])])])],2))}});const Sr=m(wr,[["__scopeId","data-v-0937f67c"]]);function Lr(n){if(Array.isArray(n)){for(var e=0,t=Array(n.length);e1),j=[],ae=!1,Ne=-1,Y=void 0,O=void 0,X=void 0,Ye=function(e){return j.some(function(t){return!!(t.options.allowTouchMove&&t.options.allowTouchMove(e))})},re=function(e){var t=e||window.event;return Ye(t.target)||t.touches.length>1?!0:(t.preventDefault&&t.preventDefault(),!1)},Mr=function(e){if(X===void 0){var t=!!e&&e.reserveScrollBarGap===!0,s=window.innerWidth-document.documentElement.clientWidth;if(t&&s>0){var o=parseInt(window.getComputedStyle(document.body).getPropertyValue("padding-right"),10);X=document.body.style.paddingRight,document.body.style.paddingRight=o+s+"px"}}Y===void 0&&(Y=document.body.style.overflow,document.body.style.overflow="hidden")},Tr=function(){X!==void 0&&(document.body.style.paddingRight=X,X=void 0),Y!==void 0&&(document.body.style.overflow=Y,Y=void 0)},Br=function(){return window.requestAnimationFrame(function(){if(O===void 0){O={position:document.body.style.position,top:document.body.style.top,left:document.body.style.left};var e=window,t=e.scrollY,s=e.scrollX,o=e.innerHeight;document.body.style.position="fixed",document.body.style.top=-t,document.body.style.left=-s,setTimeout(function(){return 
window.requestAnimationFrame(function(){var r=o-window.innerHeight;r&&t>=o&&(document.body.style.top=-(t+r))})},300)}})},Cr=function(){if(O!==void 0){var e=-parseInt(document.body.style.top,10),t=-parseInt(document.body.style.left,10);document.body.style.position=O.position,document.body.style.top=O.top,document.body.style.left=O.left,window.scrollTo(t,e),O=void 0}},Ir=function(e){return e?e.scrollHeight-e.scrollTop<=e.clientHeight:!1},Nr=function(e,t){var s=e.targetTouches[0].clientY-Ne;return Ye(e.target)?!1:t&&t.scrollTop===0&&s>0||Ir(t)&&s<0?re(e):(e.stopPropagation(),!0)},Xe=function(e,t){if(!e){console.error("disableBodyScroll unsuccessful - targetElement must be provided when calling disableBodyScroll on IOS devices.");return}if(!j.some(function(o){return o.targetElement===e})){var s={targetElement:e,options:t||{}};j=[].concat(Lr(j),[s]),oe?Br():Mr(t),oe&&(e.ontouchstart=function(o){o.targetTouches.length===1&&(Ne=o.targetTouches[0].clientY)},e.ontouchmove=function(o){o.targetTouches.length===1&&Nr(o,e)},ae||(document.addEventListener("touchmove",re,Ie?{passive:!1}:void 0),ae=!0))}},Qe=function(){oe&&(j.forEach(function(e){e.targetElement.ontouchstart=null,e.targetElement.ontouchmove=null}),ae&&(document.removeEventListener("touchmove",re,Ie?{passive:!1}:void 0),ae=!1),Ne=-1),oe?Cr():Tr(),j=[]};const Ar=g({__name:"VPNavScreenMenuLink",props:{item:{}},setup(n){const e=be("close-screen");return(t,s)=>(a(),b(F,{class:"VPNavScreenMenuLink",href:t.item.link,target:t.item.target,rel:t.item.rel,onClick:l(e)},{default:p(()=>[x(S(t.item.text),1)]),_:1},8,["href","target","rel","onClick"]))}});const xr=m(Ar,[["__scopeId","data-v-30be0acb"]]),Hr={},Er={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},Dr=c("path",{d:"M18.9,10.9h-6v-6c0-0.6-0.4-1-1-1s-1,0.4-1,1v6h-6c-0.6,0-1,0.4-1,1s0.4,1,1,1h6v6c0,0.6,0.4,1,1,1s1-0.4,1-1v-6h6c0.6,0,1-0.4,1-1S19.5,10.9,18.9,10.9z"},null,-1),zr=[Dr];function Fr(n,e){return 
a(),i("svg",Er,zr)}const Or=m(Hr,[["render",Fr]]),Gr=g({__name:"VPNavScreenMenuGroupLink",props:{item:{}},setup(n){const e=be("close-screen");return(t,s)=>(a(),b(F,{class:"VPNavScreenMenuGroupLink",href:t.item.link,target:t.item.target,rel:t.item.rel,onClick:l(e)},{default:p(()=>[x(S(t.item.text),1)]),_:1},8,["href","target","rel","onClick"]))}});const Je=m(Gr,[["__scopeId","data-v-6656c42a"]]),Rr={class:"VPNavScreenMenuGroupSection"},Ur={key:0,class:"title"},jr=g({__name:"VPNavScreenMenuGroupSection",props:{text:{},items:{}},setup(n){return(e,t)=>(a(),i("div",Rr,[e.text?(a(),i("p",Ur,S(e.text),1)):f("",!0),(a(!0),i(T,null,A(e.items,s=>(a(),b(Je,{key:s.text,item:s},null,8,["item"]))),128))]))}});const qr=m(jr,[["__scopeId","data-v-8133b170"]]),Wr=["aria-controls","aria-expanded"],Kr={class:"button-text"},Yr=["id"],Xr={key:1,class:"group"},Qr=g({__name:"VPNavScreenMenuGroup",props:{text:{},items:{}},setup(n){const e=n,t=P(!1),s=$(()=>`NavScreenGroup-${e.text.replace(" ","-").toLowerCase()}`);function o(){t.value=!t.value}return(r,d)=>(a(),i("div",{class:B(["VPNavScreenMenuGroup",{open:t.value}])},[c("button",{class:"button","aria-controls":s.value,"aria-expanded":t.value,onClick:o},[c("span",Kr,S(r.text),1),h(Or,{class:"button-icon"})],8,Wr),c("div",{id:s.value,class:"items"},[(a(!0),i(T,null,A(r.items,v=>(a(),i(T,{key:v.text},["link"in v?(a(),i("div",{key:v.text,class:"item"},[h(Je,{item:v},null,8,["item"])])):(a(),i("div",Xr,[h(qr,{text:v.text,items:v.items},null,8,["text","items"])]))],64))),128))],8,Yr)],2))}});const Jr=m(Qr,[["__scopeId","data-v-338a1689"]]),Zr={key:0,class:"VPNavScreenMenu"},el=g({__name:"VPNavScreenMenu",setup(n){const{theme:e}=V();return(t,s)=>l(e).nav?(a(),i("nav",Zr,[(a(!0),i(T,null,A(l(e).nav,o=>(a(),i(T,{key:o.text},["link"in 
o?(a(),b(xr,{key:0,item:o},null,8,["item"])):(a(),b(Jr,{key:1,text:o.text||"",items:o.items},null,8,["text","items"]))],64))),128))])):f("",!0)}}),tl={key:0,class:"VPNavScreenAppearance"},nl={class:"text"},sl=g({__name:"VPNavScreenAppearance",setup(n){const{site:e,theme:t}=V();return(s,o)=>l(e).appearance?(a(),i("div",tl,[c("p",nl,S(l(t).darkModeSwitchLabel||"Appearance"),1),h(Me)])):f("",!0)}});const ol=m(sl,[["__scopeId","data-v-add8f686"]]),al={class:"list"},rl=g({__name:"VPNavScreenTranslations",setup(n){const{localeLinks:e,currentLang:t}=Z({correspondingLink:!0}),s=P(!1);function o(){s.value=!s.value}return(r,d)=>l(e).length&&l(t).label?(a(),i("div",{key:0,class:B(["VPNavScreenTranslations",{open:s.value}])},[c("button",{class:"title",onClick:o},[h(Ke,{class:"icon lang"}),x(" "+S(l(t).label)+" ",1),h(We,{class:"icon chevron"})]),c("ul",al,[(a(!0),i(T,null,A(l(e),v=>(a(),i("li",{key:v.link,class:"item"},[h(F,{class:"link",href:v.link},{default:p(()=>[x(S(v.text),1)]),_:2},1032,["href"])]))),128))])],2)):f("",!0)}});const ll=m(rl,[["__scopeId","data-v-d72aa483"]]),il=g({__name:"VPNavScreenSocialLinks",setup(n){const{theme:e}=V();return(t,s)=>l(e).socialLinks?(a(),b(Ce,{key:0,class:"VPNavScreenSocialLinks",links:l(e).socialLinks},null,8,["links"])):f("",!0)}}),cl={class:"container"},ul=g({__name:"VPNavScreen",props:{open:{type:Boolean}},setup(n){const e=P(null);function t(){Xe(e.value,{reserveScrollBarGap:!0})}function s(){Qe()}return(o,r)=>(a(),b(he,{name:"fade",onEnter:t,onAfterLeave:s},{default:p(()=>[o.open?(a(),i("div",{key:0,class:"VPNavScreen",ref_key:"screen",ref:e,id:"VPNavScreen"},[c("div",cl,[u(o.$slots,"nav-screen-content-before",{},void 0,!0),h(el,{class:"menu"}),h(ll,{class:"translations"}),h(ol,{class:"appearance"}),h(il,{class:"social-links"}),u(o.$slots,"nav-screen-content-after",{},void 0,!0)])],512)):f("",!0)]),_:3}))}});const 
dl=m(ul,[["__scopeId","data-v-69fcc70f"]]),_l={class:"VPNav"},vl=g({__name:"VPNav",setup(n){const{isScreenOpen:e,closeScreen:t,toggleScreen:s}=yo();return ne("close-screen",t),(o,r)=>(a(),i("header",_l,[h(Sr,{"is-screen-open":l(e),onToggleScreen:l(s)},{"nav-bar-title-before":p(()=>[u(o.$slots,"nav-bar-title-before",{},void 0,!0)]),"nav-bar-title-after":p(()=>[u(o.$slots,"nav-bar-title-after",{},void 0,!0)]),"nav-bar-content-before":p(()=>[u(o.$slots,"nav-bar-content-before",{},void 0,!0)]),"nav-bar-content-after":p(()=>[u(o.$slots,"nav-bar-content-after",{},void 0,!0)]),_:3},8,["is-screen-open","onToggleScreen"]),h(dl,{open:l(e)},{"nav-screen-content-before":p(()=>[u(o.$slots,"nav-screen-content-before",{},void 0,!0)]),"nav-screen-content-after":p(()=>[u(o.$slots,"nav-screen-content-after",{},void 0,!0)]),_:3},8,["open"])]))}});const pl=m(vl,[["__scopeId","data-v-7e5bc4a5"]]),hl=n=>(E("data-v-9b797284"),n=n(),D(),n),fl=["role","tabindex"],ml=hl(()=>c("div",{class:"indicator"},null,-1)),gl=["onKeydown"],yl={key:1,class:"items"},bl=g({__name:"VPSidebarItem",props:{item:{},depth:{}},setup(n){const e=n,{collapsed:t,collapsible:s,isLink:o,isActiveLink:r,hasActiveLink:d,hasChildren:v,toggle:_}=Ft($(()=>e.item)),y=$(()=>v.value?"section":"div"),w=$(()=>o.value?"a":"div"),M=$(()=>v.value?e.depth+2===7?"p":`h${e.depth+2}`:"p"),C=$(()=>o.value?void 0:"button"),k=$(()=>[[`level-${e.depth}`],{collapsible:s.value},{collapsed:t.value},{"is-link":o.value},{"is-active":r.value},{"has-active":d.value}]);function I(L){"key"in L&&L.key!=="Enter"||!e.item.link&&_()}function N(){e.item.link&&_()}return(L,W)=>{const ee=U("VPSidebarItem",!0);return 
a(),b(G(y.value),{class:B(["VPSidebarItem",k.value])},{default:p(()=>[L.item.text?(a(),i("div",te({key:0,class:"item",role:C.value},dt(L.item.items?{click:I,keydown:I}:{},!0),{tabindex:L.item.items&&0}),[ml,L.item.link?(a(),b(F,{key:0,tag:w.value,class:"link",href:L.item.link},{default:p(()=>[(a(),b(G(M.value),{class:"text",innerHTML:L.item.text},null,8,["innerHTML"]))]),_:1},8,["tag","href"])):(a(),b(G(M.value),{key:1,class:"text",innerHTML:L.item.text},null,8,["innerHTML"])),L.item.collapsed!=null?(a(),i("div",{key:2,class:"caret",role:"button","aria-label":"toggle section",onClick:N,onKeydown:ut(N,["enter"]),tabindex:"0"},[h(Se,{class:"caret-icon"})],40,gl)):f("",!0)],16,fl)):f("",!0),L.item.items&&L.item.items.length?(a(),i("div",yl,[L.depth<5?(a(!0),i(T,{key:0},A(L.item.items,K=>(a(),b(ee,{key:K.text,item:K,depth:L.depth+1},null,8,["item","depth"]))),128)):f("",!0)])):f("",!0)]),_:1},8,["class"])}}});const kl=m(bl,[["__scopeId","data-v-9b797284"]]),Ze=n=>(E("data-v-845b8fc6"),n=n(),D(),n),$l=Ze(()=>c("div",{class:"curtain"},null,-1)),Pl={class:"nav",id:"VPSidebarNav","aria-labelledby":"sidebar-aria-label",tabindex:"-1"},Vl=Ze(()=>c("span",{class:"visually-hidden",id:"sidebar-aria-label"}," Sidebar Navigation ",-1)),wl=g({__name:"VPSidebar",props:{open:{type:Boolean}},setup(n){const e=n,{sidebarGroups:t,hasSidebar:s}=z();let o=P(null);function r(){Xe(o.value,{reserveScrollBarGap:!0})}function d(){Qe()}return ge(async()=>{var v;e.open?(r(),(v=o.value)==null||v.focus()):d()}),(v,_)=>l(s)?(a(),i("aside",{key:0,class:B(["VPSidebar",{open:v.open}]),ref_key:"navEl",ref:o,onClick:_[0]||(_[0]=_t(()=>{},["stop"]))},[$l,c("nav",Pl,[Vl,u(v.$slots,"sidebar-nav-before",{},void 0,!0),(a(!0),i(T,null,A(l(t),y=>(a(),i("div",{key:y.text,class:"group"},[h(kl,{item:y,depth:0},null,8,["item"])]))),128)),u(v.$slots,"sidebar-nav-after",{},void 0,!0)])],2)):f("",!0)}});const Sl=m(wl,[["__scopeId","data-v-845b8fc6"]]),Ll=g({__name:"VPSkipLink",setup(n){const 
e=ce(),t=P();R(()=>e.path,()=>t.value.focus());function s({target:o}){const r=document.getElementById(decodeURIComponent(o.hash).slice(1));if(r){const d=()=>{r.removeAttribute("tabindex"),r.removeEventListener("blur",d)};r.setAttribute("tabindex","-1"),r.addEventListener("blur",d),r.focus(),window.scrollTo(0,0)}}return(o,r)=>(a(),i(T,null,[c("span",{ref_key:"backToTop",ref:t,tabindex:"-1"},null,512),c("a",{href:"#VPContent",class:"VPSkipLink visually-hidden",onClick:s}," Skip to content ")],64))}});const Ml=m(Ll,[["__scopeId","data-v-ae3e3f51"]]),Tl=g({__name:"Layout",setup(n){const{isOpen:e,open:t,close:s}=z(),o=ce();R(()=>o.path,s),zt(e,s),ne("close-sidebar",s),ne("is-sidebar-open",e);const{frontmatter:r}=V(),d=vt(),v=$(()=>!!d["home-hero-image"]);return ne("hero-image-slot-exists",v),(_,y)=>{const w=U("Content");return l(r).layout!==!1?(a(),i("div",{key:0,class:B(["Layout",l(r).pageClass])},[u(_.$slots,"layout-top",{},void 0,!0),h(Ml),h(gt,{class:"backdrop",show:l(e),onClick:l(s)},null,8,["show","onClick"]),l(r).navbar!==!1?(a(),b(pl,{key:0},{"nav-bar-title-before":p(()=>[u(_.$slots,"nav-bar-title-before",{},void 0,!0)]),"nav-bar-title-after":p(()=>[u(_.$slots,"nav-bar-title-after",{},void 0,!0)]),"nav-bar-content-before":p(()=>[u(_.$slots,"nav-bar-content-before",{},void 0,!0)]),"nav-bar-content-after":p(()=>[u(_.$slots,"nav-bar-content-after",{},void 0,!0)]),"nav-screen-content-before":p(()=>[u(_.$slots,"nav-screen-content-before",{},void 0,!0)]),"nav-screen-content-after":p(()=>[u(_.$slots,"nav-screen-content-after",{},void 0,!0)]),_:3})):f("",!0),h(go,{open:l(e),onOpenMenu:l(t)},null,8,["open","onOpenMenu"]),h(Sl,{open:l(e)},{"sidebar-nav-before":p(()=>[u(_.$slots,"sidebar-nav-before",{},void 0,!0)]),"sidebar-nav-after":p(()=>[u(_.$slots,"sidebar-nav-after",{},void 0,!0)]),_:3},8,["open"]),h(Ys,null,{"page-top":p(()=>[u(_.$slots,"page-top",{},void 0,!0)]),"page-bottom":p(()=>[u(_.$slots,"page-bottom",{},void 
0,!0)]),"not-found":p(()=>[u(_.$slots,"not-found",{},void 0,!0)]),"home-hero-before":p(()=>[u(_.$slots,"home-hero-before",{},void 0,!0)]),"home-hero-info":p(()=>[u(_.$slots,"home-hero-info",{},void 0,!0)]),"home-hero-image":p(()=>[u(_.$slots,"home-hero-image",{},void 0,!0)]),"home-hero-after":p(()=>[u(_.$slots,"home-hero-after",{},void 0,!0)]),"home-features-before":p(()=>[u(_.$slots,"home-features-before",{},void 0,!0)]),"home-features-after":p(()=>[u(_.$slots,"home-features-after",{},void 0,!0)]),"doc-footer-before":p(()=>[u(_.$slots,"doc-footer-before",{},void 0,!0)]),"doc-before":p(()=>[u(_.$slots,"doc-before",{},void 0,!0)]),"doc-after":p(()=>[u(_.$slots,"doc-after",{},void 0,!0)]),"doc-top":p(()=>[u(_.$slots,"doc-top",{},void 0,!0)]),"doc-bottom":p(()=>[u(_.$slots,"doc-bottom",{},void 0,!0)]),"aside-top":p(()=>[u(_.$slots,"aside-top",{},void 0,!0)]),"aside-bottom":p(()=>[u(_.$slots,"aside-bottom",{},void 0,!0)]),"aside-outline-before":p(()=>[u(_.$slots,"aside-outline-before",{},void 0,!0)]),"aside-outline-after":p(()=>[u(_.$slots,"aside-outline-after",{},void 0,!0)]),"aside-ads-before":p(()=>[u(_.$slots,"aside-ads-before",{},void 0,!0)]),"aside-ads-after":p(()=>[u(_.$slots,"aside-ads-after",{},void 0,!0)]),_:3}),h(eo),u(_.$slots,"layout-bottom",{},void 0,!0)],2)):(a(),b(w,{key:1}))}}});const Bl=m(Tl,[["__scopeId","data-v-255ec12d"]]);const Il={Layout:Bl,enhanceApp:({app:n})=>{n.component("Badge",ht)}};export{Il as t}; diff --git a/assets/chunks/theme.fe7zvf8B.js b/assets/chunks/theme.fe7zvf8B.js new file mode 100644 index 000000000..da3245dbd --- /dev/null +++ b/assets/chunks/theme.fe7zvf8B.js @@ -0,0 +1 @@ +import{d as g,o as a,c as l,r as u,n as I,a as H,t as L,_ as m,b as k,w as h,T as le,e as f,u as ze,i as Ee,l as De,f as ce,g as b,h as S,j as G,k as c,m as i,p as E,q as D,s as x,v as K,x as ae,y as U,z as ee,A as ue,B as Pe,C as Fe,D as j,F as M,E as A,G as de,H as Y,I as _,J as O,K as we,L as te,M as Z,N as se,O as Oe,P as xe,Q as Ve,R as Ge,S as Ue,U 
as je,V as qe,W as Le,X as Se,Y as Re,Z as Ke,$ as We,a0 as Ye}from"./framework.bMtwhlie.js";const Je=g({__name:"VPBadge",props:{text:{},type:{default:"tip"}},setup(s){return(e,t)=>(a(),l("span",{class:I(["VPBadge",e.type])},[u(e.$slots,"default",{},()=>[H(L(e.text),1)],!0)],2))}}),Ze=m(Je,[["__scopeId","data-v-9613cc9f"]]),Qe={key:0,class:"VPBackdrop"},Xe=g({__name:"VPBackdrop",props:{show:{type:Boolean}},setup(s){return(e,t)=>(a(),k(le,{name:"fade"},{default:h(()=>[e.show?(a(),l("div",Qe)):f("",!0)]),_:1}))}}),et=m(Xe,[["__scopeId","data-v-c79a1216"]]),P=ze;function tt(s,e){let t,n=!1;return()=>{t&&clearTimeout(t),n?t=setTimeout(s,e):(s(),(n=!0)&&setTimeout(()=>n=!1,e))}}function re(s){return/^\//.test(s)?s:`/${s}`}function ve(s){const{pathname:e,search:t,hash:n,protocol:o}=new URL(s,"http://a.com");if(Ee(s)||s.startsWith("#")||!o.startsWith("http")||/\.(?!html|md)\w+($|\?)/i.test(s)&&De(s))return s;const{site:r}=P(),d=e.endsWith("/")||e.endsWith(".html")?s:s.replace(/(?:(^\.+)\/)?.*$/,`$1${e.replace(/(\.md)?$/,r.value.cleanUrls?"":".html")}${t}${n}`);return ce(d)}function J({removeCurrent:s=!0,correspondingLink:e=!1}={}){const{site:t,localeIndex:n,page:o,theme:r}=P(),d=b(()=>{var v,$;return{label:(v=t.value.locales[n.value])==null?void 0:v.label,link:(($=t.value.locales[n.value])==null?void 0:$.link)||(n.value==="root"?"/":`/${n.value}/`)}});return{localeLinks:b(()=>Object.entries(t.value.locales).flatMap(([v,$])=>s&&d.value.label===$.label?[]:{text:$.label,link:st($.link||(v==="root"?"/":`/${v}/`),r.value.i18nRouting!==!1&&e,o.value.relativePath.slice(d.value.link.length-1),!t.value.cleanUrls)})),currentLang:d}}function st(s,e,t,n){return e?s.replace(/\/$/,"")+re(t.replace(/(^|\/)index\.md$/,"$1").replace(/\.md$/,n?".html":"")):s}const 
nt=s=>(E("data-v-f87ff6e4"),s=s(),D(),s),ot={class:"NotFound"},at={class:"code"},rt={class:"title"},it=nt(()=>c("div",{class:"divider"},null,-1)),lt={class:"quote"},ct={class:"action"},ut=["href","aria-label"],dt=g({__name:"NotFound",setup(s){const{site:e,theme:t}=P(),{localeLinks:n}=J({removeCurrent:!1}),o=S("/");return G(()=>{var d;const r=window.location.pathname.replace(e.value.base,"").replace(/(^.*?\/).*$/,"/$1");n.value.length&&(o.value=((d=n.value.find(({link:p})=>p.startsWith(r)))==null?void 0:d.link)||n.value[0].link)}),(r,d)=>{var p,v,$,y,V;return a(),l("div",ot,[c("p",at,L(((p=i(t).notFound)==null?void 0:p.code)??"404"),1),c("h1",rt,L(((v=i(t).notFound)==null?void 0:v.title)??"PAGE NOT FOUND"),1),it,c("blockquote",lt,L((($=i(t).notFound)==null?void 0:$.quote)??"But if you don't change your direction, and if you keep looking, you may end up where you are heading."),1),c("div",ct,[c("a",{class:"link",href:i(ce)(o.value),"aria-label":((y=i(t).notFound)==null?void 0:y.linkLabel)??"go to home"},L(((V=i(t).notFound)==null?void 0:V.linkText)??"Take me home"),9,ut)])])}}}),vt=m(dt,[["__scopeId","data-v-f87ff6e4"]]);function Me(s,e){if(Array.isArray(s))return Q(s);if(s==null)return[];e=re(e);const t=Object.keys(s).sort((o,r)=>r.split("/").length-o.split("/").length).find(o=>e.startsWith(re(o))),n=t?s[t]:[];return Array.isArray(n)?Q(n):Q(n.items,n.base)}function ht(s){const e=[];let t=0;for(const n in s){const o=s[n];if(o.items){t=e.push(o);continue}e[t]||e.push({items:[]}),e[t].items.push(o)}return e}function pt(s){const e=[];function t(n){for(const o of n)o.text&&o.link&&e.push({text:o.text,link:o.link,docFooterText:o.docFooterText}),o.items&&t(o.items)}return t(s),e}function ie(s,e){return Array.isArray(e)?e.some(t=>ie(s,t)):x(s,e.link)?!0:e.items?ie(s,e.items):!1}function Q(s,e){return[...s].map(t=>{const n={...t},o=n.base||e;return o&&n.link&&(n.link=o+n.link),n.items&&(n.items=Q(n.items,o)),n})}function 
F(){const{frontmatter:s,page:e,theme:t}=P(),n=ae("(min-width: 960px)"),o=S(!1),r=b(()=>{const T=t.value.sidebar,w=e.value.relativePath;return T?Me(T,w):[]}),d=S(r.value);U(r,(T,w)=>{JSON.stringify(T)!==JSON.stringify(w)&&(d.value=r.value)});const p=b(()=>s.value.sidebar!==!1&&d.value.length>0&&s.value.layout!=="home"),v=b(()=>$?s.value.aside==null?t.value.aside==="left":s.value.aside==="left":!1),$=b(()=>s.value.layout==="home"?!1:s.value.aside!=null?!!s.value.aside:t.value.aside!==!1),y=b(()=>p.value&&n.value),V=b(()=>p.value?ht(d.value):[]);function B(){o.value=!0}function C(){o.value=!1}function N(){o.value?C():B()}return{isOpen:o,sidebar:d,sidebarGroups:V,hasSidebar:p,hasAside:$,leftAside:v,isSidebarEnabled:y,open:B,close:C,toggle:N}}function _t(s,e){let t;ee(()=>{t=s.value?document.activeElement:void 0}),G(()=>{window.addEventListener("keyup",n)}),ue(()=>{window.removeEventListener("keyup",n)});function n(o){o.key==="Escape"&&s.value&&(e(),t==null||t.focus())}}const Ie=S(K?location.hash:"");K&&window.addEventListener("hashchange",()=>{Ie.value=location.hash});function ft(s){const{page:e}=P(),t=S(!1),n=b(()=>s.value.collapsed!=null),o=b(()=>!!s.value.link),r=S(!1),d=()=>{r.value=x(e.value.relativePath,s.value.link)};U([e,s,Ie],d),G(d);const p=b(()=>r.value?!0:s.value.items?ie(e.value.relativePath,s.value.items):!1),v=b(()=>!!(s.value.items&&s.value.items.length));ee(()=>{t.value=!!(n.value&&s.value.collapsed)}),Pe(()=>{(r.value||p.value)&&(t.value=!1)});function $(){n.value&&(t.value=!t.value)}return{collapsed:t,collapsible:n,isLink:o,isActiveLink:r,hasActiveLink:p,hasChildren:v,toggle:$}}function mt(){const{hasSidebar:s}=F(),e=ae("(min-width: 960px)"),t=ae("(min-width: 1280px)");return{isAsideEnabled:b(()=>!t.value&&!e.value?!1:s.value?t.value:e.value)}}const gt=71;function he(s){return typeof s.outline=="object"&&!Array.isArray(s.outline)&&s.outline.label||s.outlineTitle||"On this page"}function pe(s){const e=[...document.querySelectorAll(".VPDoc 
:where(h1,h2,h3,h4,h5,h6)")].filter(t=>t.id&&t.hasChildNodes()).map(t=>{const n=Number(t.tagName[1]);return{title:$t(t),link:"#"+t.id,level:n}});return kt(e,s)}function $t(s){let e="";for(const t of s.childNodes)if(t.nodeType===1){if(t.classList.contains("VPBadge")||t.classList.contains("header-anchor")||t.classList.contains("ignore-header"))continue;e+=t.textContent}else t.nodeType===3&&(e+=t.textContent);return e.trim()}function kt(s,e){if(e===!1)return[];const t=(typeof e=="object"&&!Array.isArray(e)?e.level:e)||2,[n,o]=typeof t=="number"?[t,t]:t==="deep"?[2,6]:t;s=s.filter(d=>d.level>=n&&d.level<=o);const r=[];e:for(let d=0;d=0;v--){const $=s[v];if($.level{requestAnimationFrame(r),window.addEventListener("scroll",n)}),Fe(()=>{d(location.hash)}),ue(()=>{window.removeEventListener("scroll",n)});function r(){if(!t.value)return;const p=[].slice.call(s.value.querySelectorAll(".outline-link")),v=[].slice.call(document.querySelectorAll(".content .header-anchor")).filter(C=>p.some(N=>N.hash===C.hash&&C.offsetParent!==null)),$=window.scrollY,y=window.innerHeight,V=document.body.offsetHeight,B=Math.abs($+y-V)<1;if(v.length&&B){d(v[v.length-1].hash);return}for(let C=0;C{const o=j("VPDocOutlineItem",!0);return a(),l("ul",{class:I(t.root?"root":"nested")},[(a(!0),l(M,null,A(t.headers,({children:r,link:d,title:p})=>(a(),l("li",null,[c("a",{class:"outline-link",href:d,onClick:e,title:p},L(p),9,Pt),r!=null&&r.length?(a(),k(o,{key:0,headers:r},null,8,["headers"])):f("",!0)]))),256))],2)}}}),_e=m(wt,[["__scopeId","data-v-d0ee3533"]]),Vt=s=>(E("data-v-d330b1bb"),s=s(),D(),s),Lt={class:"content"},St={class:"outline-title",role:"heading","aria-level":"2"},Mt={"aria-labelledby":"doc-outline-aria-label"},It=Vt(()=>c("span",{class:"visually-hidden",id:"doc-outline-aria-label"}," Table of Contents for current page ",-1)),Ct=g({__name:"VPDocAsideOutline",setup(s){const{frontmatter:e,theme:t}=P(),n=de([]);Y(()=>{n.value=pe(e.value.outline??t.value.outline)});const o=S(),r=S();return 
bt(o,r),(d,p)=>(a(),l("div",{class:I(["VPDocAsideOutline",{"has-outline":n.value.length>0}]),ref_key:"container",ref:o,role:"navigation"},[c("div",Lt,[c("div",{class:"outline-marker",ref_key:"marker",ref:r},null,512),c("div",St,L(i(he)(i(t))),1),c("nav",Mt,[It,_(_e,{headers:n.value,root:!0},null,8,["headers"])])])],2))}}),Tt=m(Ct,[["__scopeId","data-v-d330b1bb"]]),Bt={class:"VPDocAsideCarbonAds"},Nt=g({__name:"VPDocAsideCarbonAds",props:{carbonAds:{}},setup(s){const e=()=>null;return(t,n)=>(a(),l("div",Bt,[_(i(e),{"carbon-ads":t.carbonAds},null,8,["carbon-ads"])]))}}),At=s=>(E("data-v-3f215769"),s=s(),D(),s),Ht={class:"VPDocAside"},zt=At(()=>c("div",{class:"spacer"},null,-1)),Et=g({__name:"VPDocAside",setup(s){const{theme:e}=P();return(t,n)=>(a(),l("div",Ht,[u(t.$slots,"aside-top",{},void 0,!0),u(t.$slots,"aside-outline-before",{},void 0,!0),_(Tt),u(t.$slots,"aside-outline-after",{},void 0,!0),zt,u(t.$slots,"aside-ads-before",{},void 0,!0),i(e).carbonAds?(a(),k(Nt,{key:0,"carbon-ads":i(e).carbonAds},null,8,["carbon-ads"])):f("",!0),u(t.$slots,"aside-ads-after",{},void 0,!0),u(t.$slots,"aside-bottom",{},void 0,!0)]))}}),Dt=m(Et,[["__scopeId","data-v-3f215769"]]);function Ft(){const{theme:s,page:e}=P();return b(()=>{const{text:t="Edit this page",pattern:n=""}=s.value.editLink||{};let o;return typeof n=="function"?o=n(e.value):o=n.replace(/:path/g,e.value.filePath),{url:o,text:t}})}function Ot(){const{page:s,theme:e,frontmatter:t}=P();return b(()=>{var v,$,y,V,B,C,N,T;const n=Me(e.value.sidebar,s.value.relativePath),o=pt(n),r=o.findIndex(w=>x(s.value.relativePath,w.link)),d=((v=e.value.docFooter)==null?void 0:v.prev)===!1&&!t.value.prev||t.value.prev===!1,p=(($=e.value.docFooter)==null?void 0:$.next)===!1&&!t.value.next||t.value.next===!1;return{prev:d?void 0:{text:(typeof t.value.prev=="string"?t.value.prev:typeof t.value.prev=="object"?t.value.prev.text:void 0)??((y=o[r-1])==null?void 0:y.docFooterText)??((V=o[r-1])==null?void 0:V.text),link:(typeof 
t.value.prev=="object"?t.value.prev.link:void 0)??((B=o[r-1])==null?void 0:B.link)},next:p?void 0:{text:(typeof t.value.next=="string"?t.value.next:typeof t.value.next=="object"?t.value.next.text:void 0)??((C=o[r+1])==null?void 0:C.docFooterText)??((N=o[r+1])==null?void 0:N.text),link:(typeof t.value.next=="object"?t.value.next.link:void 0)??((T=o[r+1])==null?void 0:T.link)}}})}const xt={},Gt={xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24"},Ut=c("path",{d:"M18,23H4c-1.7,0-3-1.3-3-3V6c0-1.7,1.3-3,3-3h7c0.6,0,1,0.4,1,1s-0.4,1-1,1H4C3.4,5,3,5.4,3,6v14c0,0.6,0.4,1,1,1h14c0.6,0,1-0.4,1-1v-7c0-0.6,0.4-1,1-1s1,0.4,1,1v7C21,21.7,19.7,23,18,23z"},null,-1),jt=c("path",{d:"M8,17c-0.3,0-0.5-0.1-0.7-0.3C7,16.5,6.9,16.1,7,15.8l1-4c0-0.2,0.1-0.3,0.3-0.5l9.5-9.5c1.2-1.2,3.2-1.2,4.4,0c1.2,1.2,1.2,3.2,0,4.4l-9.5,9.5c-0.1,0.1-0.3,0.2-0.5,0.3l-4,1C8.2,17,8.1,17,8,17zM9.9,12.5l-0.5,2.1l2.1-0.5l9.3-9.3c0.4-0.4,0.4-1.1,0-1.6c-0.4-0.4-1.2-0.4-1.6,0l0,0L9.9,12.5z M18.5,2.5L18.5,2.5L18.5,2.5z"},null,-1),qt=[Ut,jt];function Rt(s,e){return a(),l("svg",Gt,qt)}const Kt=m(xt,[["render",Rt]]),z=g({__name:"VPLink",props:{tag:{},href:{},noIcon:{type:Boolean},target:{},rel:{}},setup(s){const e=s,t=b(()=>e.tag??(e.href?"a":"span")),n=b(()=>e.href&&we.test(e.href));return(o,r)=>(a(),k(O(t.value),{class:I(["VPLink",{link:o.href,"vp-external-link-icon":n.value,"no-icon":o.noIcon}]),href:o.href?i(ve)(o.href):void 0,target:o.target??(n.value?"_blank":void 0),rel:o.rel??(n.value?"noreferrer":void 0)},{default:h(()=>[u(o.$slots,"default")]),_:3},8,["class","href","target","rel"]))}}),Wt={class:"VPLastUpdated"},Yt=["datetime"],Jt=g({__name:"VPDocFooterLastUpdated",setup(s){const{theme:e,page:t,frontmatter:n,lang:o}=P(),r=b(()=>new Date(n.value.lastUpdated??t.value.lastUpdated)),d=b(()=>r.value.toISOString()),p=S("");return G(()=>{ee(()=>{var v,$,y;p.value=new Intl.DateTimeFormat(($=(v=e.value.lastUpdated)==null?void 0:v.formatOptions)!=null&&$.forceLocale?o.value:void 
0,((y=e.value.lastUpdated)==null?void 0:y.formatOptions)??{dateStyle:"short",timeStyle:"short"}).format(r.value)})}),(v,$)=>{var y;return a(),l("p",Wt,[H(L(((y=i(e).lastUpdated)==null?void 0:y.text)||i(e).lastUpdatedText||"Last updated")+": ",1),c("time",{datetime:d.value},L(p.value),9,Yt)])}}}),Zt=m(Jt,[["__scopeId","data-v-7e05ebdb"]]),Qt={key:0,class:"VPDocFooter"},Xt={key:0,class:"edit-info"},es={key:0,class:"edit-link"},ts={key:1,class:"last-updated"},ss={key:1,class:"prev-next"},ns={class:"pager"},os=["innerHTML"],as=["innerHTML"],rs={class:"pager"},is=["innerHTML"],ls=["innerHTML"],cs=g({__name:"VPDocFooter",setup(s){const{theme:e,page:t,frontmatter:n}=P(),o=Ft(),r=Ot(),d=b(()=>e.value.editLink&&n.value.editLink!==!1),p=b(()=>t.value.lastUpdated&&n.value.lastUpdated!==!1),v=b(()=>d.value||p.value||r.value.prev||r.value.next);return($,y)=>{var V,B,C,N;return v.value?(a(),l("footer",Qt,[u($.$slots,"doc-footer-before",{},void 0,!0),d.value||p.value?(a(),l("div",Xt,[d.value?(a(),l("div",es,[_(z,{class:"edit-link-button",href:i(o).url,"no-icon":!0},{default:h(()=>[_(Kt,{class:"edit-link-icon","aria-label":"edit icon"}),H(" "+L(i(o).text),1)]),_:1},8,["href"])])):f("",!0),p.value?(a(),l("div",ts,[_(Zt)])):f("",!0)])):f("",!0),(V=i(r).prev)!=null&&V.link||(B=i(r).next)!=null&&B.link?(a(),l("nav",ss,[c("div",ns,[(C=i(r).prev)!=null&&C.link?(a(),k(z,{key:0,class:"pager-link prev",href:i(r).prev.link},{default:h(()=>{var T;return[c("span",{class:"desc",innerHTML:((T=i(e).docFooter)==null?void 0:T.prev)||"Previous page"},null,8,os),c("span",{class:"title",innerHTML:i(r).prev.text},null,8,as)]}),_:1},8,["href"])):f("",!0)]),c("div",rs,[(N=i(r).next)!=null&&N.link?(a(),k(z,{key:0,class:"pager-link next",href:i(r).next.link},{default:h(()=>{var T;return[c("span",{class:"desc",innerHTML:((T=i(e).docFooter)==null?void 0:T.next)||"Next 
page"},null,8,is),c("span",{class:"title",innerHTML:i(r).next.text},null,8,ls)]}),_:1},8,["href"])):f("",!0)])])):f("",!0)])):f("",!0)}}}),us=m(cs,[["__scopeId","data-v-48f9bb55"]]),ds={},vs={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},hs=c("path",{d:"M9,19c-0.3,0-0.5-0.1-0.7-0.3c-0.4-0.4-0.4-1,0-1.4l5.3-5.3L8.3,6.7c-0.4-0.4-0.4-1,0-1.4s1-0.4,1.4,0l6,6c0.4,0.4,0.4,1,0,1.4l-6,6C9.5,18.9,9.3,19,9,19z"},null,-1),ps=[hs];function _s(s,e){return a(),l("svg",vs,ps)}const fe=m(ds,[["render",_s]]),fs={key:0,class:"VPDocOutlineDropdown"},ms={key:0,class:"items"},gs=g({__name:"VPDocOutlineDropdown",setup(s){const{frontmatter:e,theme:t}=P(),n=S(!1);Y(()=>{n.value=!1});const o=de([]);return Y(()=>{o.value=pe(e.value.outline??t.value.outline)}),(r,d)=>o.value.length>0?(a(),l("div",fs,[c("button",{onClick:d[0]||(d[0]=p=>n.value=!n.value),class:I({open:n.value})},[H(L(i(he)(i(t)))+" ",1),_(fe,{class:"icon"})],2),n.value?(a(),l("div",ms,[_(_e,{headers:o.value},null,8,["headers"])])):f("",!0)])):f("",!0)}}),$s=m(gs,[["__scopeId","data-v-eadfb36b"]]),ks=s=>(E("data-v-6b87e69f"),s=s(),D(),s),bs={class:"container"},ys=ks(()=>c("div",{class:"aside-curtain"},null,-1)),Ps={class:"aside-container"},ws={class:"aside-content"},Vs={class:"content"},Ls={class:"content-container"},Ss={class:"main"},Ms=g({__name:"VPDoc",setup(s){const{theme:e}=P(),t=te(),{hasSidebar:n,hasAside:o,leftAside:r}=F(),d=b(()=>t.path.replace(/[./]+/g,"_").replace(/_html$/,""));return(p,v)=>{const $=j("Content");return a(),l("div",{class:I(["VPDoc",{"has-sidebar":i(n),"has-aside":i(o)}])},[u(p.$slots,"doc-top",{},void 0,!0),c("div",bs,[i(o)?(a(),l("div",{key:0,class:I(["aside",{"left-aside":i(r)}])},[ys,c("div",Ps,[c("div",ws,[_(Dt,null,{"aside-top":h(()=>[u(p.$slots,"aside-top",{},void 0,!0)]),"aside-bottom":h(()=>[u(p.$slots,"aside-bottom",{},void 0,!0)]),"aside-outline-before":h(()=>[u(p.$slots,"aside-outline-before",{},void 
0,!0)]),"aside-outline-after":h(()=>[u(p.$slots,"aside-outline-after",{},void 0,!0)]),"aside-ads-before":h(()=>[u(p.$slots,"aside-ads-before",{},void 0,!0)]),"aside-ads-after":h(()=>[u(p.$slots,"aside-ads-after",{},void 0,!0)]),_:3})])])],2)):f("",!0),c("div",Vs,[c("div",Ls,[u(p.$slots,"doc-before",{},void 0,!0),_($s),c("main",Ss,[_($,{class:I(["vp-doc",[d.value,i(e).externalLinkIcon&&"external-link-icon-enabled"]])},null,8,["class"])]),_(us,null,{"doc-footer-before":h(()=>[u(p.$slots,"doc-footer-before",{},void 0,!0)]),_:3}),u(p.$slots,"doc-after",{},void 0,!0)])])]),u(p.$slots,"doc-bottom",{},void 0,!0)],2)}}}),Is=m(Ms,[["__scopeId","data-v-6b87e69f"]]),Cs=g({__name:"VPButton",props:{tag:{},size:{default:"medium"},theme:{default:"brand"},text:{},href:{}},setup(s){const e=s,t=b(()=>e.href&&we.test(e.href)),n=b(()=>e.tag||e.href?"a":"button");return(o,r)=>(a(),k(O(n.value),{class:I(["VPButton",[o.size,o.theme]]),href:o.href?i(ve)(o.href):void 0,target:t.value?"_blank":void 0,rel:t.value?"noreferrer":void 0},{default:h(()=>[H(L(o.text),1)]),_:1},8,["class","href","target","rel"]))}}),Ts=m(Cs,[["__scopeId","data-v-c1c5efc1"]]),Bs=["src","alt"],Ns=g({inheritAttrs:!1,__name:"VPImage",props:{image:{},alt:{}},setup(s){return(e,t)=>{const n=j("VPImage",!0);return e.image?(a(),l(M,{key:0},[typeof e.image=="string"||"src"in e.image?(a(),l("img",Z({key:0,class:"VPImage"},typeof e.image=="string"?e.$attrs:{...e.image,...e.$attrs},{src:i(ce)(typeof e.image=="string"?e.image:e.image.src),alt:e.alt??(typeof 
e.image=="string"?"":e.image.alt||"")}),null,16,Bs)):(a(),l(M,{key:1},[_(n,Z({class:"dark",image:e.image.dark,alt:e.image.alt},e.$attrs),null,16,["image","alt"]),_(n,Z({class:"light",image:e.image.light,alt:e.image.alt},e.$attrs),null,16,["image","alt"])],64))],64)):f("",!0)}}}),X=m(Ns,[["__scopeId","data-v-8426fc1a"]]),As=s=>(E("data-v-da5d1713"),s=s(),D(),s),Hs={class:"container"},zs={class:"main"},Es={key:0,class:"name"},Ds=["innerHTML"],Fs=["innerHTML"],Os=["innerHTML"],xs={key:0,class:"actions"},Gs={key:0,class:"image"},Us={class:"image-container"},js=As(()=>c("div",{class:"image-bg"},null,-1)),qs=g({__name:"VPHero",props:{name:{},text:{},tagline:{},image:{},actions:{}},setup(s){const e=se("hero-image-slot-exists");return(t,n)=>(a(),l("div",{class:I(["VPHero",{"has-image":t.image||i(e)}])},[c("div",Hs,[c("div",zs,[u(t.$slots,"home-hero-info",{},()=>[t.name?(a(),l("h1",Es,[c("span",{innerHTML:t.name,class:"clip"},null,8,Ds)])):f("",!0),t.text?(a(),l("p",{key:1,innerHTML:t.text,class:"text"},null,8,Fs)):f("",!0),t.tagline?(a(),l("p",{key:2,innerHTML:t.tagline,class:"tagline"},null,8,Os)):f("",!0)],!0),t.actions?(a(),l("div",xs,[(a(!0),l(M,null,A(t.actions,o=>(a(),l("div",{key:o.link,class:"action"},[_(Ts,{tag:"a",size:"medium",theme:o.theme,text:o.text,href:o.link},null,8,["theme","text","href"])]))),128))])):f("",!0)]),t.image||i(e)?(a(),l("div",Gs,[c("div",Us,[js,u(t.$slots,"home-hero-image",{},()=>[t.image?(a(),k(X,{key:0,class:"image-src",image:t.image},null,8,["image"])):f("",!0)],!0)])])):f("",!0)])],2))}}),Rs=m(qs,[["__scopeId","data-v-da5d1713"]]),Ks=g({__name:"VPHomeHero",setup(s){const{frontmatter:e}=P();return(t,n)=>i(e).hero?(a(),k(Rs,{key:0,class:"VPHomeHero",name:i(e).hero.name,text:i(e).hero.text,tagline:i(e).hero.tagline,image:i(e).hero.image,actions:i(e).hero.actions},{"home-hero-info":h(()=>[u(t.$slots,"home-hero-info")]),"home-hero-image":h(()=>[u(t.$slots,"home-hero-image")]),_:3},8,["name","text","tagline","image","actions"])):f("",!0)}}),Ws=
{},Ys={xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24"},Js=c("path",{d:"M19.9,12.4c0.1-0.2,0.1-0.5,0-0.8c-0.1-0.1-0.1-0.2-0.2-0.3l-7-7c-0.4-0.4-1-0.4-1.4,0s-0.4,1,0,1.4l5.3,5.3H5c-0.6,0-1,0.4-1,1s0.4,1,1,1h11.6l-5.3,5.3c-0.4,0.4-0.4,1,0,1.4c0.2,0.2,0.5,0.3,0.7,0.3s0.5-0.1,0.7-0.3l7-7C19.8,12.6,19.9,12.5,19.9,12.4z"},null,-1),Zs=[Js];function Qs(s,e){return a(),l("svg",Ys,Zs)}const Xs=m(Ws,[["render",Qs]]),en={class:"box"},tn={key:0,class:"icon"},sn=["innerHTML"],nn=["innerHTML"],on=["innerHTML"],an={key:4,class:"link-text"},rn={class:"link-text-value"},ln=g({__name:"VPFeature",props:{icon:{},title:{},details:{},link:{},linkText:{},rel:{},target:{}},setup(s){return(e,t)=>(a(),k(z,{class:"VPFeature",href:e.link,rel:e.rel,target:e.target,"no-icon":!0,tag:e.link?"a":"div"},{default:h(()=>[c("article",en,[typeof e.icon=="object"&&e.icon.wrap?(a(),l("div",tn,[_(X,{image:e.icon,alt:e.icon.alt,height:e.icon.height||48,width:e.icon.width||48},null,8,["image","alt","height","width"])])):typeof e.icon=="object"?(a(),k(X,{key:1,image:e.icon,alt:e.icon.alt,height:e.icon.height||48,width:e.icon.width||48},null,8,["image","alt","height","width"])):e.icon?(a(),l("div",{key:2,class:"icon",innerHTML:e.icon},null,8,sn)):f("",!0),c("h2",{class:"title",innerHTML:e.title},null,8,nn),e.details?(a(),l("p",{key:3,class:"details",innerHTML:e.details},null,8,on)):f("",!0),e.linkText?(a(),l("div",an,[c("p",rn,[H(L(e.linkText)+" ",1),_(Xs,{class:"link-text-icon"})])])):f("",!0)])]),_:1},8,["href","rel","target","tag"]))}}),cn=m(ln,[["__scopeId","data-v-33204567"]]),un={key:0,class:"VPFeatures"},dn={class:"container"},vn={class:"items"},hn=g({__name:"VPFeatures",props:{features:{}},setup(s){const e=s,t=b(()=>{const n=e.features.length;if(n){if(n===2)return"grid-2";if(n===3)return"grid-3";if(n%3===0)return"grid-6";if(n>3)return"grid-4"}else 
return});return(n,o)=>n.features?(a(),l("div",un,[c("div",dn,[c("div",vn,[(a(!0),l(M,null,A(n.features,r=>(a(),l("div",{key:r.title,class:I(["item",[t.value]])},[_(cn,{icon:r.icon,title:r.title,details:r.details,link:r.link,"link-text":r.linkText,rel:r.rel,target:r.target},null,8,["icon","title","details","link","link-text","rel","target"])],2))),128))])])])):f("",!0)}}),pn=m(hn,[["__scopeId","data-v-a6181336"]]),_n=g({__name:"VPHomeFeatures",setup(s){const{frontmatter:e}=P();return(t,n)=>i(e).features?(a(),k(pn,{key:0,class:"VPHomeFeatures",features:i(e).features},null,8,["features"])):f("",!0)}}),fn={class:"VPHome"},mn=g({__name:"VPHome",setup(s){return(e,t)=>{const n=j("Content");return a(),l("div",fn,[u(e.$slots,"home-hero-before",{},void 0,!0),_(Ks,null,{"home-hero-info":h(()=>[u(e.$slots,"home-hero-info",{},void 0,!0)]),"home-hero-image":h(()=>[u(e.$slots,"home-hero-image",{},void 0,!0)]),_:3}),u(e.$slots,"home-hero-after",{},void 0,!0),u(e.$slots,"home-features-before",{},void 0,!0),_(_n),u(e.$slots,"home-features-after",{},void 0,!0),_(n)])}}}),gn=m(mn,[["__scopeId","data-v-d82743a8"]]),$n={},kn={class:"VPPage"};function bn(s,e){const t=j("Content");return a(),l("div",kn,[u(s.$slots,"page-top"),_(t),u(s.$slots,"page-bottom")])}const yn=m($n,[["render",bn]]),Pn=g({__name:"VPContent",setup(s){const{page:e,frontmatter:t}=P(),{hasSidebar:n}=F();return(o,r)=>(a(),l("div",{class:I(["VPContent",{"has-sidebar":i(n),"is-home":i(t).layout==="home"}]),id:"VPContent"},[i(e).isNotFound?u(o.$slots,"not-found",{key:0},()=>[_(vt)],!0):i(t).layout==="page"?(a(),k(yn,{key:1},{"page-top":h(()=>[u(o.$slots,"page-top",{},void 0,!0)]),"page-bottom":h(()=>[u(o.$slots,"page-bottom",{},void 0,!0)]),_:3})):i(t).layout==="home"?(a(),k(gn,{key:2},{"home-hero-before":h(()=>[u(o.$slots,"home-hero-before",{},void 0,!0)]),"home-hero-info":h(()=>[u(o.$slots,"home-hero-info",{},void 0,!0)]),"home-hero-image":h(()=>[u(o.$slots,"home-hero-image",{},void 
0,!0)]),"home-hero-after":h(()=>[u(o.$slots,"home-hero-after",{},void 0,!0)]),"home-features-before":h(()=>[u(o.$slots,"home-features-before",{},void 0,!0)]),"home-features-after":h(()=>[u(o.$slots,"home-features-after",{},void 0,!0)]),_:3})):i(t).layout&&i(t).layout!=="doc"?(a(),k(O(i(t).layout),{key:3})):(a(),k(Is,{key:4},{"doc-top":h(()=>[u(o.$slots,"doc-top",{},void 0,!0)]),"doc-bottom":h(()=>[u(o.$slots,"doc-bottom",{},void 0,!0)]),"doc-footer-before":h(()=>[u(o.$slots,"doc-footer-before",{},void 0,!0)]),"doc-before":h(()=>[u(o.$slots,"doc-before",{},void 0,!0)]),"doc-after":h(()=>[u(o.$slots,"doc-after",{},void 0,!0)]),"aside-top":h(()=>[u(o.$slots,"aside-top",{},void 0,!0)]),"aside-outline-before":h(()=>[u(o.$slots,"aside-outline-before",{},void 0,!0)]),"aside-outline-after":h(()=>[u(o.$slots,"aside-outline-after",{},void 0,!0)]),"aside-ads-before":h(()=>[u(o.$slots,"aside-ads-before",{},void 0,!0)]),"aside-ads-after":h(()=>[u(o.$slots,"aside-ads-after",{},void 0,!0)]),"aside-bottom":h(()=>[u(o.$slots,"aside-bottom",{},void 0,!0)]),_:3}))],2))}}),wn=m(Pn,[["__scopeId","data-v-669faec9"]]),Vn={class:"container"},Ln=["innerHTML"],Sn=["innerHTML"],Mn=g({__name:"VPFooter",setup(s){const{theme:e,frontmatter:t}=P(),{hasSidebar:n}=F();return(o,r)=>i(e).footer&&i(t).footer!==!1?(a(),l("footer",{key:0,class:I(["VPFooter",{"has-sidebar":i(n)}])},[c("div",Vn,[i(e).footer.message?(a(),l("p",{key:0,class:"message",innerHTML:i(e).footer.message},null,8,Ln)):f("",!0),i(e).footer.copyright?(a(),l("p",{key:1,class:"copyright",innerHTML:i(e).footer.copyright},null,8,Sn)):f("",!0)])],2)):f("",!0)}}),In=m(Mn,[["__scopeId","data-v-e315a0ad"]]),Cn={class:"header"},Tn={class:"outline"},Bn=g({__name:"VPLocalNavOutlineDropdown",props:{headers:{},navHeight:{}},setup(s){const e=s,{theme:t}=P(),n=S(!1),o=S(0),r=S();Y(()=>{n.value=!1});function d(){n.value=!n.value,o.value=window.innerHeight+Math.min(window.scrollY-e.navHeight,0)}function 
p($){$.target.classList.contains("outline-link")&&(r.value&&(r.value.style.transition="none"),xe(()=>{n.value=!1}))}function v(){n.value=!1,window.scrollTo({top:0,left:0,behavior:"smooth"})}return($,y)=>(a(),l("div",{class:"VPLocalNavOutlineDropdown",style:Oe({"--vp-vh":o.value+"px"})},[$.headers.length>0?(a(),l("button",{key:0,onClick:d,class:I({open:n.value})},[H(L(i(he)(i(t)))+" ",1),_(fe,{class:"icon"})],2)):(a(),l("button",{key:1,onClick:v},L(i(t).returnToTopLabel||"Return to top"),1)),_(le,{name:"flyout"},{default:h(()=>[n.value?(a(),l("div",{key:0,ref_key:"items",ref:r,class:"items",onClick:p},[c("div",Cn,[c("a",{class:"top-link",href:"#",onClick:v},L(i(t).returnToTopLabel||"Return to top"),1)]),c("div",Tn,[_(_e,{headers:$.headers},null,8,["headers"])])],512)):f("",!0)]),_:1})],4))}}),Nn=m(Bn,[["__scopeId","data-v-1c15a60a"]]),An={},Hn={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},zn=c("path",{d:"M17,11H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h14c0.6,0,1,0.4,1,1S17.6,11,17,11z"},null,-1),En=c("path",{d:"M21,7H3C2.4,7,2,6.6,2,6s0.4-1,1-1h18c0.6,0,1,0.4,1,1S21.6,7,21,7z"},null,-1),Dn=c("path",{d:"M21,15H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h18c0.6,0,1,0.4,1,1S21.6,15,21,15z"},null,-1),Fn=c("path",{d:"M17,19H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h14c0.6,0,1,0.4,1,1S17.6,19,17,19z"},null,-1),On=[zn,En,Dn,Fn];function xn(s,e){return a(),l("svg",Hn,On)}const Gn=m(An,[["render",xn]]),Un=["aria-expanded"],jn={class:"menu-text"},qn=g({__name:"VPLocalNav",props:{open:{type:Boolean}},emits:["open-menu"],setup(s){const{theme:e,frontmatter:t}=P(),{hasSidebar:n}=F(),{y:o}=Ve(),r=de([]),d=S(0);G(()=>{d.value=parseInt(getComputedStyle(document.documentElement).getPropertyValue("--vp-nav-height"))}),Y(()=>{r.value=pe(t.value.outline??e.value.outline)});const 
p=b(()=>r.value.length===0&&!n.value),v=b(()=>({VPLocalNav:!0,fixed:p.value,"reached-top":o.value>=d.value}));return($,y)=>i(t).layout!=="home"&&(!p.value||i(o)>=d.value)?(a(),l("div",{key:0,class:I(v.value)},[i(n)?(a(),l("button",{key:0,class:"menu","aria-expanded":$.open,"aria-controls":"VPSidebarNav",onClick:y[0]||(y[0]=V=>$.$emit("open-menu"))},[_(Gn,{class:"menu-icon"}),c("span",jn,L(i(e).sidebarMenuLabel||"Menu"),1)],8,Un)):f("",!0),_(Nn,{headers:r.value,navHeight:d.value},null,8,["headers","navHeight"])],2)):f("",!0)}}),Rn=m(qn,[["__scopeId","data-v-f84a0989"]]);function Kn(){const s=S(!1);function e(){s.value=!0,window.addEventListener("resize",o)}function t(){s.value=!1,window.removeEventListener("resize",o)}function n(){s.value?t():e()}function o(){window.outerWidth>=768&&t()}const r=te();return U(()=>r.path,t),{isScreenOpen:s,openScreen:e,closeScreen:t,toggleScreen:n}}const Wn={},Yn={class:"VPSwitch",type:"button",role:"switch"},Jn={class:"check"},Zn={key:0,class:"icon"};function Qn(s,e){return a(),l("button",Yn,[c("span",Jn,[s.$slots.default?(a(),l("span",Zn,[u(s.$slots,"default",{},void 0,!0)])):f("",!0)])])}const Xn=m(Wn,[["render",Qn],["__scopeId","data-v-b1685198"]]),eo={},to={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},so=c("path",{d:"M12.1,22c-0.3,0-0.6,0-0.9,0c-5.5-0.5-9.5-5.4-9-10.9c0.4-4.8,4.2-8.6,9-9c0.4,0,0.8,0.2,1,0.5c0.2,0.3,0.2,0.8-0.1,1.1c-2,2.7-1.4,6.4,1.3,8.4c2.1,1.6,5,1.6,7.1,0c0.3-0.2,0.7-0.3,1.1-0.1c0.3,0.2,0.5,0.6,0.5,1c-0.2,2.7-1.5,5.1-3.6,6.8C16.6,21.2,14.4,22,12.1,22zM9.3,4.4c-2.9,1-5,3.6-5.2,6.8c-0.4,4.4,2.8,8.3,7.2,8.7c2.1,0.2,4.2-0.4,5.8-1.8c1.1-0.9,1.9-2.1,2.4-3.4c-2.5,0.9-5.3,0.5-7.5-1.1C9.2,11.4,8.1,7.7,9.3,4.4z"},null,-1),no=[so];function oo(s,e){return a(),l("svg",to,no)}const ao=m(eo,[["render",oo]]),ro={},io={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},lo=Ge('',9),co=[lo];function uo(s,e){return a(),l("svg",io,co)}const 
vo=m(ro,[["render",uo]]),ho=g({__name:"VPSwitchAppearance",setup(s){const{isDark:e}=P(),t=se("toggle-appearance",()=>{e.value=!e.value}),n=b(()=>e.value?"Switch to light theme":"Switch to dark theme");return(o,r)=>(a(),k(Xn,{title:n.value,class:"VPSwitchAppearance","aria-checked":i(e),onClick:i(t)},{default:h(()=>[_(vo,{class:"sun"}),_(ao,{class:"moon"})]),_:1},8,["title","aria-checked","onClick"]))}}),me=m(ho,[["__scopeId","data-v-cbbe1149"]]),po={key:0,class:"VPNavBarAppearance"},_o=g({__name:"VPNavBarAppearance",setup(s){const{site:e}=P();return(t,n)=>i(e).appearance&&i(e).appearance!=="force-dark"?(a(),l("div",po,[_(me)])):f("",!0)}}),fo=m(_o,[["__scopeId","data-v-e6aabb21"]]),ge=S();let Ce=!1,oe=0;function mo(s){const e=S(!1);if(K){!Ce&&go(),oe++;const t=U(ge,n=>{var o,r,d;n===s.el.value||(o=s.el.value)!=null&&o.contains(n)?(e.value=!0,(r=s.onFocus)==null||r.call(s)):(e.value=!1,(d=s.onBlur)==null||d.call(s))});ue(()=>{t(),oe--,oe||$o()})}return Ue(e)}function go(){document.addEventListener("focusin",Te),Ce=!0,ge.value=document.activeElement}function $o(){document.removeEventListener("focusin",Te)}function Te(){ge.value=document.activeElement}const ko={},bo={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},yo=c("path",{d:"M12,16c-0.3,0-0.5-0.1-0.7-0.3l-6-6c-0.4-0.4-0.4-1,0-1.4s1-0.4,1.4,0l5.3,5.3l5.3-5.3c0.4-0.4,1-0.4,1.4,0s0.4,1,0,1.4l-6,6C12.5,15.9,12.3,16,12,16z"},null,-1),Po=[yo];function wo(s,e){return a(),l("svg",bo,Po)}const Be=m(ko,[["render",wo]]),Vo={},Lo={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},So=c("circle",{cx:"12",cy:"12",r:"2"},null,-1),Mo=c("circle",{cx:"19",cy:"12",r:"2"},null,-1),Io=c("circle",{cx:"5",cy:"12",r:"2"},null,-1),Co=[So,Mo,Io];function To(s,e){return a(),l("svg",Lo,Co)}const 
Bo=m(Vo,[["render",To]]),No={class:"VPMenuLink"},Ao=g({__name:"VPMenuLink",props:{item:{}},setup(s){const{page:e}=P();return(t,n)=>(a(),l("div",No,[_(z,{class:I({active:i(x)(i(e).relativePath,t.item.activeMatch||t.item.link,!!t.item.activeMatch)}),href:t.item.link,target:t.item.target,rel:t.item.rel},{default:h(()=>[H(L(t.item.text),1)]),_:1},8,["class","href","target","rel"])]))}}),ne=m(Ao,[["__scopeId","data-v-43f1e123"]]),Ho={class:"VPMenuGroup"},zo={key:0,class:"title"},Eo=g({__name:"VPMenuGroup",props:{text:{},items:{}},setup(s){return(e,t)=>(a(),l("div",Ho,[e.text?(a(),l("p",zo,L(e.text),1)):f("",!0),(a(!0),l(M,null,A(e.items,n=>(a(),l(M,null,["link"in n?(a(),k(ne,{key:0,item:n},null,8,["item"])):f("",!0)],64))),256))]))}}),Do=m(Eo,[["__scopeId","data-v-69e747b5"]]),Fo={class:"VPMenu"},Oo={key:0,class:"items"},xo=g({__name:"VPMenu",props:{items:{}},setup(s){return(e,t)=>(a(),l("div",Fo,[e.items?(a(),l("div",Oo,[(a(!0),l(M,null,A(e.items,n=>(a(),l(M,{key:n.text},["link"in n?(a(),k(ne,{key:0,item:n},null,8,["item"])):(a(),k(Do,{key:1,text:n.text,items:n.items},null,8,["text","items"]))],64))),128))])):f("",!0),u(e.$slots,"default",{},void 0,!0)]))}}),Go=m(xo,[["__scopeId","data-v-e7ea1737"]]),Uo=["aria-expanded","aria-label"],jo={key:0,class:"text"},qo=["innerHTML"],Ro={class:"menu"},Ko=g({__name:"VPFlyout",props:{icon:{},button:{},label:{},items:{}},setup(s){const e=S(!1),t=S();mo({el:t,onBlur:n});function 
n(){e.value=!1}return(o,r)=>(a(),l("div",{class:"VPFlyout",ref_key:"el",ref:t,onMouseenter:r[1]||(r[1]=d=>e.value=!0),onMouseleave:r[2]||(r[2]=d=>e.value=!1)},[c("button",{type:"button",class:"button","aria-haspopup":"true","aria-expanded":e.value,"aria-label":o.label,onClick:r[0]||(r[0]=d=>e.value=!e.value)},[o.button||o.icon?(a(),l("span",jo,[o.icon?(a(),k(O(o.icon),{key:0,class:"option-icon"})):f("",!0),o.button?(a(),l("span",{key:1,innerHTML:o.button},null,8,qo)):f("",!0),_(Be,{class:"text-icon"})])):(a(),k(Bo,{key:1,class:"icon"}))],8,Uo),c("div",Ro,[_(Go,{items:o.items},{default:h(()=>[u(o.$slots,"default",{},void 0,!0)]),_:3},8,["items"])])],544))}}),$e=m(Ko,[["__scopeId","data-v-9c007e85"]]),Wo={discord:'Discord',facebook:'Facebook',github:'GitHub',instagram:'Instagram',linkedin:'LinkedIn',mastodon:'Mastodon',slack:'Slack',twitter:'Twitter',x:'X',youtube:'YouTube'},Yo=["href","aria-label","innerHTML"],Jo=g({__name:"VPSocialLink",props:{icon:{},link:{},ariaLabel:{}},setup(s){const e=s,t=b(()=>typeof e.icon=="object"?e.icon.svg:Wo[e.icon]);return(n,o)=>(a(),l("a",{class:"VPSocialLink no-icon",href:n.link,"aria-label":n.ariaLabel??(typeof n.icon=="string"?n.icon:""),target:"_blank",rel:"noopener",innerHTML:t.value},null,8,Yo))}}),Zo=m(Jo,[["__scopeId","data-v-f80f8133"]]),Qo={class:"VPSocialLinks"},Xo=g({__name:"VPSocialLinks",props:{links:{}},setup(s){return(e,t)=>(a(),l("div",Qo,[(a(!0),l(M,null,A(e.links,({link:n,icon:o,ariaLabel:r})=>(a(),k(Zo,{key:n,icon:o,link:n,ariaLabel:r},null,8,["icon","link","ariaLabel"]))),128))]))}}),ke=m(Xo,[["__scopeId","data-v-7bc22406"]]),ea={key:0,class:"group translations"},ta={class:"trans-title"},sa={key:1,class:"group"},na={class:"item appearance"},oa={class:"label"},aa={class:"appearance-action"},ra={key:2,class:"group"},ia={class:"item 
social-links"},la=g({__name:"VPNavBarExtra",setup(s){const{site:e,theme:t}=P(),{localeLinks:n,currentLang:o}=J({correspondingLink:!0}),r=b(()=>n.value.length&&o.value.label||e.value.appearance||t.value.socialLinks);return(d,p)=>r.value?(a(),k($e,{key:0,class:"VPNavBarExtra",label:"extra navigation"},{default:h(()=>[i(n).length&&i(o).label?(a(),l("div",ea,[c("p",ta,L(i(o).label),1),(a(!0),l(M,null,A(i(n),v=>(a(),k(ne,{key:v.link,item:v},null,8,["item"]))),128))])):f("",!0),i(e).appearance&&i(e).appearance!=="force-dark"?(a(),l("div",sa,[c("div",na,[c("p",oa,L(i(t).darkModeSwitchLabel||"Appearance"),1),c("div",aa,[_(me)])])])):f("",!0),i(t).socialLinks?(a(),l("div",ra,[c("div",ia,[_(ke,{class:"social-links-list",links:i(t).socialLinks},null,8,["links"])])])):f("",!0)]),_:1})):f("",!0)}}),ca=m(la,[["__scopeId","data-v-d0bd9dde"]]),ua=s=>(E("data-v-e5dd9c1c"),s=s(),D(),s),da=["aria-expanded"],va=ua(()=>c("span",{class:"container"},[c("span",{class:"top"}),c("span",{class:"middle"}),c("span",{class:"bottom"})],-1)),ha=[va],pa=g({__name:"VPNavBarHamburger",props:{active:{type:Boolean}},emits:["click"],setup(s){return(e,t)=>(a(),l("button",{type:"button",class:I(["VPNavBarHamburger",{active:e.active}]),"aria-label":"mobile navigation","aria-expanded":e.active,"aria-controls":"VPNavScreen",onClick:t[0]||(t[0]=n=>e.$emit("click"))},ha,10,da))}}),_a=m(pa,[["__scopeId","data-v-e5dd9c1c"]]),fa=["innerHTML"],ma=g({__name:"VPNavBarMenuLink",props:{item:{}},setup(s){const{page:e}=P();return(t,n)=>(a(),k(z,{class:I({VPNavBarMenuLink:!0,active:i(x)(i(e).relativePath,t.item.activeMatch||t.item.link,!!t.item.activeMatch)}),href:t.item.link,target:t.item.target,rel:t.item.rel,tabindex:"0"},{default:h(()=>[c("span",{innerHTML:t.item.text},null,8,fa)]),_:1},8,["class","href","target","rel"]))}}),ga=m(ma,[["__scopeId","data-v-42ef59de"]]),$a=g({__name:"VPNavBarMenuGroup",props:{item:{}},setup(s){const e=s,{page:t}=P(),n=r=>"link"in 
r?x(t.value.relativePath,r.link,!!e.item.activeMatch):r.items.some(n),o=b(()=>n(e.item));return(r,d)=>(a(),k($e,{class:I({VPNavBarMenuGroup:!0,active:i(x)(i(t).relativePath,r.item.activeMatch,!!r.item.activeMatch)||o.value}),button:r.item.text,items:r.item.items},null,8,["class","button","items"]))}}),ka=s=>(E("data-v-7f418b0f"),s=s(),D(),s),ba={key:0,"aria-labelledby":"main-nav-aria-label",class:"VPNavBarMenu"},ya=ka(()=>c("span",{id:"main-nav-aria-label",class:"visually-hidden"},"Main Navigation",-1)),Pa=g({__name:"VPNavBarMenu",setup(s){const{theme:e}=P();return(t,n)=>i(e).nav?(a(),l("nav",ba,[ya,(a(!0),l(M,null,A(i(e).nav,o=>(a(),l(M,{key:o.text},["link"in o?(a(),k(ga,{key:0,item:o},null,8,["item"])):(a(),k($a,{key:1,item:o},null,8,["item"]))],64))),128))])):f("",!0)}}),wa=m(Pa,[["__scopeId","data-v-7f418b0f"]]);function Va(s,e){const{localeIndex:t}=P();function n(o){var C,N;const r=o.split("."),d=s&&typeof s=="object",p=d&&((N=(C=s.locales)==null?void 0:C[t.value])==null?void 0:N.translations)||null,v=d&&s.translations||null;let $=p,y=v,V=e;const B=r.pop();for(const T of r){let w=null;const q=V==null?void 0:V[T];q&&(w=V=q);const W=y==null?void 0:y[T];W&&(w=y=W);const R=$==null?void 0:$[T];R&&(w=$=R),q||(V=w),W||(y=w),R||($=w)}return($==null?void 0:$[B])??(y==null?void 0:y[B])??(V==null?void 0:V[B])??""}return n}const La=["aria-label"],Sa={class:"DocSearch-Button-Container"},Ma=c("svg",{class:"DocSearch-Search-Icon",width:"20",height:"20",viewBox:"0 0 20 20","aria-label":"search icon"},[c("path",{d:"M14.386 14.386l4.0877 4.0877-4.0877-4.0877c-2.9418 2.9419-7.7115 2.9419-10.6533 0-2.9419-2.9418-2.9419-7.7115 0-10.6533 2.9418-2.9419 7.7115-2.9419 10.6533 0 2.9419 2.9418 2.9419 7.7115 0 
10.6533z",stroke:"currentColor",fill:"none","fill-rule":"evenodd","stroke-linecap":"round","stroke-linejoin":"round"})],-1),Ia={class:"DocSearch-Button-Placeholder"},Ca=c("span",{class:"DocSearch-Button-Keys"},[c("kbd",{class:"DocSearch-Button-Key"}),c("kbd",{class:"DocSearch-Button-Key"},"K")],-1),ye=g({__name:"VPNavBarSearchButton",setup(s){const{theme:e}=P(),t={button:{buttonText:"Search",buttonAriaLabel:"Search"}},n=je(Va)(qe(()=>{var o;return(o=e.value.search)==null?void 0:o.options}),t);return(o,r)=>(a(),l("button",{type:"button",class:"DocSearch DocSearch-Button","aria-label":i(n)("button.buttonAriaLabel")},[c("span",Sa,[Ma,c("span",Ia,L(i(n)("button.buttonText")),1)]),Ca],8,La))}}),Ta={class:"VPNavBarSearch"},Ba={id:"local-search"},Na={key:1,id:"docsearch"},Aa=g({__name:"VPNavBarSearch",setup(s){const e=()=>null,t=()=>null,{theme:n}=P(),o=S(!1),r=S(!1);G(()=>{});function d(){o.value||(o.value=!0,setTimeout(p,16))}function p(){const y=new Event("keydown");y.key="k",y.metaKey=!0,window.dispatchEvent(y),setTimeout(()=>{document.querySelector(".DocSearch-Modal")||p()},16)}const v=S(!1),$="";return(y,V)=>{var B;return a(),l("div",Ta,[i($)==="local"?(a(),l(M,{key:0},[v.value?(a(),k(i(e),{key:0,onClose:V[0]||(V[0]=C=>v.value=!1)})):f("",!0),c("div",Ba,[_(ye,{onClick:V[1]||(V[1]=C=>v.value=!0)})])],64)):i($)==="algolia"?(a(),l(M,{key:1},[o.value?(a(),k(i(t),{key:0,algolia:((B=i(n).search)==null?void 
0:B.options)??i(n).algolia,onVnodeBeforeMount:V[2]||(V[2]=C=>r.value=!0)},null,8,["algolia"])):f("",!0),r.value?f("",!0):(a(),l("div",Na,[_(ye,{onClick:d})]))],64)):f("",!0)])}}}),Ha=g({__name:"VPNavBarSocialLinks",setup(s){const{theme:e}=P();return(t,n)=>i(e).socialLinks?(a(),k(ke,{key:0,class:"VPNavBarSocialLinks",links:i(e).socialLinks},null,8,["links"])):f("",!0)}}),za=m(Ha,[["__scopeId","data-v-0394ad82"]]),Ea=["href"],Da=g({__name:"VPNavBarTitle",setup(s){const{site:e,theme:t}=P(),{hasSidebar:n}=F(),{currentLang:o}=J();return(r,d)=>(a(),l("div",{class:I(["VPNavBarTitle",{"has-sidebar":i(n)}])},[c("a",{class:"title",href:i(t).logoLink??i(ve)(i(o).link)},[u(r.$slots,"nav-bar-title-before",{},void 0,!0),i(t).logo?(a(),k(X,{key:0,class:"logo",image:i(t).logo},null,8,["image"])):f("",!0),i(t).siteTitle?(a(),l(M,{key:1},[H(L(i(t).siteTitle),1)],64)):i(t).siteTitle===void 0?(a(),l(M,{key:2},[H(L(i(e).title),1)],64)):f("",!0),u(r.$slots,"nav-bar-title-after",{},void 0,!0)],8,Ea)],2))}}),Fa=m(Da,[["__scopeId","data-v-86d1bed8"]]),Oa={},xa={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},Ga=c("path",{d:"M0 0h24v24H0z",fill:"none"},null,-1),Ua=c("path",{d:" M12.87 15.07l-2.54-2.51.03-.03c1.74-1.94 2.98-4.17 3.71-6.53H17V4h-7V2H8v2H1v1.99h11.17C11.5 7.92 10.44 9.75 9 11.35 8.07 10.32 7.3 9.19 6.69 8h-2c.73 1.63 1.73 3.17 2.98 4.56l-5.09 5.02L4 19l5-5 3.11 3.11.76-2.04zM18.5 10h-2L12 22h2l1.12-3h4.75L21 22h2l-4.5-12zm-2.62 7l1.62-4.33L19.12 17h-3.24z ",class:"css-c4d79v"},null,-1),ja=[Ga,Ua];function qa(s,e){return a(),l("svg",xa,ja)}const Ne=m(Oa,[["render",qa]]),Ra={class:"items"},Ka={class:"title"},Wa=g({__name:"VPNavBarTranslations",setup(s){const{theme:e}=P(),{localeLinks:t,currentLang:n}=J({correspondingLink:!0});return(o,r)=>i(t).length&&i(n).label?(a(),k($e,{key:0,class:"VPNavBarTranslations",icon:Ne,label:i(e).langMenuLabel||"Change 
language"},{default:h(()=>[c("div",Ra,[c("p",Ka,L(i(n).label),1),(a(!0),l(M,null,A(i(t),d=>(a(),k(ne,{key:d.link,item:d},null,8,["item"]))),128))])]),_:1},8,["label"])):f("",!0)}}),Ya=m(Wa,[["__scopeId","data-v-74abcbb9"]]),Ja=s=>(E("data-v-d83f3580"),s=s(),D(),s),Za={class:"container"},Qa={class:"title"},Xa={class:"content"},er=Ja(()=>c("div",{class:"curtain"},null,-1)),tr={class:"content-body"},sr=g({__name:"VPNavBar",props:{isScreenOpen:{type:Boolean}},emits:["toggle-screen"],setup(s){const{y:e}=Ve(),{hasSidebar:t}=F(),{frontmatter:n}=P(),o=S({});return Pe(()=>{o.value={"has-sidebar":t.value,top:n.value.layout==="home"&&e.value===0}}),(r,d)=>(a(),l("div",{class:I(["VPNavBar",o.value])},[c("div",Za,[c("div",Qa,[_(Fa,null,{"nav-bar-title-before":h(()=>[u(r.$slots,"nav-bar-title-before",{},void 0,!0)]),"nav-bar-title-after":h(()=>[u(r.$slots,"nav-bar-title-after",{},void 0,!0)]),_:3})]),c("div",Xa,[er,c("div",tr,[u(r.$slots,"nav-bar-content-before",{},void 0,!0),_(Aa,{class:"search"}),_(wa,{class:"menu"}),_(Ya,{class:"translations"}),_(fo,{class:"appearance"}),_(za,{class:"social-links"}),_(ca,{class:"extra"}),u(r.$slots,"nav-bar-content-after",{},void 0,!0),_(_a,{class:"hamburger",active:r.isScreenOpen,onClick:d[0]||(d[0]=p=>r.$emit("toggle-screen"))},null,8,["active"])])])])],2))}}),nr=m(sr,[["__scopeId","data-v-d83f3580"]]),or={key:0,class:"VPNavScreenAppearance"},ar={class:"text"},rr=g({__name:"VPNavScreenAppearance",setup(s){const{site:e,theme:t}=P();return(n,o)=>i(e).appearance&&i(e).appearance!=="force-dark"?(a(),l("div",or,[c("p",ar,L(i(t).darkModeSwitchLabel||"Appearance"),1),_(me)])):f("",!0)}}),ir=m(rr,[["__scopeId","data-v-2d7af913"]]),lr=g({__name:"VPNavScreenMenuLink",props:{item:{}},setup(s){const 
e=se("close-screen");return(t,n)=>(a(),k(z,{class:"VPNavScreenMenuLink",href:t.item.link,target:t.item.target,rel:t.item.rel,onClick:i(e)},{default:h(()=>[H(L(t.item.text),1)]),_:1},8,["href","target","rel","onClick"]))}}),cr=m(lr,[["__scopeId","data-v-05f27b2a"]]),ur={},dr={xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",focusable:"false",viewBox:"0 0 24 24"},vr=c("path",{d:"M18.9,10.9h-6v-6c0-0.6-0.4-1-1-1s-1,0.4-1,1v6h-6c-0.6,0-1,0.4-1,1s0.4,1,1,1h6v6c0,0.6,0.4,1,1,1s1-0.4,1-1v-6h6c0.6,0,1-0.4,1-1S19.5,10.9,18.9,10.9z"},null,-1),hr=[vr];function pr(s,e){return a(),l("svg",dr,hr)}const _r=m(ur,[["render",pr]]),fr=g({__name:"VPNavScreenMenuGroupLink",props:{item:{}},setup(s){const e=se("close-screen");return(t,n)=>(a(),k(z,{class:"VPNavScreenMenuGroupLink",href:t.item.link,target:t.item.target,rel:t.item.rel,onClick:i(e)},{default:h(()=>[H(L(t.item.text),1)]),_:1},8,["href","target","rel","onClick"]))}}),Ae=m(fr,[["__scopeId","data-v-19976ae1"]]),mr={class:"VPNavScreenMenuGroupSection"},gr={key:0,class:"title"},$r=g({__name:"VPNavScreenMenuGroupSection",props:{text:{},items:{}},setup(s){return(e,t)=>(a(),l("div",mr,[e.text?(a(),l("p",gr,L(e.text),1)):f("",!0),(a(!0),l(M,null,A(e.items,n=>(a(),k(Ae,{key:n.text,item:n},null,8,["item"]))),128))]))}}),kr=m($r,[["__scopeId","data-v-8133b170"]]),br=["aria-controls","aria-expanded"],yr=["innerHTML"],Pr=["id"],wr={key:1,class:"group"},Vr=g({__name:"VPNavScreenMenuGroup",props:{text:{},items:{}},setup(s){const e=s,t=S(!1),n=b(()=>`NavScreenGroup-${e.text.replace(" ","-").toLowerCase()}`);function o(){t.value=!t.value}return(r,d)=>(a(),l("div",{class:I(["VPNavScreenMenuGroup",{open:t.value}])},[c("button",{class:"button","aria-controls":n.value,"aria-expanded":t.value,onClick:o},[c("span",{class:"button-text",innerHTML:r.text},null,8,yr),_(_r,{class:"button-icon"})],8,br),c("div",{id:n.value,class:"items"},[(a(!0),l(M,null,A(r.items,p=>(a(),l(M,{key:p.text},["link"in 
p?(a(),l("div",{key:p.text,class:"item"},[_(Ae,{item:p},null,8,["item"])])):(a(),l("div",wr,[_(kr,{text:p.text,items:p.items},null,8,["text","items"])]))],64))),128))],8,Pr)],2))}}),Lr=m(Vr,[["__scopeId","data-v-65ef89ca"]]),Sr={key:0,class:"VPNavScreenMenu"},Mr=g({__name:"VPNavScreenMenu",setup(s){const{theme:e}=P();return(t,n)=>i(e).nav?(a(),l("nav",Sr,[(a(!0),l(M,null,A(i(e).nav,o=>(a(),l(M,{key:o.text},["link"in o?(a(),k(cr,{key:0,item:o},null,8,["item"])):(a(),k(Lr,{key:1,text:o.text||"",items:o.items},null,8,["text","items"]))],64))),128))])):f("",!0)}}),Ir=g({__name:"VPNavScreenSocialLinks",setup(s){const{theme:e}=P();return(t,n)=>i(e).socialLinks?(a(),k(ke,{key:0,class:"VPNavScreenSocialLinks",links:i(e).socialLinks},null,8,["links"])):f("",!0)}}),Cr={class:"list"},Tr=g({__name:"VPNavScreenTranslations",setup(s){const{localeLinks:e,currentLang:t}=J({correspondingLink:!0}),n=S(!1);function o(){n.value=!n.value}return(r,d)=>i(e).length&&i(t).label?(a(),l("div",{key:0,class:I(["VPNavScreenTranslations",{open:n.value}])},[c("button",{class:"title",onClick:o},[_(Ne,{class:"icon lang"}),H(" "+L(i(t).label)+" ",1),_(Be,{class:"icon chevron"})]),c("ul",Cr,[(a(!0),l(M,null,A(i(e),p=>(a(),l("li",{key:p.link,class:"item"},[_(z,{class:"link",href:p.link},{default:h(()=>[H(L(p.text),1)]),_:2},1032,["href"])]))),128))])],2)):f("",!0)}}),Br=m(Tr,[["__scopeId","data-v-d72aa483"]]),Nr={class:"container"},Ar=g({__name:"VPNavScreen",props:{open:{type:Boolean}},setup(s){const e=S(null),t=Le(K?document.body:null);return(n,o)=>(a(),k(le,{name:"fade",onEnter:o[0]||(o[0]=r=>t.value=!0),onAfterLeave:o[1]||(o[1]=r=>t.value=!1)},{default:h(()=>[n.open?(a(),l("div",{key:0,class:"VPNavScreen",ref_key:"screen",ref:e,id:"VPNavScreen"},[c("div",Nr,[u(n.$slots,"nav-screen-content-before",{},void 0,!0),_(Mr,{class:"menu"}),_(Br,{class:"translations"}),_(ir,{class:"appearance"}),_(Ir,{class:"social-links"}),u(n.$slots,"nav-screen-content-after",{},void 
0,!0)])],512)):f("",!0)]),_:3}))}}),Hr=m(Ar,[["__scopeId","data-v-cc5739dd"]]),zr={key:0,class:"VPNav"},Er=g({__name:"VPNav",setup(s){const{isScreenOpen:e,closeScreen:t,toggleScreen:n}=Kn(),{frontmatter:o}=P(),r=b(()=>o.value.navbar!==!1);return Se("close-screen",t),ee(()=>{K&&document.documentElement.classList.toggle("hide-nav",!r.value)}),(d,p)=>r.value?(a(),l("header",zr,[_(nr,{"is-screen-open":i(e),onToggleScreen:i(n)},{"nav-bar-title-before":h(()=>[u(d.$slots,"nav-bar-title-before",{},void 0,!0)]),"nav-bar-title-after":h(()=>[u(d.$slots,"nav-bar-title-after",{},void 0,!0)]),"nav-bar-content-before":h(()=>[u(d.$slots,"nav-bar-content-before",{},void 0,!0)]),"nav-bar-content-after":h(()=>[u(d.$slots,"nav-bar-content-after",{},void 0,!0)]),_:3},8,["is-screen-open","onToggleScreen"]),_(Hr,{open:i(e)},{"nav-screen-content-before":h(()=>[u(d.$slots,"nav-screen-content-before",{},void 0,!0)]),"nav-screen-content-after":h(()=>[u(d.$slots,"nav-screen-content-after",{},void 0,!0)]),_:3},8,["open"])])):f("",!0)}}),Dr=m(Er,[["__scopeId","data-v-ae24b3ad"]]),Fr=s=>(E("data-v-e31bd47b"),s=s(),D(),s),Or=["role","tabindex"],xr=Fr(()=>c("div",{class:"indicator"},null,-1)),Gr={key:1,class:"items"},Ur=g({__name:"VPSidebarItem",props:{item:{},depth:{}},setup(s){const e=s,{collapsed:t,collapsible:n,isLink:o,isActiveLink:r,hasActiveLink:d,hasChildren:p,toggle:v}=ft(b(()=>e.item)),$=b(()=>p.value?"section":"div"),y=b(()=>o.value?"a":"div"),V=b(()=>p.value?e.depth+2===7?"p":`h${e.depth+2}`:"p"),B=b(()=>o.value?void 0:"button"),C=b(()=>[[`level-${e.depth}`],{collapsible:n.value},{collapsed:t.value},{"is-link":o.value},{"is-active":r.value},{"has-active":d.value}]);function N(w){"key"in w&&w.key!=="Enter"||!e.item.link&&v()}function T(){e.item.link&&v()}return(w,q)=>{const W=j("VPSidebarItem",!0);return 
a(),k(O($.value),{class:I(["VPSidebarItem",C.value])},{default:h(()=>[w.item.text?(a(),l("div",Z({key:0,class:"item",role:B.value},Ke(w.item.items?{click:N,keydown:N}:{},!0),{tabindex:w.item.items&&0}),[xr,w.item.link?(a(),k(z,{key:0,tag:y.value,class:"link",href:w.item.link,rel:w.item.rel,target:w.item.target},{default:h(()=>[(a(),k(O(V.value),{class:"text",innerHTML:w.item.text},null,8,["innerHTML"]))]),_:1},8,["tag","href","rel","target"])):(a(),k(O(V.value),{key:1,class:"text",innerHTML:w.item.text},null,8,["innerHTML"])),w.item.collapsed!=null?(a(),l("div",{key:2,class:"caret",role:"button","aria-label":"toggle section",onClick:T,onKeydown:Re(T,["enter"]),tabindex:"0"},[_(fe,{class:"caret-icon"})],32)):f("",!0)],16,Or)):f("",!0),w.item.items&&w.item.items.length?(a(),l("div",Gr,[w.depth<5?(a(!0),l(M,{key:0},A(w.item.items,R=>(a(),k(W,{key:R.text,item:R,depth:w.depth+1},null,8,["item","depth"]))),128)):f("",!0)])):f("",!0)]),_:1},8,["class"])}}}),jr=m(Ur,[["__scopeId","data-v-e31bd47b"]]),He=s=>(E("data-v-7f44e717"),s=s(),D(),s),qr=He(()=>c("div",{class:"curtain"},null,-1)),Rr={class:"nav",id:"VPSidebarNav","aria-labelledby":"sidebar-aria-label",tabindex:"-1"},Kr=He(()=>c("span",{class:"visually-hidden",id:"sidebar-aria-label"}," Sidebar Navigation ",-1)),Wr=g({__name:"VPSidebar",props:{open:{type:Boolean}},setup(s){const{sidebarGroups:e,hasSidebar:t}=F(),n=s,o=S(null),r=Le(K?document.body:null);return U([n,o],()=>{var d;n.open?(r.value=!0,(d=o.value)==null||d.focus()):r.value=!1},{immediate:!0,flush:"post"}),(d,p)=>i(t)?(a(),l("aside",{key:0,class:I(["VPSidebar",{open:d.open}]),ref_key:"navEl",ref:o,onClick:p[0]||(p[0]=We(()=>{},["stop"]))},[qr,c("nav",Rr,[Kr,u(d.$slots,"sidebar-nav-before",{},void 0,!0),(a(!0),l(M,null,A(i(e),v=>(a(),l("div",{key:v.text,class:"group"},[_(jr,{item:v,depth:0},null,8,["item"])]))),128)),u(d.$slots,"sidebar-nav-after",{},void 
0,!0)])],2)):f("",!0)}}),Yr=m(Wr,[["__scopeId","data-v-7f44e717"]]),Jr=g({__name:"VPSkipLink",setup(s){const e=te(),t=S();U(()=>e.path,()=>t.value.focus());function n({target:o}){const r=document.getElementById(decodeURIComponent(o.hash).slice(1));if(r){const d=()=>{r.removeAttribute("tabindex"),r.removeEventListener("blur",d)};r.setAttribute("tabindex","-1"),r.addEventListener("blur",d),r.focus(),window.scrollTo(0,0)}}return(o,r)=>(a(),l(M,null,[c("span",{ref_key:"backToTop",ref:t,tabindex:"-1"},null,512),c("a",{href:"#VPContent",class:"VPSkipLink visually-hidden",onClick:n}," Skip to content ")],64))}}),Zr=m(Jr,[["__scopeId","data-v-0f60ec36"]]),Qr=g({__name:"Layout",setup(s){const{isOpen:e,open:t,close:n}=F(),o=te();U(()=>o.path,n),_t(e,n);const{frontmatter:r}=P(),d=Ye(),p=b(()=>!!d["home-hero-image"]);return Se("hero-image-slot-exists",p),(v,$)=>{const y=j("Content");return i(r).layout!==!1?(a(),l("div",{key:0,class:I(["Layout",i(r).pageClass])},[u(v.$slots,"layout-top",{},void 0,!0),_(Zr),_(et,{class:"backdrop",show:i(e),onClick:i(n)},null,8,["show","onClick"]),_(Dr,null,{"nav-bar-title-before":h(()=>[u(v.$slots,"nav-bar-title-before",{},void 0,!0)]),"nav-bar-title-after":h(()=>[u(v.$slots,"nav-bar-title-after",{},void 0,!0)]),"nav-bar-content-before":h(()=>[u(v.$slots,"nav-bar-content-before",{},void 0,!0)]),"nav-bar-content-after":h(()=>[u(v.$slots,"nav-bar-content-after",{},void 0,!0)]),"nav-screen-content-before":h(()=>[u(v.$slots,"nav-screen-content-before",{},void 0,!0)]),"nav-screen-content-after":h(()=>[u(v.$slots,"nav-screen-content-after",{},void 0,!0)]),_:3}),_(Rn,{open:i(e),onOpenMenu:i(t)},null,8,["open","onOpenMenu"]),_(Yr,{open:i(e)},{"sidebar-nav-before":h(()=>[u(v.$slots,"sidebar-nav-before",{},void 0,!0)]),"sidebar-nav-after":h(()=>[u(v.$slots,"sidebar-nav-after",{},void 0,!0)]),_:3},8,["open"]),_(wn,null,{"page-top":h(()=>[u(v.$slots,"page-top",{},void 0,!0)]),"page-bottom":h(()=>[u(v.$slots,"page-bottom",{},void 
0,!0)]),"not-found":h(()=>[u(v.$slots,"not-found",{},void 0,!0)]),"home-hero-before":h(()=>[u(v.$slots,"home-hero-before",{},void 0,!0)]),"home-hero-info":h(()=>[u(v.$slots,"home-hero-info",{},void 0,!0)]),"home-hero-image":h(()=>[u(v.$slots,"home-hero-image",{},void 0,!0)]),"home-hero-after":h(()=>[u(v.$slots,"home-hero-after",{},void 0,!0)]),"home-features-before":h(()=>[u(v.$slots,"home-features-before",{},void 0,!0)]),"home-features-after":h(()=>[u(v.$slots,"home-features-after",{},void 0,!0)]),"doc-footer-before":h(()=>[u(v.$slots,"doc-footer-before",{},void 0,!0)]),"doc-before":h(()=>[u(v.$slots,"doc-before",{},void 0,!0)]),"doc-after":h(()=>[u(v.$slots,"doc-after",{},void 0,!0)]),"doc-top":h(()=>[u(v.$slots,"doc-top",{},void 0,!0)]),"doc-bottom":h(()=>[u(v.$slots,"doc-bottom",{},void 0,!0)]),"aside-top":h(()=>[u(v.$slots,"aside-top",{},void 0,!0)]),"aside-bottom":h(()=>[u(v.$slots,"aside-bottom",{},void 0,!0)]),"aside-outline-before":h(()=>[u(v.$slots,"aside-outline-before",{},void 0,!0)]),"aside-outline-after":h(()=>[u(v.$slots,"aside-outline-after",{},void 0,!0)]),"aside-ads-before":h(()=>[u(v.$slots,"aside-ads-before",{},void 0,!0)]),"aside-ads-after":h(()=>[u(v.$slots,"aside-ads-after",{},void 0,!0)]),_:3}),_(In),u(v.$slots,"layout-bottom",{},void 0,!0)],2)):(a(),k(y,{key:1}))}}}),Xr=m(Qr,[["__scopeId","data-v-5a346dfe"]]),ti={Layout:Xr,enhanceApp:({app:s})=>{s.component("Badge",Ze)}};export{ti as t}; diff --git a/assets/cleared-window.f143363a.png b/assets/cleared-window.NdJYunVt.png similarity index 100% rename from assets/cleared-window.f143363a.png rename to assets/cleared-window.NdJYunVt.png diff --git a/assets/connect.317eedac.jpg b/assets/connect.tEwLZZd_.jpg similarity index 100% rename from assets/connect.317eedac.jpg rename to assets/connect.tEwLZZd_.jpg diff --git a/assets/corruption.634c88d7.png b/assets/corruption.i47wUppK.png similarity index 100% rename from assets/corruption.634c88d7.png rename to assets/corruption.i47wUppK.png diff --git 
a/assets/create.2648e6ab.png b/assets/create.PI67xtCk.png similarity index 100% rename from assets/create.2648e6ab.png rename to assets/create.PI67xtCk.png diff --git a/assets/cube-diffuse.989b7825.jpg b/assets/cube-diffuse.GqZGxA7U.jpg similarity index 100% rename from assets/cube-diffuse.989b7825.jpg rename to assets/cube-diffuse.GqZGxA7U.jpg diff --git a/assets/cube-normal.076c62a3.png b/assets/cube-normal.ojipvyDD.png similarity index 100% rename from assets/cube-normal.076c62a3.png rename to assets/cube-normal.ojipvyDD.png diff --git a/assets/cubes-correct.f1d8f512.png b/assets/cubes-correct.kqVZNaXG.png similarity index 100% rename from assets/cubes-correct.f1d8f512.png rename to assets/cubes-correct.kqVZNaXG.png diff --git a/assets/cubes.57cd381f.png b/assets/cubes.N5MdxQg4.png similarity index 100% rename from assets/cubes.57cd381f.png rename to assets/cubes.N5MdxQg4.png diff --git a/assets/debug_material.49f1d418.png b/assets/debug_material.knbjh1ZS.png similarity index 100% rename from assets/debug_material.49f1d418.png rename to assets/debug_material.knbjh1ZS.png diff --git a/assets/debug_navigator.a853a4e1.png b/assets/debug_navigator.H_YJMyMQ.png similarity index 100% rename from assets/debug_navigator.a853a4e1.png rename to assets/debug_navigator.H_YJMyMQ.png diff --git a/assets/depth_problems.9a7af250.png b/assets/depth_problems._syaXayK.png similarity index 100% rename from assets/depth_problems.9a7af250.png rename to assets/depth_problems._syaXayK.png diff --git a/assets/diffuse_right.33406428.png b/assets/diffuse_right.Lc1TfbXm.png similarity index 100% rename from assets/diffuse_right.33406428.png rename to assets/diffuse_right.Lc1TfbXm.png diff --git a/assets/diffuse_wrong.70c1d359.png b/assets/diffuse_wrong.-cOTaPSJ.png similarity index 100% rename from assets/diffuse_wrong.70c1d359.png rename to assets/diffuse_wrong.-cOTaPSJ.png diff --git a/assets/figure_fbm.0a4b2e8c.png b/assets/figure_fbm.vQ-eUzh_.png similarity index 100% rename from 
assets/figure_fbm.0a4b2e8c.png rename to assets/figure_fbm.vQ-eUzh_.png diff --git a/assets/figure_no-fbm.f932d989.png b/assets/figure_no-fbm.WygYK_Pb.png similarity index 100% rename from assets/figure_no-fbm.f932d989.png rename to assets/figure_no-fbm.WygYK_Pb.png diff --git a/assets/figure_spiky.a86510ba.png b/assets/figure_spiky.4lru4RHQ.png similarity index 100% rename from assets/figure_spiky.a86510ba.png rename to assets/figure_spiky.4lru4RHQ.png diff --git a/assets/figure_work-groups.8aea47c7.jpg b/assets/figure_work-groups.-0RxSLqe.jpg similarity index 100% rename from assets/figure_work-groups.8aea47c7.jpg rename to assets/figure_work-groups.-0RxSLqe.jpg diff --git a/assets/forest.7453fae9.png b/assets/forest.KVLLckoU.png similarity index 100% rename from assets/forest.7453fae9.png rename to assets/forest.KVLLckoU.png diff --git a/assets/forest_fixed.6ec7a22b.png b/assets/forest_fixed._xtODI5M.png similarity index 100% rename from assets/forest_fixed.6ec7a22b.png rename to assets/forest_fixed._xtODI5M.png diff --git a/assets/fps.2666c991.png b/assets/fps.KHtAZo6h.png similarity index 100% rename from assets/fps.2666c991.png rename to assets/fps.KHtAZo6h.png diff --git a/assets/frame.5d75a9c3.jpg b/assets/frame.tKpmLFBx.jpg similarity index 100% rename from assets/frame.5d75a9c3.jpg rename to assets/frame.tKpmLFBx.jpg diff --git a/assets/global_invocation_id.88d705bb.png b/assets/global_invocation_id.WloB-8Kb.png similarity index 100% rename from assets/global_invocation_id.88d705bb.png rename to assets/global_invocation_id.WloB-8Kb.png diff --git a/assets/half_dir.96da210f.png b/assets/half_dir.ayFab_R-.png similarity index 100% rename from assets/half_dir.96da210f.png rename to assets/half_dir.ayFab_R-.png diff --git a/assets/happy-tree-uv-coords.dc08858a.png b/assets/happy-tree-uv-coords.3QtDvIcC.png similarity index 100% rename from assets/happy-tree-uv-coords.dc08858a.png rename to assets/happy-tree-uv-coords.3QtDvIcC.png diff --git 
a/assets/happy-tree.cf7260cf.png b/assets/happy-tree.tK0WX7Fz.png similarity index 100% rename from assets/happy-tree.cf7260cf.png rename to assets/happy-tree.tK0WX7Fz.png diff --git a/assets/index.md.UCsFBABa.js b/assets/index.md.UCsFBABa.js new file mode 100644 index 000000000..1059713cb --- /dev/null +++ b/assets/index.md.UCsFBABa.js @@ -0,0 +1,11 @@ +import{_ as a,D as r,o as s,c as i,I as t,R as n,a1 as p,a2 as l,a3 as o}from"./chunks/framework.bMtwhlie.js";const P=JSON.parse('{"title":"介绍","description":"","frontmatter":{},"headers":[],"relativePath":"index.md","filePath":"index.md","lastUpdated":1703303099000}'),h={name:"index.md"},g=n('

介绍

为了便于读者的理解,译者选择性的添加了一些内容,并对原文中有歧义或错误的地方进行重新表述。所有的添加与修改均不会做单独标记。

翻译时采用了第一人称视角,故,除了带 🆕 标记的章节,教程中的主要指的是原作者 @sotrh

另外,专有名词在一个段落中第一次出现时做了加粗处理,同一段落里反复出现时就不再加粗。

WebGPU 是啥?

WebGPU 是由 W3C GPU for the Web 社区组所发布的规范,目标是允许网页代码以高性能且安全可靠的方式访问 GPU 功能。它通过借鉴 Vulkan API,并将其转换为宿主硬件上使用的各式 API(如 DirectX、Metal、Vulkan)来实现这一目标。

wgpu 又是啥?

wgpu 是基于 WebGPU API 规范的、跨平台的、安全的、纯 Rust 图形 API。它是 Firefox、Servo 和 Deno 中 WebGPU 整合的核心。

wgpu 不仅可以在 Web 环境运行,还可以在 macOS / iOS、Android、Window 和 Linux 等系统上原生运行。

为什么要使用 Rust?

wgpu 实际上提供了 C 语言绑定 (wgpu-native),你可以写 C/C++ 或其他能与 C 互通的语言来使用它。尽管如此,wgpu 本身是用 Rust 实现的,它便利的 Rust 绑定能减少你使用中的阻碍。更重要的是,Rust 是一门高性能,内存和线程安全且极具生产力的现代底层语言。

在学习本教程之前你需要先熟悉 Rust,因为这里不会详细介绍 Rust 的语法知识。如果对 Rust 还不太熟悉,可以回顾一下 Rust 教程Rust 语言圣经。另外还需要熟悉 Rust 包管理工具 Cargo

为什么要学习 wgpu,直接用 JS/TS 搞 WebGPU 开发不香吗?

从 wgpu 及 dawn 这两个主要的 WebGPU 标准的实现库的开发动向可以看出,大量的扩展特性目前只有在 Native 端(Windows、macOS、Linux、iOS、Android)原生运行才能支持。wgpu 更是将 Native 端运行做为首要目标,WebGPU 是做为最低支持的特性集而存在。

使用 wgpu 在桌面及移动端做跨平台原生应用开发的体验极好,甚至我偏向于认为:WebGPU 更容易在 Native 端得到普及。因为不用受限于 1.0 标准啥时候发布,用户的浏览器是否支持等问题,现在就可以发布采用了 wgpu 的商业应用。

学习 wgpu 还有另一个重要的优势,那就是可以利用各种强大的桌面端 GPU 调试工具。在开发大型 2D/3D 应用时,通过使用命令记录/回放、帧捕捉、Buffer 视图等功能,可以快速定位 GPU 层代码/数据的性能瓶颈和程序缺陷。相较于仅依靠浏览器提供的有限调试能力,这些工具能够事半功倍,帮助开发者更快地解决问题。

wgpu/WebGPU 的学习资料是不是很少?

其实不用纠结于 WebGPU 方面的直接学习资料的多少。

WebGPU 就是一套图形接口,绝大部分概念都是各图形接口里通用的,任何一本经典图形学书籍都是我们的学习资料。 要利用好这些经典资料,前提仅仅就是要先学习一套图形接口。因为图形学的书不是使用统一的特定图形接口所写,先学会一个图形接口及常见的概念,然后再去深入学习某个方面的资料就会事半功倍。

现在学习 wgpu 是不是为时尚早?

WebGPU 1.0 API 已经稳定,Google 已经在 2023/4/6 宣布从 Chrome 113 版本开始正式支持 WebGPU

WebGPU Spec 1.0 补充一下 @Kangz 的话: Web 规范有点滑稽,因为“草案”或“推荐”之类的名称在很大程度上是一个管理细节,实际上对规范是否稳定可用没有任何影响。事实上,W3C 程序建议至少有两个浏览器在规范通过“草案”之前已经发布了兼容的实现,但显然这些浏览器会认为规范相当稳定,然后才愿意发布实现。然而,这确实令开发人员感到困惑,我们对此深表歉意。

如何运行示例代码

本教程的示例代码大部分放在 code/ 目录下,且示例程序的名称与程序目录同名。 比如,第一章 依赖与窗口 所有在的目录是 code/beginner/tutorial1-window, 此示例程序的名称也叫 tutorial1-window:

sh
# 在桌面环境本地运行
+cargo run --bin tutorial3-pipeline
+
+# 在浏览器中运行
+# 需要先安装 Rust WebAssembly target
+rustup target add wasm32-unknown-unknown
+# 使用 WebGPU(需要使用 Chrome/Edge 113+ 或 Chrome/Edge Canary,Canary 需手动开启 WebGPU 试验功能)
+# compute-pipeline, vertex-animation 及 hilbert-curve 示例只能在桌面端与浏览器端 WebGPU 环境运行
+cargo run-wasm --bin vertex-animation
+# 使用 WebGL 2.0
+cargo run-wasm --bin tutorial2-surface --features webgl

调试与集成 部分的代码是 2 个独立的项目: wgpu-in-appbevy-in-app

simuverse 是基于 wgpu + egui 的扩展示例,提供了粒子矢量场,流体场及 GPU 程序化纹理的实现。

如何开启浏览器 WebGPU 功能支持

Chrome

Chrome 113+、Microsoft Edge 113+ 均已经默认开启了 WebGPU 支持。

FireFox

安装 Nightly 版本,在地址栏中输入 about:config , 将 dom.webgpu.enabled 设置为 true: FireFox Nightly

Safari

安装 Safari Technology Preview 185+,从顶部菜单栏选择 开发 -> 功能标志 , 搜索并勾选 WebGPU: Edge Canary

关于译者

我是一名移动端架构师,有多年使用 OpenGL / WebGL, Metal 的实践经验。2018 年开始接触 WebGPU,目前正积极地参与到 wgpu 开源项目的开发与完善之中,并且已于两年前在 AppStore 上架了基于 wgpu 实现的毛笔书法模拟 App 字习 Pro

加入 wgpu 微信学习交流群

',37);function u(b,d,c,k,m,w){const e=r("JoinWeiChatGroup");return s(),i("div",null,[g,t(e)])}const F=a(h,[["render",u]]);export{P as __pageData,F as default}; diff --git a/assets/index.md.UCsFBABa.lean.js b/assets/index.md.UCsFBABa.lean.js new file mode 100644 index 000000000..210e4a777 --- /dev/null +++ b/assets/index.md.UCsFBABa.lean.js @@ -0,0 +1 @@ +import{_ as a,D as r,o as s,c as i,I as t,R as n,a1 as p,a2 as l,a3 as o}from"./chunks/framework.bMtwhlie.js";const P=JSON.parse('{"title":"介绍","description":"","frontmatter":{},"headers":[],"relativePath":"index.md","filePath":"index.md","lastUpdated":1703303099000}'),h={name:"index.md"},g=n("",37);function u(b,d,c,k,m,w){const e=r("JoinWeiChatGroup");return s(),i("div",null,[g,t(e)])}const F=a(h,[["render",u]]);export{P as __pageData,F as default}; diff --git a/assets/index.md.e2543842.js b/assets/index.md.e2543842.js deleted file mode 100644 index f59ad14f9..000000000 --- a/assets/index.md.e2543842.js +++ /dev/null @@ -1,11 +0,0 @@ -import{_ as a,E as r,o as s,c as n,J as t,S as o}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/res/WebGPU-1.0.png",l="/learn-wgpu-zh/res/firefox.png",i="/learn-wgpu-zh/res/chrome.png",c="/learn-wgpu-zh/res/edge.png",P=JSON.parse('{"title":"介绍","description":"","frontmatter":{},"headers":[],"relativePath":"index.md","filePath":"index.md","lastUpdated":1701933923000}'),u={name:"index.md"},h=o('

介绍

为了便于读者的理解,译者选择性的添加了一些内容,并对原文中有歧义或错误的地方进行重新表述。所有的添加与修改均不会做单独标记。

翻译时采用了第一人称视角,故,除了带 🆕 标记的章节,教程中的主要指的是原作者 @sotrh

另外,专有名词在一个段落中第一次出现时做了加粗处理,同一段落里反复出现时就不再加粗。

WebGPU 是啥?

WebGPU 是由 W3C GPU for the Web 社区组所发布的规范,目标是允许网页代码以高性能且安全可靠的方式访问 GPU 功能。它通过借鉴 Vulkan API,并将其转换为宿主硬件上使用的各式 API(如 DirectX、Metal、Vulkan)来实现这一目标。

wgpu 又是啥?

wgpu 是基于 WebGPU API 规范的、跨平台的、安全的、纯 Rust 图形 API。它是 Firefox、Servo 和 Deno 中 WebGPU 整合的核心。

wgpu 不仅可以在 Web 环境运行,还可以在 macOS / iOS、Android、Window 和 Linux 等系统上原生运行。

为什么要使用 Rust?

wgpu 实际上提供了 C 语言绑定 (wgpu-native),你可以写 C/C++ 或其他能与 C 互通的语言来使用它。尽管如此,wgpu 本身是用 Rust 实现的,它便利的 Rust 绑定能减少你使用中的阻碍。更重要的是,Rust 是一门高性能,内存和线程安全且极具生产力的现代底层语言。

在学习本教程之前你需要先熟悉 Rust,因为这里不会详细介绍 Rust 的语法知识。如果对 Rust 还不太熟悉,可以回顾一下 Rust 教程Rust 语言圣经。另外还需要熟悉 Rust 包管理工具 Cargo

为什么要学习 wgpu,直接用 JS/TS 搞 WebGPU 开发不香吗?

从 wgpu 及 dawn 这两个主要的 WebGPU 标准的实现库的开发动向可以看出,大量的扩展特性目前只有在 Native 端(Windows、macOS、Linux、iOS、Android)原生运行才能支持。wgpu 更是将 Native 端运行做为首要目标,WebGPU 是做为最低支持的特性集而存在。

使用 wgpu 在桌面及移动端做跨平台原生应用开发的体验极好,甚至我偏向于认为:WebGPU 更容易在 Native 端得到普及。因为不用受限于 1.0 标准啥时候发布,用户的浏览器是否支持等问题,现在就可以发布采用了 wgpu 的商业应用。

学习 wgpu 还有另一个重要的优势,那就是可以利用各种强大的桌面端 GPU 调试工具。在开发大型 2D/3D 应用时,通过使用命令记录/回放、帧捕捉、Buffer 视图等功能,可以快速定位 GPU 层代码/数据的性能瓶颈和程序缺陷。相较于仅依靠浏览器提供的有限调试能力,这些工具能够事半功倍,帮助开发者更快地解决问题。

wgpu/WebGPU 的学习资料是不是很少?

其实不用纠结于 WebGPU 方面的直接学习资料的多少。

WebGPU 就是一套图形接口,绝大部分概念都是各图形接口里通用的,任何一本经典图形学书籍都是我们的学习资料。 要利用好这些经典资料,前提仅仅就是要先学习一套图形接口。因为图形学的书不是使用统一的特定图形接口所写,先学会一个图形接口及常见的概念,然后再去深入学习某个方面的资料就会事半功倍。

现在学习 wgpu 是不是为时尚早?

WebGPU 1.0 API 已经稳定,Google 已经在 2023/4/6 宣布从 Chrome 113 版本开始正式支持 WebGPU

WebGPU Spec 1.0 补充一下 @Kangz 的话: Web 规范有点滑稽,因为“草案”或“推荐”之类的名称在很大程度上是一个管理细节,实际上对规范是否稳定可用没有任何影响。事实上,W3C 程序建议至少有两个浏览器在规范通过“草案”之前已经发布了兼容的实现,但显然这些浏览器会认为规范相当稳定,然后才愿意发布实现。然而,这确实令开发人员感到困惑,我们对此深表歉意。

如何运行示例代码

本教程的示例代码大部分放在 code/ 目录下,且示例程序的名称与程序目录同名。 比如,第一章 依赖与窗口 所有在的目录是 code/beginner/tutorial1-window, 此示例程序的名称也叫 tutorial1-window:

sh
# 在桌面环境本地运行
-cargo run --bin tutorial3-pipeline
-
-# 在浏览器中运行
-# 需要先安装 Rust WebAssembly target
-rustup target add wasm32-unknown-unknown
-# 使用 WebGPU(需要使用 Chrome/Edge 113+ 或 Chrome/Edge Canary,Canary 需手动开启 WebGPU 试验功能)
-# compute-pipeline, vertex-animation 及 hilbert-curve 示例只能在桌面端与浏览器端 WebGPU 环境运行
-cargo run-wasm --bin vertex-animation
-# 使用 WebGL 2.0
-cargo run-wasm --bin tutorial2-surface --features webgl

调试与集成 部分的代码是 2 个独立的项目: wgpu-in-appbevy-in-app

simuverse 是基于 wgpu + egui 的扩展示例,提供了粒子矢量场,流体场及 GPU 程序化纹理的实现。

如何开启浏览器 WebGPU 试验功能

FireFox

安装 Nightly 版本,在地址栏中输入 about:config , 将 dom.webgpu.enabled 设置为 true: FireFox Nightly

Chrome

Chrome 113+ 已经默认开启了 WebGPU 支持。如果安装的是 Canary 版,在地址栏中输入 chrome://flags , 将 Unsafe WebGPU 设置为 Enabled: Chrome Canary

Microsoft Edge

Microsoft Edge 113+ 也已默认开启了 WebGPU 支持。如果安装的是 Canary 版,在地址栏中输入 edge://flags , 将 Unsafe WebGPU Support 设置为 Enabled: Edge Canary

关于译者

我是一名移动端架构师,有多年使用 OpenGL ES / WebGL, Metal 的实践经验。2018 年开始接触 WebGPU,目前正积极地参与到 wgpu 开源项目的开发与完善之中,并且已于两年前在 AppStore 上架了基于 wgpu 实现的毛笔书法模拟 App 字习 Pro

加入 wgpu 微信学习交流群

',37);function g(b,d,m,f,w,C){const e=r("JoinWeiChatGroup");return s(),n("div",null,[h,t(e)])}const _=a(u,[["render",g]]);export{P as __pageData,_ as default}; diff --git a/assets/index.md.e2543842.lean.js b/assets/index.md.e2543842.lean.js deleted file mode 100644 index bc4a1360a..000000000 --- a/assets/index.md.e2543842.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as a,E as r,o as s,c as n,J as t,S as o}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/res/WebGPU-1.0.png",l="/learn-wgpu-zh/res/firefox.png",i="/learn-wgpu-zh/res/chrome.png",c="/learn-wgpu-zh/res/edge.png",P=JSON.parse('{"title":"介绍","description":"","frontmatter":{},"headers":[],"relativePath":"index.md","filePath":"index.md","lastUpdated":1701933923000}'),u={name:"index.md"},h=o("",37);function g(b,d,m,f,w,C){const e=r("JoinWeiChatGroup");return s(),n("div",null,[h,t(e)])}const _=a(u,[["render",g]]);export{P as __pageData,_ as default}; diff --git a/assets/indexed-pentagon.e22309a1.png b/assets/indexed-pentagon.hkUQ8suQ.png similarity index 100% rename from assets/indexed-pentagon.e22309a1.png rename to assets/indexed-pentagon.hkUQ8suQ.png diff --git a/assets/info.2afe1049.png b/assets/info.zIhwR3lv.png similarity index 100% rename from assets/info.2afe1049.png rename to assets/info.zIhwR3lv.png diff --git a/assets/integration-and-debugging_android_index.md.c7a02422.js b/assets/integration-and-debugging_android_index.md.c7a02422.js deleted file mode 100644 index 61951b8e7..000000000 --- a/assets/integration-and-debugging_android_index.md.c7a02422.js +++ /dev/null @@ -1,209 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/assets/tools.c24f4092.png",o="/learn-wgpu-zh/assets/on_android.fd40f73c.png",d=JSON.parse('{"title":"与 Android App 
集成","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/android/index.md","filePath":"integration-and-debugging/android/index.md","lastUpdated":1701933923000}'),e={name:"integration-and-debugging/android/index.md"},r=l('

与 Android App 集成

开发环境配置

假设你的电脑上已经安装了 Android Studio,从菜单栏打开 SDK 管理器(Tools > SDK Manager > Android SDK > SDK Tools),勾选以下 3 个选项后点击 OK 按钮确认:

  • Android SDK Build-Tools
  • Android SDK Command-line Tools
  • NDK(Side by side)

然后,设置如下两个系统环境变量:

sh
export ANDROID_SDK_ROOT=$HOME/Library/Android/sdk
-# 注意,此处需要替换为你电脑上安装的 NDK 的版本号
-export NDK_HOME=$ANDROID_SDK_ROOT/ndk/23.1.7779620

添加安卓构建目标支持

到目前为止,Android 模拟器和虚拟设备还不支持 Vulkan 图形 API(仅支持 OpenGL ES),所以开发或调试 wgpu 程序在 Android 系统上的运行时,建议使用真机(各种云测平台的云真机也行)。

如果需要支持模拟器运行,还得加上 x86_64-linux-androidi686-linux-android 这两个构建目标的支持。需要注意的是,如果指定了 wgpu 项目使用 Vulkan 图形后端(Instance::new(wgpu::Backends::VULKAN)),则在模拟内运行时会崩溃:

sh
rustup target add aarch64-linux-android armv7-linux-androideabi

自定义窗口对象

要实现一个 wgpu 里能使用的窗口对象,就必须实现 raw-window-handle 中 raw_window_handle() raw_display_handle() 这两个分别定义在 HasRawWindowHandle HasRawDisplayHandle trait 里的抽象接口。

实现 raw_display_handle() 最为简单, 只需要实例化一个空的 AndroidDisplayHandle 对象做为参数。查看 raw-window-handle 的源码就会发现,实现 raw_window_handle() 抽象接口需要用到 AndroidNdkWindowHandle 对象,此对象有一个叫 a_native_window 的字段,用来指向安卓 App 的 ANativeWindow 实例。 下面我们来一步步实现它。

先给项目添加必要的依赖:

toml
[target.'cfg(target_os = "android")'.dependencies]
-jni = "0.19"
-# 星号表示不锁定特定版本,在项目构建及运行时始终保持使用最新版本
-ndk-sys = "*"
-raw-window-handle = "0.5"

然后定义一个 NativeWindow 结构体,它只有一个叫 a_native_window 的字段:

rust
struct NativeWindow {
-    a_native_window: *mut ndk_sys::ANativeWindow,
-}
-impl NativeWindow {
-    // env 和 surface 都是安卓端传递过来的参数
-    fn new(env: *mut JNIEnv, surface: jobject) -> Self {
-        let a_native_window = unsafe {
-            // 获取与安卓端 surface 对象关联的 ANativeWindow,以便能通过 Rust 与之交互。
-            // 此函数在返回 ANativeWindow 的同时会自动将其引用计数 +1,以防止该对象在安卓端被意外释放。
-            ndk_sys::ANativeWindow_fromSurface(env as *mut _, surface as *mut _)
-        };
-        Self { a_native_window }
-    }
-}

最后给 NativeWindow 实现 raw-window-handle 抽象接口:

rust
unsafe impl HasRawWindowHandle for NativeWindow {
-    fn raw_window_handle(&self) -> RawWindowHandle {
-        let mut handle = AndroidNdkWindowHandle::empty();
-        handle.a_native_window = self.a_native_window as *mut _ as *mut c_void;
-        RawWindowHandle::AndroidNdk(handle)
-    }
-}
-
-unsafe impl HasRawDisplayHandle for NativeWindow {
-    fn raw_display_handle(&self) -> RawDisplayHandle {
-        RawDisplayHandle::Android(AndroidDisplayHandle::empty())
-    }
-}

定义 FFI

Rust 有一个关键字 extern(kotlin 中定义 JNI 函数时也有一个对应的关键字叫 external, 我们接下来会用到),当需要与其他语言编写的代码进行交互时,用于创建和使用外部函数接口(FFI,Foreign Function Interface)。FFI 是一种编程语言定义函数的方式,可以让不同的(外部)编程语言调用这些函数。

在 Rust 这一端,我们通过给公开函数添加 #[no_mangle] 属性来允许安卓端调用此函数:

rust
#[no_mangle]
-#[jni_fn("name.jinleili.wgpu.RustBridge")]
-pub fn createWgpuCanvas(env: *mut JNIEnv, _: JClass, surface: jobject, idx: jint) -> jlong {
-    android_logger::init_once(Config::default().with_min_level(Level::Trace));
-    let canvas = WgpuCanvas::new(AppSurface::new(env as *mut _, surface), idx as i32);
-    info!("WgpuCanvas created!");
-    // 使用 Box 对 Rust 对象进行装箱操作。
-    // 我们无法将 Rust 对象直接传递给外部语言,通过装箱来传递此对象的裸指针 
-    // into_raw 返回指针的同时,也将此对象的内存管理权转交给调用方
-    Box::into_raw(Box::new(canvas)) as jlong
-}
-
-#[no_mangle]
-#[jni_fn("name.jinleili.wgpu.RustBridge")]
-pub fn enterFrame(_env: *mut JNIEnv, _: JClass, obj: jlong) {
-    // 直接获取到指针指代的 Rust 对象的可变借用
-    let obj = unsafe { &mut *(obj as *mut WgpuCanvas) };
-    obj.enter_frame();
-}

#[no_mangle] 属性告诉 Rust 关闭函数名称修改功能。如果不加这个属性,Rust 编译器就会修改函数名,这是现代编译器为了解决唯⼀名称解析引起的各种问题所引⼊的技术。如果函数名被修改了,外部编程语言就⽆法按原名称调⽤,开发者也没办法知道修改后的函数名。

#[jni_fn("XXX")] 这个函数签名属性需要重点介绍一下,做过安卓 JNI 开发的都知道,JNI 函数的签名是又臭又长,比如上面的 createWgpuCanvas 函数,手写符合 JNI 规范的函数签名就会是 Java_name_jinleili_wgpu_RustBridge_createWgpuCanvas 这样,难写且难维护 #[jni_fn("name.jinleili.wgpu.RustBridge")] 这个属性能自动帮我们生成兼容 JNI 的函数签名,使正确编写函数签名变得更加容易。为此,我们需要 jni_fn 依赖项:

toml
[target.'cfg(target_os = "android")'.dependencies]
-jni_fn = "0.1"
-# 其它依赖项

在安卓端,我们定义一个命名空间为 name.jinleili.wgpuRustBridge 类来加载 Rust 程序,并使用 external 关键字标记好具体实现在 Rust 端的外部函数声明:

kotlin
package name.jinleili.wgpu
-
-import android.view.Surface
-
-class RustBridge {
-    init {
-        System.loadLibrary("wgpu_on_app")
-    }
-
-    external fun createWgpuCanvas(surface: Surface, idx: Int): Long
-    external fun enterFrame(rustObj: Long)
-    // ...
-}

你可以使用任意符合安卓规范的命名空间,只需要记得让 Rust 端 #[jni_fn("")] 属性里的字符串与安卓端代码里的命名空间一致。

实现 cargo so 子命令

实现 so 子命令的目的是为了一劳永逸地解决 Rust 项目配置 Android NDK 链接的问题。如果你对如何给 wgpu 项目手动配置 NDK 感兴趣,Mozilla 的这篇文章有详细的步骤。 so 子命令的代码非常简单,而且我已经将它发布到了 Rust 的包注册网站 crates.io, 可以直接安装使用:

rust
let args = std::env::args();
-match Subcommand::new(args, "so", |_, _| Ok(false)) {
-    Ok(cmd) => match cmd.cmd() {
-        "build" | "b" => {
-            let ndk = Ndk::from_env().unwrap();
-            let build_targets = if let Some(target) = cmd.target() {
-                vec![Target::from_rust_triple(target).ok().unwrap()]
-            } else {
-                vec![
-                    Target::Arm64V8a,
-                    Target::ArmV7a,
-                    Target::X86,
-                    Target::X86_64,
-                ]
-            };
-            for target in build_targets {
-                let triple = target.rust_triple();
-                // setting ar, linker value
-                let mut cargo = cargo_ndk(&ndk, target, 24).unwrap();
-                cargo.arg("rustc");
-                if cmd.target().is_none() {
-                    cargo.arg("--target").arg(triple);
-                }
-                cargo.args(cmd.args());
-                if ndk.build_tag() > 7272597 {
-                    if !cmd.args().contains(&"--".to_owned()) {
-                        cargo.arg("--");
-                    }
-                    let gcc_link_dir = cmd.target_dir().join("gcc-temp-extra-link-libraries");
-                    let _ = std::fs::create_dir_all(&gcc_link_dir);
-                    std::fs::write(gcc_link_dir.join("libgcc.a"), "INPUT(-lunwind)")
-                        .expect("Failed to write");
-                    cargo.arg("-L").arg(gcc_link_dir);
-                }
-
-                if !cargo.status().unwrap().success() {
-                    println!("{:?}", NdkError::CmdFailed(cargo));
-                }
-            }
-        }
-        _ => {}
-    },
-    Err(_) => {}
-};

编译为 .so 库文件

首先,安装我们上面实现 so 子命令:

sh
# 从 crates.io 安装
-cargo install cargo-so
-# 或者
-# 也可以从源码安装
-cargo install --path ./cargo-so

然后,使用 so 子命令来构建 wgpu 项目:

sh
# 将 wgpu 程序构建为 Android .so 库文件
-cargo so b --lib --target aarch64-linux-android --release
-cargo so b --lib --target armv7-linux-androideabi --release
-
-# 将 .so 复制到安卓项目的 jniLibs/ 目录下
-cp target/aarch64-linux-android/release/libwgpu_on_app.so android/app/libs/arm64-v8a/libwgpu_on_app.so
-cp target/armv7-linux-androideabi/release/libwgpu_on_app.so android/app/libs/armeabi-v7a/libwgpu_on_app.so

我们还可以上面的构建与复制命令放进一个 .sh 命令行文件内,之后编译项目时只需要执行此命令行文件就可以了:

sh
sh ./release.sh

自定义 WGPUSurfaceView

安卓视图组件 SurfaceView 提供了一个可嵌入在视图层级结构中的专用于绘制的视图。它负责绘制表面(Surface)在屏幕上的正确位置,还控制着绘制表面的像素格式及分辨率大小。 SurfaceView 持有的绘制表面是独立于 App 窗口的,可以在单独的线程中进行绘制而不占用主线程资源。所以使用 SurfaceView 可以实现复杂而高效的渲染(比如,游戏、视频播放、相机预览等),且不会阻塞用户交互(触摸、键盘输入等)的响应。

安卓系统上的绘制表面是纵深排序(Z-Ordered)的,它默认处在 App 窗口的后面, SurfaceView 通过在 App 窗口上面设置透明区域来展示处在后面的绘制表面。 如果想将绘制表面放置到窗口的最上层,可以通过 setZOrderOnTop() 函数来实现:

kotlin
mySurfaceView.setZOrderOnTop(true)

这里有必要多解释一句:wgpu 里的 Surface 对象虽然最终指向的就是 SurfaceView 持有的绘制表面,但它是一个经过统一封装的结构体,所以并不是同一个对象:

rust
pub struct Surface {
-    pub(crate) presentation: Option<Presentation>,
-    #[cfg(vulkan)]
-    pub vulkan: Option<HalSurface<hal::api::Vulkan>>,
-    #[cfg(metal)]
-    pub metal: Option<HalSurface<hal::api::Metal>>,
-    #[cfg(dx12)]
-    pub dx12: Option<HalSurface<hal::api::Dx12>>,
-    #[cfg(dx11)]
-    pub dx11: Option<HalSurface<hal::api::Dx11>>,
-    #[cfg(gl)]
-    pub gl: Option<HalSurface<hal::api::Gles>>,
-}

窗口的视图层级结构决定了与绘制表面的正确合成,也就是说,绘制表面的展示会受到视图层级关系的影响,在 SurfaceView 所处层级之上的视图会覆盖(遮挡)在合成后的绘制表面之上。 需要注意的是,如果覆盖内容存在透明度,则每次绘制表面渲染完成后,都会进行一次完整的 alpha 混合合成,这会对性能产生不利影响。

我们只能通过 SurfaceHolder 接口来访问绘制表面。当 SurfaceView 在窗口中可见时,绘制表面就会被创建,而不可见时(比如,App 被切换到后台运行)绘制表面会被销毁,所以需要实现 SurfaceHolder 的回调接口 surfaceCreatedsurfaceDestroyed 来发现绘制表面的创建和销毁。 下边的代码实现了一个继承自 SurfaceViewWGPUSurfaceView

kotlin
// 为当前类实现 SurfaceHolder 的回调接口
-class WGPUSurfaceView : SurfaceView, SurfaceHolder.Callback2 {
-    private var rustBrige = RustBridge()
-    // Rust 对象的指针
-    private var wgpuObj: Long = Long.MAX_VALUE
-    private var idx: Int = 0
-
-    //...
-
-    init {
-        // 将当前类设置为 SurfaceHolder 的回调接口代理
-        holder.addCallback(this)
-    }
-
-    // 绘制表面被创建后,创建/重新创建 wgpu 对象
-    override fun surfaceCreated(holder: SurfaceHolder) {
-        holder.let { h ->
-            wgpuObj = rustBrige.createWgpuCanvas(h.surface, this.idx)
-            // SurfaceView 默认不会自动开始绘制,setWillNotDraw(false) 用于通知 App 已经准备好开始绘制了。
-            setWillNotDraw(false)
-        }
-    }
-
-    // 绘制表面被销毁后,也销毁 wgpu 对象
-    override fun surfaceDestroyed(holder: SurfaceHolder) {
-        if (wgpuObj != Long.MAX_VALUE) {
-            rustBrige.dropWgpuCanvas(wgpuObj)
-            wgpuObj = Long.MAX_VALUE
-        }
-    }
-
-    override fun draw(canvas: Canvas?) {
-        super.draw(canvas)
-        // 考虑到边界情况,这个条件判断不能省略
-        if (wgpuObj == Long.MAX_VALUE) {
-            return
-        }
-        rustBrige.enterFrame(wgpuObj)
-        // invalidate() 函数通知通知 App,在下一个 UI 刷新周期重新调用 draw() 函数 
-        invalidate()
-    }
-}

App 中加载 WGPUSurfaceView

现在可以在 Activity 或 Fragment(此处仅指安卓 Fragment,与着色器里的片元无关)里加载 WGPUSurfaceView 实例了,通过 XML 或者 Java/Kotlin 代码来加载很常见,下面我们来看看在安卓上的新一代 UI 开发框架 Jetpack Compose 中如何加载:

kotlin
class MainActivity : ComponentActivity() {
-    override fun onCreate(savedInstanceState: Bundle?) {
-        super.onCreate(savedInstanceState)
-
-        setContent {
-            MyApplicationTheme {
-                Surface(
-                    modifier = Modifier.fillMaxSize(),
-                    color = colorResource(id = R.color.white)
-                ) {
-                    SurfaceCard()
-                }
-            }
-        }
-    }
-}
-
-@Composable
-fun SurfaceCard() {
-    val screenWidth = LocalConfiguration.current.screenWidthDp.dp
-    Column(modifier = Modifier.fillMaxSize()) {
-        Row(
-            verticalAlignment = Alignment.CenterVertically,
-            horizontalArrangement = Arrangement.Center
-        ) {
-            Text(text = "wgpu on Android", fontSize = 20.sp, fontWeight = FontWeight.Bold)
-        }
-        // ...
-
-        // 通过 AndroidView 容器来加载我们的 WGPUSurfaceView
-        AndroidView(
-            factory = { ctx ->
-                WGPUSurfaceView(context = ctx)
-            },
-            modifier = Modifier
-                .fillMaxWidth()
-                .height(screenWidth),
-        )
-    }
-}

基于以上代码,我写了一个叫 wgpu-in-app 的示例程序,效果如下:

',56),c=[r];function t(D,y,C,A,F,i){return n(),a("div",null,c)}const b=s(e,[["render",t]]);export{d as __pageData,b as default}; diff --git a/assets/integration-and-debugging_android_index.md.c7a02422.lean.js b/assets/integration-and-debugging_android_index.md.c7a02422.lean.js deleted file mode 100644 index ae2a8e8b9..000000000 --- a/assets/integration-and-debugging_android_index.md.c7a02422.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/assets/tools.c24f4092.png",o="/learn-wgpu-zh/assets/on_android.fd40f73c.png",d=JSON.parse('{"title":"与 Android App 集成","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/android/index.md","filePath":"integration-and-debugging/android/index.md","lastUpdated":1701933923000}'),e={name:"integration-and-debugging/android/index.md"},r=l("",56),c=[r];function t(D,y,C,A,F,i){return n(),a("div",null,c)}const b=s(e,[["render",t]]);export{d as __pageData,b as default}; diff --git a/assets/integration-and-debugging_android_index.md.t6gE4WOH.js b/assets/integration-and-debugging_android_index.md.t6gE4WOH.js new file mode 100644 index 000000000..a71022ef3 --- /dev/null +++ b/assets/integration-and-debugging_android_index.md.t6gE4WOH.js @@ -0,0 +1,209 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const l="/learn-wgpu-zh/assets/tools.Eg3x9gRT.png",p="/learn-wgpu-zh/assets/on_android.ZlmWizZN.png",o=JSON.parse('{"title":"与 Android App 集成","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/android/index.md","filePath":"integration-and-debugging/android/index.md","lastUpdated":1703303099000}'),h={name:"integration-and-debugging/android/index.md"},k=n('

与 Android App 集成

开发环境配置

假设你的电脑上已经安装了 Android Studio,从菜单栏打开 SDK 管理器(Tools > SDK Manager > Android SDK > SDK Tools),勾选以下 3 个选项后点击 OK 按钮确认:

  • Android SDK Build-Tools
  • Android SDK Command-line Tools
  • NDK(Side by side)

然后,设置如下两个系统环境变量:

sh
export ANDROID_SDK_ROOT=$HOME/Library/Android/sdk
+# 注意,此处需要替换为你电脑上安装的 NDK 的版本号
+export NDK_HOME=$ANDROID_SDK_ROOT/ndk/23.1.7779620

添加安卓构建目标支持

到目前为止,Android 模拟器和虚拟设备还不支持 Vulkan 图形 API(仅支持 OpenGL ES),所以开发或调试 wgpu 程序在 Android 系统上的运行时,建议使用真机(各种云测平台的云真机也行)。

如果需要支持模拟器运行,还得加上 x86_64-linux-androidi686-linux-android 这两个构建目标的支持。需要注意的是,如果指定了 wgpu 项目使用 Vulkan 图形后端(Instance::new(wgpu::Backends::VULKAN)),则在模拟内运行时会崩溃:

sh
rustup target add aarch64-linux-android armv7-linux-androideabi

自定义窗口对象

要实现一个 wgpu 里能使用的窗口对象,就必须实现 raw-window-handle 中 raw_window_handle() raw_display_handle() 这两个分别定义在 HasRawWindowHandle HasRawDisplayHandle trait 里的抽象接口。

实现 raw_display_handle() 最为简单, 只需要实例化一个空的 AndroidDisplayHandle 对象做为参数。查看 raw-window-handle 的源码就会发现,实现 raw_window_handle() 抽象接口需要用到 AndroidNdkWindowHandle 对象,此对象有一个叫 a_native_window 的字段,用来指向安卓 App 的 ANativeWindow 实例。 下面我们来一步步实现它。

先给项目添加必要的依赖:

toml
[target.'cfg(target_os = "android")'.dependencies]
+jni = "0.19"
+# 星号表示不锁定特定版本,在项目构建及运行时始终保持使用最新版本
+ndk-sys = "*"
+raw-window-handle = "0.5"

然后定义一个 NativeWindow 结构体,它只有一个叫 a_native_window 的字段:

rust
struct NativeWindow {
+    a_native_window: *mut ndk_sys::ANativeWindow,
+}
+impl NativeWindow {
+    // env 和 surface 都是安卓端传递过来的参数
+    fn new(env: *mut JNIEnv, surface: jobject) -> Self {
+        let a_native_window = unsafe {
+            // 获取与安卓端 surface 对象关联的 ANativeWindow,以便能通过 Rust 与之交互。
+            // 此函数在返回 ANativeWindow 的同时会自动将其引用计数 +1,以防止该对象在安卓端被意外释放。
+            ndk_sys::ANativeWindow_fromSurface(env as *mut _, surface as *mut _)
+        };
+        Self { a_native_window }
+    }
+}

最后给 NativeWindow 实现 raw-window-handle 抽象接口:

rust
unsafe impl HasRawWindowHandle for NativeWindow {
+    fn raw_window_handle(&self) -> RawWindowHandle {
+        let mut handle = AndroidNdkWindowHandle::empty();
+        handle.a_native_window = self.a_native_window as *mut _ as *mut c_void;
+        RawWindowHandle::AndroidNdk(handle)
+    }
+}
+
+unsafe impl HasRawDisplayHandle for NativeWindow {
+    fn raw_display_handle(&self) -> RawDisplayHandle {
+        RawDisplayHandle::Android(AndroidDisplayHandle::empty())
+    }
+}

定义 FFI

Rust 有一个关键字 extern(kotlin 中定义 JNI 函数时也有一个对应的关键字叫 external, 我们接下来会用到),当需要与其他语言编写的代码进行交互时,用于创建和使用外部函数接口(FFI,Foreign Function Interface)。FFI 是一种编程语言定义函数的方式,可以让不同的(外部)编程语言调用这些函数。

在 Rust 这一端,我们通过给公开函数添加 #[no_mangle] 属性来允许安卓端调用此函数:

rust
#[no_mangle]
+#[jni_fn("name.jinleili.wgpu.RustBridge")]
+pub fn createWgpuCanvas(env: *mut JNIEnv, _: JClass, surface: jobject, idx: jint) -> jlong {
+    android_logger::init_once(Config::default().with_min_level(Level::Trace));
+    let canvas = WgpuCanvas::new(AppSurface::new(env as *mut _, surface), idx as i32);
+    info!("WgpuCanvas created!");
+    // 使用 Box 对 Rust 对象进行装箱操作。
+    // 我们无法将 Rust 对象直接传递给外部语言,通过装箱来传递此对象的裸指针 
+    // into_raw 返回指针的同时,也将此对象的内存管理权转交给调用方
+    Box::into_raw(Box::new(canvas)) as jlong
+}
+
+#[no_mangle]
+#[jni_fn("name.jinleili.wgpu.RustBridge")]
+pub fn enterFrame(_env: *mut JNIEnv, _: JClass, obj: jlong) {
+    // 直接获取到指针指代的 Rust 对象的可变借用
+    let obj = unsafe { &mut *(obj as *mut WgpuCanvas) };
+    obj.enter_frame();
+}

#[no_mangle] 属性告诉 Rust 关闭函数名称修改功能。如果不加这个属性,Rust 编译器就会修改函数名,这是现代编译器为了解决唯⼀名称解析引起的各种问题所引⼊的技术。如果函数名被修改了,外部编程语言就⽆法按原名称调⽤,开发者也没办法知道修改后的函数名。

#[jni_fn("XXX")] 这个函数签名属性需要重点介绍一下,做过安卓 JNI 开发的都知道,JNI 函数的签名是又臭又长,比如上面的 createWgpuCanvas 函数,手写符合 JNI 规范的函数签名就会是 Java_name_jinleili_wgpu_RustBridge_createWgpuCanvas 这样,难写且难维护 #[jni_fn("name.jinleili.wgpu.RustBridge")] 这个属性能自动帮我们生成兼容 JNI 的函数签名,使正确编写函数签名变得更加容易。为此,我们需要 jni_fn 依赖项:

toml
[target.'cfg(target_os = "android")'.dependencies]
+jni_fn = "0.1"
+# 其它依赖项

在安卓端,我们定义一个命名空间为 name.jinleili.wgpuRustBridge 类来加载 Rust 程序,并使用 external 关键字标记好具体实现在 Rust 端的外部函数声明:

kotlin
package name.jinleili.wgpu
+
+import android.view.Surface
+
+class RustBridge {
+    init {
+        System.loadLibrary("wgpu_on_app")
+    }
+
+    external fun createWgpuCanvas(surface: Surface, idx: Int): Long
+    external fun enterFrame(rustObj: Long)
+    // ...
+}

你可以使用任意符合安卓规范的命名空间,只需要记得让 Rust 端 #[jni_fn("")] 属性里的字符串与安卓端代码里的命名空间一致。

实现 cargo so 子命令

实现 so 子命令的目的是为了一劳永逸地解决 Rust 项目配置 Android NDK 链接的问题。如果你对如何给 wgpu 项目手动配置 NDK 感兴趣,Mozilla 的这篇文章有详细的步骤。 so 子命令的代码非常简单,而且我已经将它发布到了 Rust 的包注册网站 crates.io, 可以直接安装使用:

rust
let args = std::env::args();
+match Subcommand::new(args, "so", |_, _| Ok(false)) {
+    Ok(cmd) => match cmd.cmd() {
+        "build" | "b" => {
+            let ndk = Ndk::from_env().unwrap();
+            let build_targets = if let Some(target) = cmd.target() {
+                vec![Target::from_rust_triple(target).ok().unwrap()]
+            } else {
+                vec![
+                    Target::Arm64V8a,
+                    Target::ArmV7a,
+                    Target::X86,
+                    Target::X86_64,
+                ]
+            };
+            for target in build_targets {
+                let triple = target.rust_triple();
+                // setting ar, linker value
+                let mut cargo = cargo_ndk(&ndk, target, 24).unwrap();
+                cargo.arg("rustc");
+                if cmd.target().is_none() {
+                    cargo.arg("--target").arg(triple);
+                }
+                cargo.args(cmd.args());
+                if ndk.build_tag() > 7272597 {
+                    if !cmd.args().contains(&"--".to_owned()) {
+                        cargo.arg("--");
+                    }
+                    let gcc_link_dir = cmd.target_dir().join("gcc-temp-extra-link-libraries");
+                    let _ = std::fs::create_dir_all(&gcc_link_dir);
+                    std::fs::write(gcc_link_dir.join("libgcc.a"), "INPUT(-lunwind)")
+                        .expect("Failed to write");
+                    cargo.arg("-L").arg(gcc_link_dir);
+                }
+
+                if !cargo.status().unwrap().success() {
+                    println!("{:?}", NdkError::CmdFailed(cargo));
+                }
+            }
+        }
+        _ => {}
+    },
+    Err(_) => {}
+};

编译为 .so 库文件

首先,安装我们上面实现 so 子命令:

sh
# 从 crates.io 安装
+cargo install cargo-so
+# 或者
+# 也可以从源码安装
+cargo install --path ./cargo-so

然后,使用 so 子命令来构建 wgpu 项目:

sh
# 将 wgpu 程序构建为 Android .so 库文件
+cargo so b --lib --target aarch64-linux-android --release
+cargo so b --lib --target armv7-linux-androideabi --release
+
+# 将 .so 复制到安卓项目的 jniLibs/ 目录下
+cp target/aarch64-linux-android/release/libwgpu_on_app.so android/app/libs/arm64-v8a/libwgpu_on_app.so
+cp target/armv7-linux-androideabi/release/libwgpu_on_app.so android/app/libs/armeabi-v7a/libwgpu_on_app.so

我们还可以上面的构建与复制命令放进一个 .sh 命令行文件内,之后编译项目时只需要执行此命令行文件就可以了:

sh
sh ./release.sh

自定义 WGPUSurfaceView

安卓视图组件 SurfaceView 提供了一个可嵌入在视图层级结构中的专用于绘制的视图。它负责绘制表面(Surface)在屏幕上的正确位置,还控制着绘制表面的像素格式及分辨率大小。 SurfaceView 持有的绘制表面是独立于 App 窗口的,可以在单独的线程中进行绘制而不占用主线程资源。所以使用 SurfaceView 可以实现复杂而高效的渲染(比如,游戏、视频播放、相机预览等),且不会阻塞用户交互(触摸、键盘输入等)的响应。

安卓系统上的绘制表面是纵深排序(Z-Ordered)的,它默认处在 App 窗口的后面, SurfaceView 通过在 App 窗口上面设置透明区域来展示处在后面的绘制表面。 如果想将绘制表面放置到窗口的最上层,可以通过 setZOrderOnTop() 函数来实现:

kotlin
mySurfaceView.setZOrderOnTop(true)

这里有必要多解释一句:wgpu 里的 Surface 对象虽然最终指向的就是 SurfaceView 持有的绘制表面,但它是一个经过统一封装的结构体,所以并不是同一个对象:

rust
pub struct Surface {
+    pub(crate) presentation: Option<Presentation>,
+    #[cfg(vulkan)]
+    pub vulkan: Option<HalSurface<hal::api::Vulkan>>,
+    #[cfg(metal)]
+    pub metal: Option<HalSurface<hal::api::Metal>>,
+    #[cfg(dx12)]
+    pub dx12: Option<HalSurface<hal::api::Dx12>>,
+    #[cfg(dx11)]
+    pub dx11: Option<HalSurface<hal::api::Dx11>>,
+    #[cfg(gl)]
+    pub gl: Option<HalSurface<hal::api::Gles>>,
+}

窗口的视图层级结构决定了与绘制表面的正确合成,也就是说,绘制表面的展示会受到视图层级关系的影响,在 SurfaceView 所处层级之上的视图会覆盖(遮挡)在合成后的绘制表面之上。 需要注意的是,如果覆盖内容存在透明度,则每次绘制表面渲染完成后,都会进行一次完整的 alpha 混合合成,这会对性能产生不利影响。

我们只能通过 SurfaceHolder 接口来访问绘制表面。当 SurfaceView 在窗口中可见时,绘制表面就会被创建,而不可见时(比如,App 被切换到后台运行)绘制表面会被销毁,所以需要实现 SurfaceHolder 的回调接口 surfaceCreatedsurfaceDestroyed 来发现绘制表面的创建和销毁。 下边的代码实现了一个继承自 SurfaceViewWGPUSurfaceView

kotlin
// 为当前类实现 SurfaceHolder 的回调接口
+class WGPUSurfaceView : SurfaceView, SurfaceHolder.Callback2 {
+    private var rustBrige = RustBridge()
+    // Rust 对象的指针
+    private var wgpuObj: Long = Long.MAX_VALUE
+    private var idx: Int = 0
+
+    //...
+
+    init {
+        // 将当前类设置为 SurfaceHolder 的回调接口代理
+        holder.addCallback(this)
+    }
+
+    // 绘制表面被创建后,创建/重新创建 wgpu 对象
+    override fun surfaceCreated(holder: SurfaceHolder) {
+        holder.let { h ->
+            wgpuObj = rustBrige.createWgpuCanvas(h.surface, this.idx)
+            // SurfaceView 默认不会自动开始绘制,setWillNotDraw(false) 用于通知 App 已经准备好开始绘制了。
+            setWillNotDraw(false)
+        }
+    }
+
+    // 绘制表面被销毁后,也销毁 wgpu 对象
+    override fun surfaceDestroyed(holder: SurfaceHolder) {
+        if (wgpuObj != Long.MAX_VALUE) {
+            rustBrige.dropWgpuCanvas(wgpuObj)
+            wgpuObj = Long.MAX_VALUE
+        }
+    }
+
+    override fun draw(canvas: Canvas?) {
+        super.draw(canvas)
+        // 考虑到边界情况,这个条件判断不能省略
+        if (wgpuObj == Long.MAX_VALUE) {
+            return
+        }
+        rustBrige.enterFrame(wgpuObj)
+        // invalidate() 函数通知通知 App,在下一个 UI 刷新周期重新调用 draw() 函数 
+        invalidate()
+    }
+}

App 中加载 WGPUSurfaceView

现在可以在 Activity 或 Fragment(此处仅指安卓 Fragment,与着色器里的片元无关)里加载 WGPUSurfaceView 实例了,通过 XML 或者 Java/Kotlin 代码来加载很常见,下面我们来看看在安卓上的新一代 UI 开发框架 Jetpack Compose 中如何加载:

kotlin
class MainActivity : ComponentActivity() {
+    override fun onCreate(savedInstanceState: Bundle?) {
+        super.onCreate(savedInstanceState)
+
+        setContent {
+            MyApplicationTheme {
+                Surface(
+                    modifier = Modifier.fillMaxSize(),
+                    color = colorResource(id = R.color.white)
+                ) {
+                    SurfaceCard()
+                }
+            }
+        }
+    }
+}
+
+@Composable
+fun SurfaceCard() {
+    val screenWidth = LocalConfiguration.current.screenWidthDp.dp
+    Column(modifier = Modifier.fillMaxSize()) {
+        Row(
+            verticalAlignment = Alignment.CenterVertically,
+            horizontalArrangement = Arrangement.Center
+        ) {
+            Text(text = "wgpu on Android", fontSize = 20.sp, fontWeight = FontWeight.Bold)
+        }
+        // ...
+
+        // 通过 AndroidView 容器来加载我们的 WGPUSurfaceView
+        AndroidView(
+            factory = { ctx ->
+                WGPUSurfaceView(context = ctx)
+            },
+            modifier = Modifier
+                .fillMaxWidth()
+                .height(screenWidth),
+        )
+    }
+}

基于以上代码,我写了一个叫 wgpu-in-app 的示例程序,效果如下:

',56),e=[k];function t(r,d,E,g,c,y){return i(),a("div",null,e)}const u=s(h,[["render",t]]);export{o as __pageData,u as default}; diff --git a/assets/integration-and-debugging_android_index.md.t6gE4WOH.lean.js b/assets/integration-and-debugging_android_index.md.t6gE4WOH.lean.js new file mode 100644 index 000000000..b34224e3f --- /dev/null +++ b/assets/integration-and-debugging_android_index.md.t6gE4WOH.lean.js @@ -0,0 +1 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const l="/learn-wgpu-zh/assets/tools.Eg3x9gRT.png",p="/learn-wgpu-zh/assets/on_android.ZlmWizZN.png",o=JSON.parse('{"title":"与 Android App 集成","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/android/index.md","filePath":"integration-and-debugging/android/index.md","lastUpdated":1703303099000}'),h={name:"integration-and-debugging/android/index.md"},k=n("",56),e=[k];function t(r,d,E,g,c,y){return i(),a("div",null,e)}const u=s(h,[["render",t]]);export{o as __pageData,u as default}; diff --git a/assets/integration-and-debugging_bevy_index.md.7becd25a.js b/assets/integration-and-debugging_bevy_index.md.7becd25a.js deleted file mode 100644 index b60a5f473..000000000 --- a/assets/integration-and-debugging_bevy_index.md.7becd25a.js +++ /dev/null @@ -1,262 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/assets/bevy_in_android.6d8b91d5.png",o="/learn-wgpu-zh/assets/bevy_in_ios.7ff2933b.png",d=JSON.parse('{"title":"在 iOS Android App 中集成 Bevy 游戏引擎","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/bevy/index.md","filePath":"integration-and-debugging/bevy/index.md","lastUpdated":1701933923000}'),e={name:"integration-and-debugging/bevy/index.md"},r=l(`

在 iOS Android App 中集成 Bevy 游戏引擎

认识 Bevy

Bevy 是一个开源、跨平台的 Rust 游戏引擎,设计目的是提供一个简单、高效且易于使用的游戏开发框架。它的特点包括:

  • 模块化设计:游戏引擎的各个组件皆为单独的模块,方便选择需要的组件并扩展。
  • 灵活的插件系统:支持自定义插件,可以创建并集成自己的插件。
  • 易于使用的 API:API 简单易懂,帮助快速开始游戏开发。
  • 强大的渲染系统:使用了 wgpu 作为渲染后端,以提供强大的图形渲染能力。
  • 跨平台:除了支持在 Windows、MacOS 和 Linux 桌面系统及 iOS、Android 移动设备上运行,还能在支持 WebGPU 的浏览器上运行。

Bevy 是一个适合新手的游戏引擎,其简单性和灵活性使得能够轻松地开始游戏开发,随着经验的增加,它也能满足更高级的定制需求。

需求场景

如果需要给已有的 App 添加一个开屏小游戏,或者实现一些动态 UI 组件、图表...,又或者只是想充分利用手机上的 Motion Sensors 来实现令人惊艳的游戏体验,那么就不能使用 Bevy 默认的 WinitPlugin 了。因为 winit 会完全控制 App 的初始化过程和窗口,而我们需要的是在已有的 App 实例中创建 bevy::App, 并且我们可能还希望 bevy::App 能在任意大小的 iOS UIView 或 Android SurfaceView 中运行。

本章我们将逐步实现一个此类场景,并且利用手机的 Motion Sensor 来玩 breakout 小游戏。

Bevy 中的窗口插件

Bevy 中有两个窗口插件:WindowPluginWinitPlugin

WindowPlugin 是 Bevy 中构建游戏或应用程序的基础插件, 它提供了一套简单易用的接口来管理窗口的属性(标题、是否可见...)、行为(获取焦点,缩放约束,窗口显示层级...)、与事件(鼠标、键盘、触摸事件...)。WindowPlugin 并不负责实际的窗口创建,它需要与其他插件配合使用

WinitPlugin 为 Bevy 提供实际的窗口及事件循环的创建与管理。顾名思义,它依赖了 winit 窗口管理库。

下面通过源码来看看 WinitPlugin 是如何完成工作的:

rust
// crates/bevy_winit/src/lib.rs
-
-#[derive(Default)]
-pub struct WinitPlugin;
-
-impl Plugin for WinitPlugin {
-    fn build(&self, app: &mut App) {
-        let mut event_loop_builder = EventLoopBuilder::<()>::with_user_event();
-        let event_loop = event_loop_builder.build();
-        app.insert_non_send_resource(event_loop);
-
-        app.init_non_send_resource::<WinitWindows>()
-            .init_resource::<WinitSettings>()
-            .set_runner(winit_runner)
-            .add_systems(...);
-        // ...
-    }
-}

WinitPlugin 是个实现了 Plugin trait 的空结构体,在 build 函数内向 app World 内添分别加了 EventLoopWinitWindowsWinitSettings 3 项资源,WinitWindows 用于随后创建并保存窗口实例:

rust
// crates/bevy_winit/src/winit_windows.rs
-
-#[derive(Debug, Default)]
-pub struct WinitWindows {
-    pub windows: HashMap<winit::window::WindowId, winit::window::Window>,
-    // ...
-}

然后, 设置了一个叫 winit_runner 的 runner 函数,这个函数在用户调用 app.run() 时会被自动执行。

winit_runner() 内调用了 create_window() 来完成最终的窗口创建:

rust
// crates/bevy_winit/src/lib.rs
-
-pub fn winit_runner(mut app: App) {
-    // 取出在 build() 中创建的事件循环实例
-    let mut event_loop = app
-        .world
-        .remove_non_send_resource::<EventLoop<()>>()
-        .unwrap();
-
-    // 创建一个新的系统状态
-    let mut create_window_system_state: SystemState<(Commands, Query<(Entity, &mut Window),
-    Added<Window>>, EventWriter<WindowCreated>,NonSendMut<WinitWindows>)>
-        = SystemState::from_world(&mut app.world);
-
-    let event_handler = move |event: Event<()>,
-                              event_loop: &EventLoopWindowTarget<()>,
-                              control_flow: &mut ControlFlow| {
-        // ...
-        // 创建新窗口
-        let (commands, mut new_windows, created_window_writer, winit_windows) =
-                create_window_system_state.get_mut(&mut app.world);
-        create_window(
-            commands,
-            event_loop,
-            new_windows.iter_mut(),
-            created_window_writer,
-            winit_windows,
-            ...
-        );
-        // ...
-
-}
-
-// crates/bevy_winit/src/system.rs
-
-pub(crate) fn create_window<'a>(
-    mut commands: Commands,
-    event_loop: &EventLoopWindowTarget<()>,
-    created_windows: impl Iterator<Item = (Entity, Mut<'a, Window>)>,
-    mut event_writer: EventWriter<WindowCreated>,
-    mut winit_windows: NonSendMut<WinitWindows>,
-) {
-    for (entity, mut window) in created_windows {
-        // ...
-        // 创建 winit 窗口
-        let winit_window = winit_windows.create_window(event_loop, entity, &window);
-        // 更新 bevy 窗口的状态
-        window.resolution.XXX;
-        // ...
-    }
-}

实现 AppViewPlugin

接下来要做的就是使用自定义的窗口插件来替代 WinitPlugin。具体怎么做呢?简单模仿 WinitPlugin

首先,需要实现一个创建与保存窗口实例的结构体。由于我们的宿主 App 已经有完整的事件循环了,创建及管理事件循环的步骤都可以免了:

rust
// bevy_in_app/src/app_view/app_views.rs
-
-#[derive(Debug, Default)]
-pub struct AppViews {
-    views: HashMap<WindowId, AppView>,
-    entity_to_window_id: HashMap<Entity, super::WindowId>,
-}
-
-impl AppViews {
-    pub fn create_window(
-        &mut self,
-        #[cfg(target_os = "ios")] view_obj: super::IOSViewObj,
-        #[cfg(target_os = "android")] view_obj: super::AndroidViewObj,
-        entity: Entity,
-    ) -> Window { ... }
-}

AppViews 里的 AppViewIOSViewObjAndroidViewObj 在前面的与 iOS App 集成与 Android App 集成分别有详细介绍,简单来讲,IOSViewObj 封装了 iOS UIView 实例,AndroidViewObj 封装了 Android SurfaceView 所持有的 ANativeWindow 实例,AppView 实现了 HasRawWindowHandleHasRawDisplayHandle trait。

rust
// bevy-in-app/src/app_view/mod.rs
-
-pub struct AppViewPlugin;
-
-impl Plugin for AppViewPlugin {
-    fn build(&self, app: &mut App) {
-        app.init_non_send_resource::<AppViews>().add_systems(
-            (
-                changed_window.ambiguous_with(exit_on_all_closed),
-                despawn_window.after(changed_window),
-            )
-                .in_base_set(CoreSet::Last),
-        );
-    }
-}

上面就是 AppViewPlugin 的完整代码,就这么简单。

值得注意的是,在 build() 中没有像 WinitPlugin 一样设置 runner 函数,这是怎么回事?

前面已经提到,设置的 runner 函数会在调用 app.run() 时被自动执行。查看源码可以到此函数会将 app 实例从内存中移出并传递 runner,用户端的 app 被替换成了空实例:

rust
// crates/bevy_app/src/app.rs
-
-pub fn run(&mut self) {
-	// ...
-	let mut app = std::mem::replace(self, App::empty());
-	let runner = std::mem::replace(&mut app.runner, Box::new(run_once));
-
-	(runner)(app);
-}

我们需要从宿主 App 事件循环中调用 Bevy App 实例,所以不能让 runner 拿走它,改由从 src/ffi/create_bevy_app() 函数中手动调用:

rust
// bevy-in-app/src/ffi/iOS.rs
-
-#[no_mangle]
-pub fn create_bevy_app(view: *mut objc::runtime::Object, scale_factor: f32) -> *mut libc::c_void {
-    let mut bevy_app = crate::create_breakout_app();
-    let ios_obj = IOSViewObj { view, scale_factor };
-    bevy_app.insert_non_send_resource(ios_obj);
-
-    create_bevy_window(&mut bevy_app);
-    // ...
-}
-
-// bevy-in-app/src/app_view/mod.rs
-
-pub fn create_bevy_window(app: &mut App) {
-    #[cfg(target_os = "ios")]
-    let view_obj = app.world.remove_non_send_resource::<IOSViewObj>().unwrap();
-    #[cfg(target_os = "android")]
-    let view_obj = app.world.remove_non_send_resource::<AndroidViewObj>().unwrap();
-
-    let mut create_window_system_state: SystemState<(
-        Commands,
-        Query<(Entity, &mut Window), Added<Window>>,
-        EventWriter<WindowCreated>,
-        NonSendMut<AppViews>,
-    )> = SystemState::from_world(&mut app.world);
-    let (mut commands, mut new_windows, mut created_window_writer, mut app_views) =
-        create_window_system_state.get_mut(&mut app.world);
-
-    for (entity, mut bevy_window) in new_windows.iter_mut() {
-        if app_views.get_view(entity).is_some() {
-            continue;
-        }
-        let app_view = app_views.create_window(view_obj, entity);
-        let logical_res = app_view.logical_resolution();
-
-        bevy_window
-            .resolution
-            .set_scale_factor(app_view.scale_factor as f64);
-        bevy_window.resolution.set(logical_res.0, logical_res.1);
-
-        commands.entity(entity).insert(RawHandleWrapper {
-            window_handle: app_view.raw_window_handle(),
-            display_handle: app_view.raw_display_handle(),
-        });
-
-        created_window_writer.send(WindowCreated { window: entity });
-    }
-    create_window_system_state.apply(&mut app.world);
-}

create_bevy_window 函数的完整执行逻辑如下: 0. 从 World 中取出 IOSViewObjAndroidViewObj 目标平台的视图对象资源;

  1. 创建一个新的系统状态,并获取所需的命令队列,窗口实体列表,窗口创建写入器(EventWriter<WindowCreated>)和 AppViews 的可变借用;
  2. 遍历窗口实体列表,检查窗口是否已经被创建;
  3. 调用 AppViewscreate_window() 创建一个新窗口 app_view(也就是实现了 raw-window-handle traits 的 AppView);
  4. 调用 app_view 的相关函数与字段更新 Bevy window 的物理分辨率缩放因子及逻辑分辨率;
  5. 通过命令队列 commandsapp_view 中实现的窗口句柄插入到实体中;
  6. 窗口创建写入器发送一个包含了新窗口的实体的 WindowCreated;
  7. 最后,调用 apply() 函数应用系统状态;

IOSViewObjAndroidViewObj 遵循了 Bevy 中资源传递的惯例,在 create_bevy_app() FFI 函数中调用 insert_non_send_resource() 将其插入到 World。 此处没使用 insert_resource() 有两个原因:

  • IOSViewObj 不是线程安全的,iOS UIView 中的函数只能在主线程中使用;
shell
error[E0277]: \`*mut Object\` cannot be sent between threads safely
-  --> src/app_view/ios.rs:21:26
-   |
-21 | #[derive(Debug, Default, Resource)]
-   |                          ^^^^^^^^ \`*mut Object\` cannot be sent between threads safely
-   |
-   = help: the trait \`Send\` is not implemented for \`*mut Object\`
-   = note: required for \`Option<*mut Object>\` to implement \`Sync\`
  • 事实上也并不需要在多线程环境中来创建窗口;

链接 libc++

实现了 AppViewPlugin 后运行 cargo so b --lib --target aarch64-linux-android 将 crate 编译为 .so 库文件,在 Android 项目中加载时将得到如下错误:

shell
dlopen failed: cannot locate symbol "__gxx_personality_v0" referenced by ...

我们知道 Bevy 项目通过 cargo-apk 命令编译为 .apk 是可以在 Android 上安装运行的,于是查看 cargo-apk 的源码:

rust
self.add_lib(&artifact, target)?;
-for need in list_needed_libs(&readelf_path, &artifact)? {
-	// c++_shared is available in the NDK but not on-device.
-	// Must be bundled with the apk if used:
-	// https://developer.android.com/ndk/guides/cpp-support#libc
-	let search_paths = if need == "libc++_shared.so" {
-		// ...
-	}
-	// ...
-}

根据注释里的相关链接libc++_shared 链接库名称,就知道如何在我们的项目里链接 Android libc++ 共享库了, 在 android.rs 中添加如下代码:

rust
// bevy-in-app/src/ffi/android.rs
-
-#[link(name = "c++_shared")]
-extern "C" {}

编译后运行, 现在出现了新的情况:

sh
dlopen failed: library "libc++_shared.so" not found

有两个解决途径:

  • 从 GitHub 下载编译好的 libc++_shared.so 放到 jniLibs/XX 目录下;
  • 使用 externalNativeBuild 配置一个空的 c++ 原生库的构建, Gradle 会自动将该库添加到 App 中;

bevy-in-app 使用了第二种方式,仅需一点模板化的配置,可以直接查看项目源码,这里就不贴出来了。

调用 Motion Sensors

以 Android 为例:

kotlin
class BevySurfaceView : SurfaceView, SurfaceHolder.Callback2 {
-    private var sensorManager: SensorManager? = null
-    private var mSensor: Sensor? = null
-    private var sensorValues: FloatArray = FloatArray(3)
-
-    constructor(context: Context) : super(context) {
-        // 获取传感器服务
-        sensorManager = context.getSystemService(Context.SENSOR_SERVICE) as SensorManager
-        // 获取重力传感器,用于检测设备的重力变化
-        mSensor = sensorManager?.getDefaultSensor(Sensor.TYPE_GRAVITY)
-    }
-
-    override fun surfaceCreated(holder: SurfaceHolder) {
-        holder.let { h ->
-            // ...
-            // 创建了一个传感器事件监听器
-            var sensorEventListener = object : SensorEventListener {
-                // 当传感器的值改变时,更新 sensorValues 变量的值
-                override fun onSensorChanged(event: SensorEvent?) {
-                    if (event != null) {
-                        sensorValues = event.values
-                    }
-                }
-                override fun onAccuracyChanged(sensor: Sensor?, accuracy: Int) {
-                }
-            }
-            mSensor?.also { sensor ->
-                // 注册上边创建的事件监听器,以便开始监听传感器事件
-                sensorManager?.registerListener(sensorEventListener, sensor, SensorManager.SENSOR_DELAY_GAME)
-            }
-        }
-    }
-}

现在已经有了实时变化的传感器数据,仅需调用 Rust 端实现的 FFI 函数来发送此输入事件

Android 端:

kotlin
override fun draw(canvas: Canvas?) {
-    // ...
-    bevyApp.device_motion(bevy_app, sensorValues[0], sensorValues[1], sensorValues[2])
-    bevyApp.enter_frame(bevy_app)
-    // invalidate() 函数通知通知 App,在下一个 UI 刷新周期重新调用 draw() 函数
-    invalidate()
-}

Rust 端:

rust
// bevy-in-app/src/ffi/android.rs
-
-#[no_mangle]
-#[jni_fn("name.jinleili.bevy.RustBridge")]
-pub fn device_motion(_env: *mut JNIEnv, _: jobject, obj: jlong, x: jfloat, _y: jfloat, _z: jfloat) {
-    let app = unsafe { &mut *(obj as *mut App) };
-    let x: f32 = x as _;
-    if x < -0.2 {
-        crate::change_input(app, KeyCode::Left, ButtonState::Released);
-        crate::change_input(app, KeyCode::Right, ButtonState::Pressed);
-    } else if x > 0.2 {
-        crate::change_input(app, KeyCode::Right, ButtonState::Released);
-        crate::change_input(app, KeyCode::Left, ButtonState::Pressed);
-    } else {
-        crate::change_input(app, KeyCode::Left, ButtonState::Released);
-        crate::change_input(app, KeyCode::Right, ButtonState::Released);
-    }
-}
-
-// bevy-in-app/src/lib.rs
-
-#[cfg(any(target_os = "android", target_os = "ios"))]
-pub(crate) fn change_input(app: &mut App, key_code: KeyCode, state: ButtonState) {
-    let input = KeyboardInput {
-        scan_code: if key_code == KeyCode::Left { 123 } else { 124 },
-        state,
-        key_code: Some(key_code),
-    };
-    app.world.cell().send_event(input);
-}

如何重启/退出 Bevy 引擎

Bevy 引擎默认的行为是,当所有窗口关闭时自动退出。显然,这不能满足当前的场景,因为我们需要在不销毁 iOS UIView 或 Android SurfaceView 的情况下原地重启 Bevy App。 为此,需要自定义一个退出函数,以确保在需要的时候可以手动退出:

rust
// bevy-in-app/src/lib.rs
-
-#[cfg(any(target_os = "android", target_os = "ios"))]
-pub(crate) fn exit_bevy_app(mut app: Box<App>) {
-    // 创建一个查询所有窗口的状态变量
-    let mut windows_state: SystemState<(Commands, Query<(Entity, &mut Window)>)> =
-        SystemState::from_world(&mut app.world);
-    // 获取命令列表与窗口列表的可变借用
-    let (mut commands, windows) = windows_state.get_mut(&mut app.world);
-    // 遍历并提交所有窗口实体的销毁命令
-    for (window, _focus) in windows.iter() {
-        commands.entity(window).despawn();
-    }
-    windows_state.apply(&mut app.world);
-    // 由于我们没有使用 Bevy App 的 runner 函数,需要手动 update
-    app.update();
-}

运行

Bevy in Android App
Bevy in iOS App
',63),c=[r];function t(y,D,F,C,A,i){return n(),a("div",null,c)}const u=s(e,[["render",t]]);export{d as __pageData,u as default}; diff --git a/assets/integration-and-debugging_bevy_index.md.7becd25a.lean.js b/assets/integration-and-debugging_bevy_index.md.7becd25a.lean.js deleted file mode 100644 index da0dba08b..000000000 --- a/assets/integration-and-debugging_bevy_index.md.7becd25a.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/assets/bevy_in_android.6d8b91d5.png",o="/learn-wgpu-zh/assets/bevy_in_ios.7ff2933b.png",d=JSON.parse('{"title":"在 iOS Android App 中集成 Bevy 游戏引擎","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/bevy/index.md","filePath":"integration-and-debugging/bevy/index.md","lastUpdated":1701933923000}'),e={name:"integration-and-debugging/bevy/index.md"},r=l("",63),c=[r];function t(y,D,F,C,A,i){return n(),a("div",null,c)}const u=s(e,[["render",t]]);export{d as __pageData,u as default}; diff --git a/assets/integration-and-debugging_bevy_index.md.kfueqURQ.js b/assets/integration-and-debugging_bevy_index.md.kfueqURQ.js new file mode 100644 index 000000000..f0e9830b0 --- /dev/null +++ b/assets/integration-and-debugging_bevy_index.md.kfueqURQ.js @@ -0,0 +1,262 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const p="/learn-wgpu-zh/assets/bevy_in_android.gkRUPflr.png",l="/learn-wgpu-zh/assets/bevy_in_ios.xoCIvUfb.png",o=JSON.parse('{"title":"在 iOS Android App 中集成 Bevy 游戏引擎","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/bevy/index.md","filePath":"integration-and-debugging/bevy/index.md","lastUpdated":1703303099000}'),h={name:"integration-and-debugging/bevy/index.md"},k=n(`

在 iOS Android App 中集成 Bevy 游戏引擎

认识 Bevy

Bevy 是一个开源、跨平台的 Rust 游戏引擎,设计目的是提供一个简单、高效且易于使用的游戏开发框架。它的特点包括:

  • 模块化设计:游戏引擎的各个组件皆为单独的模块,方便选择需要的组件并扩展。
  • 灵活的插件系统:支持自定义插件,可以创建并集成自己的插件。
  • 易于使用的 API:API 简单易懂,帮助快速开始游戏开发。
  • 强大的渲染系统:使用了 wgpu 作为渲染后端,以提供强大的图形渲染能力。
  • 跨平台:除了支持在 Windows、MacOS 和 Linux 桌面系统及 iOS、Android 移动设备上运行,还能在支持 WebGPU 的浏览器上运行。

Bevy 是一个适合新手的游戏引擎,其简单性和灵活性使得能够轻松地开始游戏开发,随着经验的增加,它也能满足更高级的定制需求。

需求场景

如果需要给已有的 App 添加一个开屏小游戏,或者实现一些动态 UI 组件、图表...,又或者只是想充分利用手机上的 Motion Sensors 来实现令人惊艳的游戏体验,那么就不能使用 Bevy 默认的 WinitPlugin 了。因为 winit 会完全控制 App 的初始化过程和窗口,而我们需要的是在已有的 App 实例中创建 bevy::App, 并且我们可能还希望 bevy::App 能在任意大小的 iOS UIView 或 Android SurfaceView 中运行。

本章我们将逐步实现一个此类场景,并且利用手机的 Motion Sensor 来玩 breakout 小游戏。

Bevy 中的窗口插件

Bevy 中有两个窗口插件:WindowPluginWinitPlugin

WindowPlugin 是 Bevy 中构建游戏或应用程序的基础插件, 它提供了一套简单易用的接口来管理窗口的属性(标题、是否可见...)、行为(获取焦点,缩放约束,窗口显示层级...)、与事件(鼠标、键盘、触摸事件...)。WindowPlugin 并不负责实际的窗口创建,它需要与其他插件配合使用

WinitPlugin 为 Bevy 提供实际的窗口及事件循环的创建与管理。顾名思义,它依赖了 winit 窗口管理库。

下面通过源码来看看 WinitPlugin 是如何完成工作的:

rust
// crates/bevy_winit/src/lib.rs
+
+#[derive(Default)]
+pub struct WinitPlugin;
+
+impl Plugin for WinitPlugin {
+    fn build(&self, app: &mut App) {
+        let mut event_loop_builder = EventLoopBuilder::<()>::with_user_event();
+        let event_loop = event_loop_builder.build();
+        app.insert_non_send_resource(event_loop);
+
+        app.init_non_send_resource::<WinitWindows>()
+            .init_resource::<WinitSettings>()
+            .set_runner(winit_runner)
+            .add_systems(...);
+        // ...
+    }
+}

WinitPlugin 是个实现了 Plugin trait 的空结构体,在 build 函数内向 app World 内添分别加了 EventLoopWinitWindowsWinitSettings 3 项资源,WinitWindows 用于随后创建并保存窗口实例:

rust
// crates/bevy_winit/src/winit_windows.rs
+
+#[derive(Debug, Default)]
+pub struct WinitWindows {
+    pub windows: HashMap<winit::window::WindowId, winit::window::Window>,
+    // ...
+}

然后, 设置了一个叫 winit_runner 的 runner 函数,这个函数在用户调用 app.run() 时会被自动执行。

winit_runner() 内调用了 create_window() 来完成最终的窗口创建:

rust
// crates/bevy_winit/src/lib.rs
+
+pub fn winit_runner(mut app: App) {
+    // 取出在 build() 中创建的事件循环实例
+    let mut event_loop = app
+        .world
+        .remove_non_send_resource::<EventLoop<()>>()
+        .unwrap();
+
+    // 创建一个新的系统状态
+    let mut create_window_system_state: SystemState<(Commands, Query<(Entity, &mut Window),
+    Added<Window>>, EventWriter<WindowCreated>,NonSendMut<WinitWindows>)>
+        = SystemState::from_world(&mut app.world);
+
+    let event_handler = move |event: Event<()>,
+                              event_loop: &EventLoopWindowTarget<()>,
+                              control_flow: &mut ControlFlow| {
+        // ...
+        // 创建新窗口
+        let (commands, mut new_windows, created_window_writer, winit_windows) =
+                create_window_system_state.get_mut(&mut app.world);
+        create_window(
+            commands,
+            event_loop,
+            new_windows.iter_mut(),
+            created_window_writer,
+            winit_windows,
+            ...
+        );
+        // ...
+
+}
+
+// crates/bevy_winit/src/system.rs
+
+pub(crate) fn create_window<'a>(
+    mut commands: Commands,
+    event_loop: &EventLoopWindowTarget<()>,
+    created_windows: impl Iterator<Item = (Entity, Mut<'a, Window>)>,
+    mut event_writer: EventWriter<WindowCreated>,
+    mut winit_windows: NonSendMut<WinitWindows>,
+) {
+    for (entity, mut window) in created_windows {
+        // ...
+        // 创建 winit 窗口
+        let winit_window = winit_windows.create_window(event_loop, entity, &window);
+        // 更新 bevy 窗口的状态
+        window.resolution.XXX;
+        // ...
+    }
+}

实现 AppViewPlugin

接下来要做的就是使用自定义的窗口插件来替代 WinitPlugin。具体怎么做呢?简单模仿 WinitPlugin

首先,需要实现一个创建与保存窗口实例的结构体。由于我们的宿主 App 已经有完整的事件循环了,创建及管理事件循环的步骤都可以免了:

rust
// bevy_in_app/src/app_view/app_views.rs
+
+#[derive(Debug, Default)]
+pub struct AppViews {
+    views: HashMap<WindowId, AppView>,
+    entity_to_window_id: HashMap<Entity, super::WindowId>,
+}
+
+impl AppViews {
+    pub fn create_window(
+        &mut self,
+        #[cfg(target_os = "ios")] view_obj: super::IOSViewObj,
+        #[cfg(target_os = "android")] view_obj: super::AndroidViewObj,
+        entity: Entity,
+    ) -> Window { ... }
+}

AppViews 里的 AppViewIOSViewObjAndroidViewObj 在前面的与 iOS App 集成与 Android App 集成分别有详细介绍,简单来讲,IOSViewObj 封装了 iOS UIView 实例,AndroidViewObj 封装了 Android SurfaceView 所持有的 ANativeWindow 实例,AppView 实现了 HasRawWindowHandleHasRawDisplayHandle trait。

rust
// bevy-in-app/src/app_view/mod.rs
+
+pub struct AppViewPlugin;
+
+impl Plugin for AppViewPlugin {
+    fn build(&self, app: &mut App) {
+        app.init_non_send_resource::<AppViews>().add_systems(
+            (
+                changed_window.ambiguous_with(exit_on_all_closed),
+                despawn_window.after(changed_window),
+            )
+                .in_base_set(CoreSet::Last),
+        );
+    }
+}

上面就是 AppViewPlugin 的完整代码,就这么简单。

值得注意的是,在 build() 中没有像 WinitPlugin 一样设置 runner 函数,这是怎么回事?

前面已经提到,设置的 runner 函数会在调用 app.run() 时被自动执行。查看源码可以到此函数会将 app 实例从内存中移出并传递 runner,用户端的 app 被替换成了空实例:

rust
// crates/bevy_app/src/app.rs
+
+pub fn run(&mut self) {
+	// ...
+	let mut app = std::mem::replace(self, App::empty());
+	let runner = std::mem::replace(&mut app.runner, Box::new(run_once));
+
+	(runner)(app);
+}

我们需要从宿主 App 事件循环中调用 Bevy App 实例,所以不能让 runner 拿走它,改由从 src/ffi/create_bevy_app() 函数中手动调用:

rust
// bevy-in-app/src/ffi/iOS.rs
+
+#[no_mangle]
+pub fn create_bevy_app(view: *mut objc::runtime::Object, scale_factor: f32) -> *mut libc::c_void {
+    let mut bevy_app = crate::create_breakout_app();
+    let ios_obj = IOSViewObj { view, scale_factor };
+    bevy_app.insert_non_send_resource(ios_obj);
+
+    create_bevy_window(&mut bevy_app);
+    // ...
+}
+
+// bevy-in-app/src/app_view/mod.rs
+
+pub fn create_bevy_window(app: &mut App) {
+    #[cfg(target_os = "ios")]
+    let view_obj = app.world.remove_non_send_resource::<IOSViewObj>().unwrap();
+    #[cfg(target_os = "android")]
+    let view_obj = app.world.remove_non_send_resource::<AndroidViewObj>().unwrap();
+
+    let mut create_window_system_state: SystemState<(
+        Commands,
+        Query<(Entity, &mut Window), Added<Window>>,
+        EventWriter<WindowCreated>,
+        NonSendMut<AppViews>,
+    )> = SystemState::from_world(&mut app.world);
+    let (mut commands, mut new_windows, mut created_window_writer, mut app_views) =
+        create_window_system_state.get_mut(&mut app.world);
+
+    for (entity, mut bevy_window) in new_windows.iter_mut() {
+        if app_views.get_view(entity).is_some() {
+            continue;
+        }
+        let app_view = app_views.create_window(view_obj, entity);
+        let logical_res = app_view.logical_resolution();
+
+        bevy_window
+            .resolution
+            .set_scale_factor(app_view.scale_factor as f64);
+        bevy_window.resolution.set(logical_res.0, logical_res.1);
+
+        commands.entity(entity).insert(RawHandleWrapper {
+            window_handle: app_view.raw_window_handle(),
+            display_handle: app_view.raw_display_handle(),
+        });
+
+        created_window_writer.send(WindowCreated { window: entity });
+    }
+    create_window_system_state.apply(&mut app.world);
+}

create_bevy_window 函数的完整执行逻辑如下: 0. 从 World 中取出 IOSViewObjAndroidViewObj 目标平台的视图对象资源;

  1. 创建一个新的系统状态,并获取所需的命令队列,窗口实体列表,窗口创建写入器(EventWriter<WindowCreated>)和 AppViews 的可变借用;
  2. 遍历窗口实体列表,检查窗口是否已经被创建;
  3. 调用 AppViewscreate_window() 创建一个新窗口 app_view(也就是实现了 raw-window-handle traits 的 AppView);
  4. 调用 app_view 的相关函数与字段更新 Bevy window 的物理分辨率缩放因子及逻辑分辨率;
  5. 通过命令队列 commandsapp_view 中实现的窗口句柄插入到实体中;
  6. 窗口创建写入器发送一个包含了新窗口的实体的 WindowCreated;
  7. 最后,调用 apply() 函数应用系统状态;

IOSViewObjAndroidViewObj 遵循了 Bevy 中资源传递的惯例,在 create_bevy_app() FFI 函数中调用 insert_non_send_resource() 将其插入到 World。 此处没使用 insert_resource() 有两个原因:

  • IOSViewObj 不是线程安全的,iOS UIView 中的函数只能在主线程中使用;
shell
error[E0277]: \`*mut Object\` cannot be sent between threads safely
+  --> src/app_view/ios.rs:21:26
+   |
+21 | #[derive(Debug, Default, Resource)]
+   |                          ^^^^^^^^ \`*mut Object\` cannot be sent between threads safely
+   |
+   = help: the trait \`Send\` is not implemented for \`*mut Object\`
+   = note: required for \`Option<*mut Object>\` to implement \`Sync\`
  • 事实上也并不需要在多线程环境中来创建窗口;

链接 libc++

实现了 AppViewPlugin 后运行 cargo so b --lib --target aarch64-linux-android 将 crate 编译为 .so 库文件,在 Android 项目中加载时将得到如下错误:

shell
dlopen failed: cannot locate symbol "__gxx_personality_v0" referenced by ...

我们知道 Bevy 项目通过 cargo-apk 命令编译为 .apk 是可以在 Android 上安装运行的,于是查看 cargo-apk 的源码:

rust
self.add_lib(&artifact, target)?;
+for need in list_needed_libs(&readelf_path, &artifact)? {
+	// c++_shared is available in the NDK but not on-device.
+	// Must be bundled with the apk if used:
+	// https://developer.android.com/ndk/guides/cpp-support#libc
+	let search_paths = if need == "libc++_shared.so" {
+		// ...
+	}
+	// ...
+}

根据注释里的相关链接libc++_shared 链接库名称,就知道如何在我们的项目里链接 Android libc++ 共享库了, 在 android.rs 中添加如下代码:

rust
// bevy-in-app/src/ffi/android.rs
+
+#[link(name = "c++_shared")]
+extern "C" {}

编译后运行, 现在出现了新的情况:

sh
dlopen failed: library "libc++_shared.so" not found

有两个解决途径:

  • 从 GitHub 下载编译好的 libc++_shared.so 放到 jniLibs/XX 目录下;
  • 使用 externalNativeBuild 配置一个空的 c++ 原生库的构建, Gradle 会自动将该库添加到 App 中;

bevy-in-app 使用了第二种方式,仅需一点模板化的配置,可以直接查看项目源码,这里就不贴出来了。

调用 Motion Sensors

以 Android 为例:

kotlin
class BevySurfaceView : SurfaceView, SurfaceHolder.Callback2 {
+    private var sensorManager: SensorManager? = null
+    private var mSensor: Sensor? = null
+    private var sensorValues: FloatArray = FloatArray(3)
+
+    constructor(context: Context) : super(context) {
+        // 获取传感器服务
+        sensorManager = context.getSystemService(Context.SENSOR_SERVICE) as SensorManager
+        // 获取重力传感器,用于检测设备的重力变化
+        mSensor = sensorManager?.getDefaultSensor(Sensor.TYPE_GRAVITY)
+    }
+
+    override fun surfaceCreated(holder: SurfaceHolder) {
+        holder.let { h ->
+            // ...
+            // 创建了一个传感器事件监听器
+            var sensorEventListener = object : SensorEventListener {
+                // 当传感器的值改变时,更新 sensorValues 变量的值
+                override fun onSensorChanged(event: SensorEvent?) {
+                    if (event != null) {
+                        sensorValues = event.values
+                    }
+                }
+                override fun onAccuracyChanged(sensor: Sensor?, accuracy: Int) {
+                }
+            }
+            mSensor?.also { sensor ->
+                // 注册上边创建的事件监听器,以便开始监听传感器事件
+                sensorManager?.registerListener(sensorEventListener, sensor, SensorManager.SENSOR_DELAY_GAME)
+            }
+        }
+    }
+}

现在已经有了实时变化的传感器数据,仅需调用 Rust 端实现的 FFI 函数来发送此输入事件

Android 端:

kotlin
override fun draw(canvas: Canvas?) {
+    // ...
+    bevyApp.device_motion(bevy_app, sensorValues[0], sensorValues[1], sensorValues[2])
+    bevyApp.enter_frame(bevy_app)
+    // invalidate() 函数通知通知 App,在下一个 UI 刷新周期重新调用 draw() 函数
+    invalidate()
+}

Rust 端:

rust
// bevy-in-app/src/ffi/android.rs
+
+#[no_mangle]
+#[jni_fn("name.jinleili.bevy.RustBridge")]
+pub fn device_motion(_env: *mut JNIEnv, _: jobject, obj: jlong, x: jfloat, _y: jfloat, _z: jfloat) {
+    let app = unsafe { &mut *(obj as *mut App) };
+    let x: f32 = x as _;
+    if x < -0.2 {
+        crate::change_input(app, KeyCode::Left, ButtonState::Released);
+        crate::change_input(app, KeyCode::Right, ButtonState::Pressed);
+    } else if x > 0.2 {
+        crate::change_input(app, KeyCode::Right, ButtonState::Released);
+        crate::change_input(app, KeyCode::Left, ButtonState::Pressed);
+    } else {
+        crate::change_input(app, KeyCode::Left, ButtonState::Released);
+        crate::change_input(app, KeyCode::Right, ButtonState::Released);
+    }
+}
+
+// bevy-in-app/src/lib.rs
+
+#[cfg(any(target_os = "android", target_os = "ios"))]
+pub(crate) fn change_input(app: &mut App, key_code: KeyCode, state: ButtonState) {
+    let input = KeyboardInput {
+        scan_code: if key_code == KeyCode::Left { 123 } else { 124 },
+        state,
+        key_code: Some(key_code),
+    };
+    app.world.cell().send_event(input);
+}

如何重启/退出 Bevy 引擎

Bevy 引擎默认的行为是,当所有窗口关闭时自动退出。显然,这不能满足当前的场景,因为我们需要在不销毁 iOS UIView 或 Android SurfaceView 的情况下原地重启 Bevy App。 为此,需要自定义一个退出函数,以确保在需要的时候可以手动退出:

rust
// bevy-in-app/src/lib.rs
+
+#[cfg(any(target_os = "android", target_os = "ios"))]
+pub(crate) fn exit_bevy_app(mut app: Box<App>) {
+    // 创建一个查询所有窗口的状态变量
+    let mut windows_state: SystemState<(Commands, Query<(Entity, &mut Window)>)> =
+        SystemState::from_world(&mut app.world);
+    // 获取命令列表与窗口列表的可变借用
+    let (mut commands, windows) = windows_state.get_mut(&mut app.world);
+    // 遍历并提交所有窗口实体的销毁命令
+    for (window, _focus) in windows.iter() {
+        commands.entity(window).despawn();
+    }
+    windows_state.apply(&mut app.world);
+    // 由于我们没有使用 Bevy App 的 runner 函数,需要手动 update
+    app.update();
+}

运行

Bevy in Android App
Bevy in iOS App
',63),e=[k];function t(r,d,E,g,y,F){return i(),a("div",null,e)}const b=s(h,[["render",t]]);export{o as __pageData,b as default}; diff --git a/assets/integration-and-debugging_bevy_index.md.kfueqURQ.lean.js b/assets/integration-and-debugging_bevy_index.md.kfueqURQ.lean.js new file mode 100644 index 000000000..b271041c4 --- /dev/null +++ b/assets/integration-and-debugging_bevy_index.md.kfueqURQ.lean.js @@ -0,0 +1 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const p="/learn-wgpu-zh/assets/bevy_in_android.gkRUPflr.png",l="/learn-wgpu-zh/assets/bevy_in_ios.xoCIvUfb.png",o=JSON.parse('{"title":"在 iOS Android App 中集成 Bevy 游戏引擎","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/bevy/index.md","filePath":"integration-and-debugging/bevy/index.md","lastUpdated":1703303099000}'),h={name:"integration-and-debugging/bevy/index.md"},k=n("",63),e=[k];function t(r,d,E,g,y,F){return i(),a("div",null,e)}const b=s(h,[["render",t]]);export{o as __pageData,b as default}; diff --git a/assets/integration-and-debugging_index.md.531e86b4.js b/assets/integration-and-debugging_index.md.531e86b4.js deleted file mode 100644 index bc959fb63..000000000 --- a/assets/integration-and-debugging_index.md.531e86b4.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as t,E as n,o,c as a,J as r,S as i}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/res/tools.png",A=JSON.parse('{"title":"楔子","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/index.md","filePath":"integration-and-debugging/index.md","lastUpdated":1701933923000}'),s={name:"integration-and-debugging/index.md"},d=i('

楔子

调试工具集

教程的开篇我们就已提到:wgpu 是基于 WebGPU 规范的跨平台图形 API。也就是说,wgpu 不光能运行在 Web 及桌面环境里,更是能运行在 iOS、Android 两大移动操作系统上。

wgpu 的运行并不依赖于任何窗口程序,所以也不提供窗口的创建及管理功能,只有在创建基于窗口的绘制表面(Surface)时,才可能需要一个实现了 raw-window-handle 抽象接口的实参(之所以说是可能需要,是因为在 iOS/macOS 上,使用 CAMetalLayer 也能创建绘制表面的实例)。 winit 是一个实现了 raw-window-handle 抽象接口的、跨平台的窗口创建及管理(crate)。 在桌面端(macOS、Windows、Linux)及移动端(iOS、Android),winit 会接管整个 App (应用程序)的窗口管理(包括事件循环(Events loop))。

毫无疑问,对于游戏类 App, 使用 wgpu + winit 的组合是非常合适的。但是,大量非游戏类 App 也经常有使用图形 API 的需求(比如,图表、图片滤镜等),这些 App 需要用到大量的系统 UI 组件及交互,winit 这种接管整个 App 窗口的方式是不合适的。所以,将 wgpu 集成到现有的 iOS、Android App 且不使用 winit 将非常有用。

我们都知道,调试和分析是优化程序性能的必备技能。

虽然 wgpu 会在运行时验证 API 调用及参数设置来保证只有有效的工作负载才能提交给 GPU 执行,但是,这并不能保证渲染计算着色(Compute Shading)的正确性。本章中我们还会学习到如何利用调试工具来分析 wgpu 程序的性能及查找难以发现的错误!

加入 wgpu 微信学习交流群

',8);function g(c,u,w,l,_,h){const e=n("JoinWeiChatGroup");return o(),a("div",null,[d,r(e)])}const f=t(s,[["render",g]]);export{A as __pageData,f as default}; diff --git a/assets/integration-and-debugging_index.md.531e86b4.lean.js b/assets/integration-and-debugging_index.md.531e86b4.lean.js deleted file mode 100644 index 6d27063c5..000000000 --- a/assets/integration-and-debugging_index.md.531e86b4.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as t,E as n,o,c as a,J as r,S as i}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/res/tools.png",A=JSON.parse('{"title":"楔子","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/index.md","filePath":"integration-and-debugging/index.md","lastUpdated":1701933923000}'),s={name:"integration-and-debugging/index.md"},d=i("",8);function g(c,u,w,l,_,h){const e=n("JoinWeiChatGroup");return o(),a("div",null,[d,r(e)])}const f=t(s,[["render",g]]);export{A as __pageData,f as default}; diff --git a/assets/integration-and-debugging_index.md.AdQIXBZG.js b/assets/integration-and-debugging_index.md.AdQIXBZG.js new file mode 100644 index 000000000..2c60b4764 --- /dev/null +++ b/assets/integration-and-debugging_index.md.AdQIXBZG.js @@ -0,0 +1 @@ +import{_ as t,D as n,o as a,c as o,I as r,R as i,a4 as p}from"./chunks/framework.bMtwhlie.js";const A=JSON.parse('{"title":"楔子","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/index.md","filePath":"integration-and-debugging/index.md","lastUpdated":1703303099000}'),d={name:"integration-and-debugging/index.md"},s=i('

楔子

调试工具集

教程的开篇我们就已提到:wgpu 是基于 WebGPU 规范的跨平台图形 API。也就是说,wgpu 不光能运行在 Web 及桌面环境里,更是能运行在 iOS、Android 两大移动操作系统上。

wgpu 的运行并不依赖于任何窗口程序,所以也不提供窗口的创建及管理功能,只有在创建基于窗口的绘制表面(Surface)时,才可能需要一个实现了 raw-window-handle 抽象接口的实参(之所以说是可能需要,是因为在 iOS/macOS 上,使用 CAMetalLayer 也能创建绘制表面的实例)。 winit 是一个实现了 raw-window-handle 抽象接口的、跨平台的窗口创建及管理(crate)。 在桌面端(macOS、Windows、Linux)及移动端(iOS、Android),winit 会接管整个 App (应用程序)的窗口管理(包括事件循环(Events loop))。

毫无疑问,对于游戏类 App, 使用 wgpu + winit 的组合是非常合适的。但是,大量非游戏类 App 也经常有使用图形 API 的需求(比如,图表、图片滤镜等),这些 App 需要用到大量的系统 UI 组件及交互,winit 这种接管整个 App 窗口的方式是不合适的。所以,将 wgpu 集成到现有的 iOS、Android App 且不使用 winit 将非常有用。

我们都知道,调试和分析是优化程序性能的必备技能。

虽然 wgpu 会在运行时验证 API 调用及参数设置来保证只有有效的工作负载才能提交给 GPU 执行,但是,这并不能保证渲染计算着色(Compute Shading)的正确性。本章中我们还会学习到如何利用调试工具来分析 wgpu 程序的性能及查找难以发现的错误!

加入 wgpu 微信学习交流群

',8);function g(c,u,w,_,l,h){const e=n("JoinWeiChatGroup");return a(),o("div",null,[s,r(e)])}const f=t(d,[["render",g]]);export{A as __pageData,f as default}; diff --git a/assets/integration-and-debugging_index.md.AdQIXBZG.lean.js b/assets/integration-and-debugging_index.md.AdQIXBZG.lean.js new file mode 100644 index 000000000..f510dd418 --- /dev/null +++ b/assets/integration-and-debugging_index.md.AdQIXBZG.lean.js @@ -0,0 +1 @@ +import{_ as t,D as n,o as a,c as o,I as r,R as i,a4 as p}from"./chunks/framework.bMtwhlie.js";const A=JSON.parse('{"title":"楔子","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/index.md","filePath":"integration-and-debugging/index.md","lastUpdated":1703303099000}'),d={name:"integration-and-debugging/index.md"},s=i("",8);function g(c,u,w,_,l,h){const e=n("JoinWeiChatGroup");return a(),o("div",null,[s,r(e)])}const f=t(d,[["render",g]]);export{A as __pageData,f as default}; diff --git a/assets/integration-and-debugging_ios_index.md.b6b47109.js b/assets/integration-and-debugging_ios_index.md.b6b47109.js deleted file mode 100644 index 5cce69eaf..000000000 --- a/assets/integration-and-debugging_ios_index.md.b6b47109.js +++ /dev/null @@ -1,144 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/assets/lib.a4a41bda.png",e="/learn-wgpu-zh/assets/search.e005d804.png",o="/learn-wgpu-zh/assets/links.b1c5f8bb.png",r="/learn-wgpu-zh/assets/on_ios.6db96077.png",d=JSON.parse('{"title":"与 iOS App 集成","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/ios/index.md","filePath":"integration-and-debugging/ios/index.md","lastUpdated":1701933923000}'),c={name:"integration-and-debugging/ios/index.md"},t=l(`

与 iOS App 集成

与 iOS App 集成相比于 Android 要简单一些。

添加 iOS 构建目标支持

sh
# 添加 iOS 构建目标支持
-rustup target add aarch64-apple-ios 
-
-# 添加 iOS 模拟器构建目标支持
-# Intel CPU Mac
-rustup target add x86_64-apple-ios
-# M1+ Mac
-rustup target add aarch64-apple-ios-sim

由于从 A7 芯片(iPhone 5S,iPad Mini 2) 开始,iPhone iPad 都是 64 位的设备,所以我们不需要 armv7s-apple-iosarmv7-apple-ios 这两个构建目标。

iOS 模拟器相比于真机设备的特殊之处

当运行 WebGPU 程序时,模拟器并不会试图完全模拟你正在模拟的 iOS 设备的 GPU。例如,如果选择 iPhone 14 Pro 模拟器,它不会试图模拟 A16 GPU 的能力。相反,模拟器会翻译你的任何调用,并将它们引导到 Mac 主机上的选定 GPU。

苹果为模拟器单独提供了一个设备对象,其功能被限制为苹果 GPU 家族的 Apple2 型号(也就是古早的 A8 芯片),这意味着模拟器往往比实际的 GPU 支持更少的功能或更多的限制。从这篇文档 可以查看到功能限制的详情。

开发调试 GPU 应用,使用真机永远是最好的选择。

定义 FFI

在 iOS/macOS 上,使用 CAMetalLayer 也能创建绘制表面的实例,所以我们无须去实现 raw-window-handle 抽象接口。

先给项目添加上必要的依赖:

toml
[target.'cfg(target_os = "ios")'.dependencies]
-libc = "*"
-objc = "0.2.7"

然后定义一个 IOSViewObj 结构体:

rust
#[repr(C)]
-pub struct IOSViewObj {
-    // metal_layer 所在的 UIView 容器
-    // UIView 有一系列方便的函数可供我们在 Rust 端来调用
-    pub view: *mut Object,
-    // 指向 iOS 端 CAMetalLayer 的指针
-    pub metal_layer: *mut c_void,
-    // 不同的 iOS 设备支持不同的屏幕刷新率,有时我们的 GPU 程序需要用到这类信息
-    pub maximum_frames: i32,
-    // 外部函数接口,用于给 iOS 端传递状态码
-    pub callback_to_swift: extern "C" fn(arg: i32),
-}

#[repr(C)] 属性标注 IOSViewObj 的内存布局兼容 C-ABI。

什么是 ABI?

ABI 是⼀个规范,它涵盖以下内容: · 调⽤约定。⼀个函数的调⽤过程本质就是参数、函数、返回值如何传递。编译器按照调⽤规则去编译,把数据放到相应的堆栈中,函数的调⽤⽅和被调⽤⽅(函数本⾝)都需要遵循这个统⼀的约定。 · 内存布局。主要是⼤⼩和对齐⽅式。 · 处理器指令集。 · ⽬标⽂件和库的⼆进制格式。

为什么使用 C-ABI?

不同的操作系统、编程语⾔、每种编程语⾔的不同编译器 实现基本都有⾃⼰规定或者遵循的 ABI 和调⽤规范。⽬前只能通过 FFI 技术遵循 C 语⾔ ABI 才可以做到编程语⾔的相互调⽤。也就是说,C-ABI 是唯⼀通⽤的稳定的标准 ABI。这是由历史原因决定的,C 语⾔伴随着操作系 统⼀路发展⽽来,导致其成为事实上的标准 ABI。

假设我们已经实现好了一个 wgpu 程序叫 WgpuCanvas, 现在来实现两个供 iOS 端调用的、控制 WgpuCanvas 初始化及帧渲染的函数:

rust
#[no_mangle]
-pub fn create_wgpu_canvas(ios_obj: IOSViewObj) -> *mut libc::c_void {
-    let obj = WgpuCanvas::new(AppSurface::new(ios_obj), 0_i32);
-    // 使用 Box 对 Rust 对象进行装箱操作。
-    // 我们无法将 Rust 对象直接传递给外部语言,通过装箱来传递此对象的裸指针 
-    let box_obj = Box::new(obj);
-    Box::into_raw(box_obj) as *mut libc::c_void
-}
-
-#[no_mangle]
-pub fn enter_frame(obj: *mut libc::c_void) {
-    // 将指针转换为其指代的实际 Rust 对象,同时也拿回此对象的内存管理权
-    // from_raw 是 unsafe 函数,它的调用需要放在 unsafe {} 块中
-    let mut obj: Box<WgpuCanvas> = unsafe { Box::from_raw(obj as *mut _) };
-    obj.enter_frame();
-    // 将 obj 对象的内存管理权重新转交给调用方
-    Box::into_raw(obj);
-}

#[no_mangle] 属性告诉 Rust 关闭函数名称修改功能。如果不加这个属性,Rust 编译器就会修改函数名,这是现代编译器为了解决唯⼀名称解析引起的各种问题所引⼊的技术。如果函数名被修改了,外部编程语言就⽆法按原名称调⽤,开发者也没办法知道修改后的函数名。

你应该已注意到了,上面的 enter_frame(obj: *mut libc::c_void) 函数里,我们做了两次内存管理权的转移,先是取回了内存管理权,后又再次转交给调用方。有没有办法避免这两次转移来提升性能呢?可以,直接从裸指针获取到对象的可变借用:

rust
#[no_mangle]
-pub fn enter_frame(obj: *mut libc::c_void) {
-    // 直接获取到指针指代的 Rust 对象的可变借用
-    let obj = unsafe { &mut *(obj as *mut WgpuCanvas) };
-    obj.enter_frame();
-}

Unsafe Rust

Unsafe Rust 是 Safe Rust 的⼀个超集。也就是说,在 unsafe {} 块中,并不会禁⽤ Safe Rust 中的任何安全检查。它仅在进⾏以下五类操作时,不提供安全检查:

  • 裸指针的解引⽤或类型转换;
  • 调⽤ unsafe 的函数;
  • 访问或修改可变静态变量;
  • 实现 unsafe trait;
  • 读写 Union 联合体中的字段;

&mut *(obj as *mut WgpuCanvas) 之所以要放在 unsafe {} 块中,不仅仅是由于 obj 参数是裸指针,还因为 Rust 在编译阶段的静态安全检查此时完全没有⽤武之地,所以也就没必要提供安全检查了。

还需要写一个简单的 C 语言的头文件来对应上面定义的结构体与函数。 让我们按照惯例,使用项目编译出来的 .a 库文件名称为此头文件命名:

c
#ifndef libwgpu_on_app_h
-#define libwgpu_on_app_h
-
-#include <stdint.h>
-
-// 这个不透明结构体用来指代 Rust 端的 WgpuCanvas 对象
-struct wgpu_canvas;
-
-// 对应 Rust 端的 IOSViewObj 对象
-struct ios_view_obj {
-    void *view;
-    // CAMetalLayer
-    void *metal_layer;
-    int maximum_frames;
-    void (*callback_to_swift)(int32_t arg);
-};
-
-struct wgpu_canvas *create_wgpu_canvas(struct ios_view_obj object);
-void enter_frame(struct wgpu_canvas *data);
-
-#endif /* libwgpu_on_app_h */

将上面的头文件放置到 iOS 项目中。如果你的 iOS 项目是使用 Swift 创建的,则还需要将头文件引入到桥接文件(XXX-Bridging-Header.h)中:

c
#ifndef wgpu_test_Bridging_Header_h
-#define wgpu_test_Bridging_Header_h
-
-#import "libwgpu_on_app.h"
-
-#endif /* wgpu_test_Bridging_Header_h */

App 中加载 WgpuCanvas 对象

先在 iOS 项目中自定义一个继承自 UIView 的 MetalView,代码很简单:

swift
class MetalView: UIView {
-    // 这里将 View 的默认 Layer 指定为 CAMetalLayer
-    override class var layerClass: AnyClass {
-        return CAMetalLayer.self
-    }
-    
-    override func awakeFromNib() {
-        super.awakeFromNib()
-        configLayer()
-    }
-    
-    private func configLayer() {
-        guard let layer = self.layer as? CAMetalLayer else {
-            return
-        }
-        layer.presentsWithTransaction = false
-        layer.framebufferOnly = true
-        // nativeScale is real physical pixel scale
-        // https://tomisacat.xyz/tech/2017/06/17/scale-nativescale-contentsscale.html
-        self.contentScaleFactor = UIScreen.main.nativeScale
-    }
-}

然后在 ViewController 中实例化 WgpuCanvas:

swift
// ...
-// 我是通过 StoryBoard 绑定的 MetalView,当然,你也可以手动创建
-@IBOutlet var metalV: MetalView!
-// 指向 Rust 端 WgpuCanvas 的指针
-var wgpuCanvas: OpaquePointer?
-lazy var displayLink: CADisplayLink = {
-    CADisplayLink.init(target: self, selector: #selector(enterFrame))
-}()
-// ...
-override func viewDidAppear(_ animated: Bool) {
-    super.viewDidAppear(animated)
-    // 我们需要保证 WgpuCanvas 只被实例化一次
-    if wgpuCanvas == nil {
-        // 将 Swift 对象转换为裸指针
-        let viewPointer = Unmanaged.passRetained(self.metalV).toOpaque()
-        let metalLayer = Unmanaged.passRetained(self.metalV.layer).toOpaque()
-        let maximumFrames = UIScreen.main.maximumFramesPerSecond
-        
-        // 创建 IOSViewObj 实例
-        let viewObj = ios_view_obj(view: viewPointer, metal_layer: metalLayer,maximum_frames: Int32(maximumFrames), callback_to_swift: callback_to_swift)
-        // 创建 WgpuCanvas 实例
-        wgpuCanvas = create_wgpu_canvas(viewObj)
-    }
-    self.displayLink.isPaused = false
-}
-
-@objc func enterFrame() {
-    guard let canvas = self.wgpuCanvas else {
-        return
-    }
-    // 执行 WgpuCanvas 帧渲染
-    enter_frame(canvas)
-}
-
-func callback_to_swift(arg: Int32) {
-    // callback_to_swift 函数是在 WgpuCanvas 中被调用的,WgpuCanvas 的代码很可能没有运行在 iOS 的 UI 线程,
-    // 如果此处涉及到 UI 操作,就必须切换到 UI 线程。
-    DispatchQueue.main.async {
-        switch arg {
-        // ...
-        }
-    }
-}

编译与运行

sh
# 编译为 iOS 真机支持的库
-# debug 库
-cargo build --target aarch64-apple-ios
-# release 库
-cargo build --target aarch64-apple-ios --release
-
-# 编译为 iOS 模拟器支持的库
-# M1+ Mac 上执行:
-cargo build --target aarch64-apple-ios-sim 
-# Intel 芯片的 Mac 上执行:
-cargo build --target x86_64-apple-ios
-
-# 编译成功后需复制文件libwgpu_in_app.a至项目目录下
-# cp target/\${TARGET}/\${LIB_FOLDER}/libwgpu_in_app.a Apple/libs/\${LIB_FOLDER}/libwgpu_in_app.a

打开 iOS 项目,在项目的 General 选项卡下找到 Frameworks, Libraries, and Embedded Content 栏, 导入系统的 libresolv.tbd 及我们刚编译的 .a 库,此导入只需要操作一次:

然后在 Build Settings 选项卡下找到 Search Paths -> Library Search Paths 栏, 将 .a 库的 debug 和 release 路径填到对应的字段中:

最后,还是在 Build Settings 选项卡下,找到 Linking -> Other Linker Flags 栏,添加 -ObjC-lc++ 两个链接标记:

当 Xcode 版本 >= 13 且 iOS Deployment Target >= 12.0 时,Other Linker Flags 栏的设置可以省略。

以上就是所有的关键代码和步骤了,我写了一个叫 wgpu-in-app 的示例程序,效果如下:

',41),i=[t];function y(D,C,F,A,b,u){return n(),a("div",null,i)}const g=s(c,[["render",y]]);export{d as __pageData,g as default}; diff --git a/assets/integration-and-debugging_ios_index.md.b6b47109.lean.js b/assets/integration-and-debugging_ios_index.md.b6b47109.lean.js deleted file mode 100644 index 1414f7355..000000000 --- a/assets/integration-and-debugging_ios_index.md.b6b47109.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/assets/lib.a4a41bda.png",e="/learn-wgpu-zh/assets/search.e005d804.png",o="/learn-wgpu-zh/assets/links.b1c5f8bb.png",r="/learn-wgpu-zh/assets/on_ios.6db96077.png",d=JSON.parse('{"title":"与 iOS App 集成","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/ios/index.md","filePath":"integration-and-debugging/ios/index.md","lastUpdated":1701933923000}'),c={name:"integration-and-debugging/ios/index.md"},t=l("",41),i=[t];function y(D,C,F,A,b,u){return n(),a("div",null,i)}const g=s(c,[["render",y]]);export{d as __pageData,g as default}; diff --git a/assets/integration-and-debugging_ios_index.md.qvFn1Gez.js b/assets/integration-and-debugging_ios_index.md.qvFn1Gez.js new file mode 100644 index 000000000..99c4340ec --- /dev/null +++ b/assets/integration-and-debugging_ios_index.md.qvFn1Gez.js @@ -0,0 +1,145 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const l="/learn-wgpu-zh/assets/lib.uClQAVCC.png",p="/learn-wgpu-zh/assets/search.XvqND0E-.png",e="/learn-wgpu-zh/assets/links.ykRZQt6t.png",h="/learn-wgpu-zh/assets/on_ios.tACZQwGC.png",o=JSON.parse('{"title":"与 iOS App 集成","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/ios/index.md","filePath":"integration-and-debugging/ios/index.md","lastUpdated":1703303099000}'),k={name:"integration-and-debugging/ios/index.md"},t=n(`

与 iOS App 集成

与 iOS App 集成相比于 Android 要简单一些。

添加 iOS 构建目标支持

sh
# 添加 iOS 构建目标支持
+rustup target add aarch64-apple-ios 
+
+# 添加 iOS 模拟器构建目标支持
+# Intel CPU Mac
+rustup target add x86_64-apple-ios
+# M1+ Mac
+rustup target add aarch64-apple-ios-sim

由于从 A7 芯片(iPhone 5S,iPad Mini 2) 开始,iPhone iPad 都是 64 位的设备,所以我们不需要 armv7s-apple-iosarmv7-apple-ios 这两个构建目标。

iOS 模拟器相比于真机设备的特殊之处

当运行 WebGPU 程序时,模拟器并不会试图完全模拟你正在模拟的 iOS 设备的 GPU。例如,如果选择 iPhone 14 Pro 模拟器,它不会试图模拟 A16 GPU 的能力。相反,模拟器会翻译你的任何调用,并将它们引导到 Mac 主机上的选定 GPU。

苹果为模拟器单独提供了一个设备对象,其功能被限制为苹果 GPU 家族的 Apple2 型号(也就是古早的 A8 芯片),这意味着模拟器往往比实际的 GPU 支持更少的功能或更多的限制。从这篇文档 可以查看到功能限制的详情。

开发调试 GPU 应用,使用真机永远是最好的选择。

定义 FFI

在 iOS/macOS 上,使用 CAMetalLayer 也能创建绘制表面的实例,所以我们无须去实现 raw-window-handle 抽象接口。

先给项目添加上必要的依赖:

toml
[target.'cfg(target_os = "ios")'.dependencies]
+libc = "*"
+objc = "0.2.7"

然后定义一个 IOSViewObj 结构体:

rust
#[repr(C)]
+pub struct IOSViewObj {
+    // metal_layer 所在的 UIView 容器
+    // UIView 有一系列方便的函数可供我们在 Rust 端来调用
+    pub view: *mut Object,
+    // 指向 iOS 端 CAMetalLayer 的指针
+    pub metal_layer: *mut c_void,
+    // 不同的 iOS 设备支持不同的屏幕刷新率,有时我们的 GPU 程序需要用到这类信息
+    pub maximum_frames: i32,
+    // 外部函数接口,用于给 iOS 端传递状态码
+    pub callback_to_swift: extern "C" fn(arg: i32),
+}

#[repr(C)] 属性标注 IOSViewObj 的内存布局兼容 C-ABI。

什么是 ABI?

ABI 是⼀个规范,它涵盖以下内容: · 调⽤约定。⼀个函数的调⽤过程本质就是参数、函数、返回值如何传递。编译器按照调⽤规则去编译,把数据放到相应的堆栈中,函数的调⽤⽅和被调⽤⽅(函数本⾝)都需要遵循这个统⼀的约定。 · 内存布局。主要是⼤⼩和对齐⽅式。 · 处理器指令集。 · ⽬标⽂件和库的⼆进制格式。

为什么使用 C-ABI?

不同的操作系统、编程语⾔、每种编程语⾔的不同编译器 实现基本都有⾃⼰规定或者遵循的 ABI 和调⽤规范。⽬前只能通过 FFI 技术遵循 C 语⾔ ABI 才可以做到编程语⾔的相互调⽤。也就是说,C-ABI 是唯⼀通⽤的稳定的标准 ABI。这是由历史原因决定的,C 语⾔伴随着操作系 统⼀路发展⽽来,导致其成为事实上的标准 ABI。

假设我们已经实现好了一个 wgpu 程序叫 WgpuCanvas, 现在来实现两个供 iOS 端调用的、控制 WgpuCanvas 初始化及帧渲染的函数:

rust
#[no_mangle]
+pub fn create_wgpu_canvas(ios_obj: IOSViewObj) -> *mut libc::c_void {
+    let obj = WgpuCanvas::new(AppSurface::new(ios_obj), 0_i32);
+    // 使用 Box 对 Rust 对象进行装箱操作。
+    // 我们无法将 Rust 对象直接传递给外部语言,通过装箱来传递此对象的裸指针 
+    let box_obj = Box::new(obj);
+    Box::into_raw(box_obj) as *mut libc::c_void
+}
+
+#[no_mangle]
+pub fn enter_frame(obj: *mut libc::c_void) {
+    // 将指针转换为其指代的实际 Rust 对象,同时也拿回此对象的内存管理权
+    // from_raw 是 unsafe 函数,它的调用需要放在 unsafe {} 块中
+    let mut obj: Box<WgpuCanvas> = unsafe { Box::from_raw(obj as *mut _) };
+    obj.enter_frame();
+    // 将 obj 对象的内存管理权重新转交给调用方
+    Box::into_raw(obj);
+}

#[no_mangle] 属性告诉 Rust 关闭函数名称修改功能。如果不加这个属性,Rust 编译器就会修改函数名,这是现代编译器为了解决唯⼀名称解析引起的各种问题所引⼊的技术。如果函数名被修改了,外部编程语言就⽆法按原名称调⽤,开发者也没办法知道修改后的函数名。

你应该已注意到了,上面的 enter_frame(obj: *mut libc::c_void) 函数里,我们做了两次内存管理权的转移,先是取回了内存管理权,后又再次转交给调用方。有没有办法避免这两次转移来提升性能呢?可以,直接从裸指针获取到对象的可变借用:

rust
#[no_mangle]
+pub fn enter_frame(obj: *mut libc::c_void) {
+    // 直接获取到指针指代的 Rust 对象的可变借用
+    let obj = unsafe { &mut *(obj as *mut WgpuCanvas) };
+    obj.enter_frame();
+}

Unsafe Rust

Unsafe Rust 是 Safe Rust 的⼀个超集。也就是说,在 unsafe {} 块中,并不会禁⽤ Safe Rust 中的任何安全检查。它仅在进⾏以下五类操作时,不提供安全检查:

  • 裸指针的解引⽤或类型转换;
  • 调⽤ unsafe 的函数;
  • 访问或修改可变静态变量;
  • 实现 unsafe trait;
  • 读写 Union 联合体中的字段;

&mut *(obj as *mut WgpuCanvas) 之所以要放在 unsafe {} 块中,不仅仅是由于 obj 参数是裸指针,还因为 Rust 在编译阶段的静态安全检查此时完全没有⽤武之地,所以也就没必要提供安全检查了。

还需要写一个简单的 C 语言的头文件来对应上面定义的结构体与函数。 让我们按照惯例,使用项目编译出来的 .a 库文件名称为此头文件命名:

c

+#ifndef libwgpu_on_app_h
+#define libwgpu_on_app_h
+
+#include <stdint.h>
+
+// 这个不透明结构体用来指代 Rust 端的 WgpuCanvas 对象
+struct wgpu_canvas;
+
+// 对应 Rust 端的 IOSViewObj 对象
+struct ios_view_obj {
+    void *view;
+    // CAMetalLayer
+    void *metal_layer;
+    int maximum_frames;
+    void (*callback_to_swift)(int32_t arg);
+};
+
+struct wgpu_canvas *create_wgpu_canvas(struct ios_view_obj object);
+void enter_frame(struct wgpu_canvas *data);
+
+#endif /* libwgpu_on_app_h */

将上面的头文件放置到 iOS 项目中。如果你的 iOS 项目是使用 Swift 创建的,则还需要将头文件引入到桥接文件(XXX-Bridging-Header.h)中:

c
#ifndef wgpu_test_Bridging_Header_h
+#define wgpu_test_Bridging_Header_h
+
+#import "libwgpu_on_app.h"
+
+#endif /* wgpu_test_Bridging_Header_h */

App 中加载 WgpuCanvas 对象

先在 iOS 项目中自定义一个继承自 UIView 的 MetalView,代码很简单:

swift
class MetalView: UIView {
+    // 这里将 View 的默认 Layer 指定为 CAMetalLayer
+    override class var layerClass: AnyClass {
+        return CAMetalLayer.self
+    }
+    
+    override func awakeFromNib() {
+        super.awakeFromNib()
+        configLayer()
+    }
+    
+    private func configLayer() {
+        guard let layer = self.layer as? CAMetalLayer else {
+            return
+        }
+        layer.presentsWithTransaction = false
+        layer.framebufferOnly = true
+        // nativeScale is real physical pixel scale
+        // https://tomisacat.xyz/tech/2017/06/17/scale-nativescale-contentsscale.html
+        self.contentScaleFactor = UIScreen.main.nativeScale
+    }
+}

然后在 ViewController 中实例化 WgpuCanvas:

swift
// ...
+// 我是通过 StoryBoard 绑定的 MetalView,当然,你也可以手动创建
+@IBOutlet var metalV: MetalView!
+// 指向 Rust 端 WgpuCanvas 的指针
+var wgpuCanvas: OpaquePointer?
+lazy var displayLink: CADisplayLink = {
+    CADisplayLink.init(target: self, selector: #selector(enterFrame))
+}()
+// ...
+override func viewDidAppear(_ animated: Bool) {
+    super.viewDidAppear(animated)
+    // 我们需要保证 WgpuCanvas 只被实例化一次
+    if wgpuCanvas == nil {
+        // 将 Swift 对象转换为裸指针
+        let viewPointer = Unmanaged.passRetained(self.metalV).toOpaque()
+        let metalLayer = Unmanaged.passRetained(self.metalV.layer).toOpaque()
+        let maximumFrames = UIScreen.main.maximumFramesPerSecond
+        
+        // 创建 IOSViewObj 实例
+        let viewObj = ios_view_obj(view: viewPointer, metal_layer: metalLayer,maximum_frames: Int32(maximumFrames), callback_to_swift: callback_to_swift)
+        // 创建 WgpuCanvas 实例
+        wgpuCanvas = create_wgpu_canvas(viewObj)
+    }
+    self.displayLink.isPaused = false
+}
+
+@objc func enterFrame() {
+    guard let canvas = self.wgpuCanvas else {
+        return
+    }
+    // 执行 WgpuCanvas 帧渲染
+    enter_frame(canvas)
+}
+
+func callback_to_swift(arg: Int32) {
+    // callback_to_swift 函数是在 WgpuCanvas 中被调用的,WgpuCanvas 的代码很可能没有运行在 iOS 的 UI 线程,
+    // 如果此处涉及到 UI 操作,就必须切换到 UI 线程。
+    DispatchQueue.main.async {
+        switch arg {
+        // ...
+        }
+    }
+}

编译与运行

sh
# 编译为 iOS 真机支持的库
+# debug 库
+cargo build --target aarch64-apple-ios
+# release 库
+cargo build --target aarch64-apple-ios --release
+
+# 编译为 iOS 模拟器支持的库
+# M1+ Mac 上执行:
+cargo build --target aarch64-apple-ios-sim 
+# Intel 芯片的 Mac 上执行:
+cargo build --target x86_64-apple-ios
+
+# 编译成功后需复制文件libwgpu_in_app.a至项目目录下
+# cp target/\${TARGET}/\${LIB_FOLDER}/libwgpu_in_app.a Apple/libs/\${LIB_FOLDER}/libwgpu_in_app.a

打开 iOS 项目,在项目的 General 选项卡下找到 Frameworks, Libraries, and Embedded Content 栏, 导入系统的 libresolv.tbd 及我们刚编译的 .a 库,此导入只需要操作一次:

然后在 Build Settings 选项卡下找到 Search Paths -> Library Search Paths 栏, 将 .a 库的 debug 和 release 路径填到对应的字段中:

最后,还是在 Build Settings 选项卡下,找到 Linking -> Other Linker Flags 栏,添加 -ObjC-lc++ 两个链接标记:

当 Xcode 版本 >= 13 且 iOS Deployment Target >= 12.0 时,Other Linker Flags 栏的设置可以省略。

以上就是所有的关键代码和步骤了,我写了一个叫 wgpu-in-app 的示例程序,效果如下:

',41),r=[t];function d(g,E,c,y,b,u){return i(),a("div",null,r)}const m=s(k,[["render",d]]);export{o as __pageData,m as default}; diff --git a/assets/integration-and-debugging_ios_index.md.qvFn1Gez.lean.js b/assets/integration-and-debugging_ios_index.md.qvFn1Gez.lean.js new file mode 100644 index 000000000..aca220645 --- /dev/null +++ b/assets/integration-and-debugging_ios_index.md.qvFn1Gez.lean.js @@ -0,0 +1 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const l="/learn-wgpu-zh/assets/lib.uClQAVCC.png",p="/learn-wgpu-zh/assets/search.XvqND0E-.png",e="/learn-wgpu-zh/assets/links.ykRZQt6t.png",h="/learn-wgpu-zh/assets/on_ios.tACZQwGC.png",o=JSON.parse('{"title":"与 iOS App 集成","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/ios/index.md","filePath":"integration-and-debugging/ios/index.md","lastUpdated":1703303099000}'),k={name:"integration-and-debugging/ios/index.md"},t=n("",41),r=[t];function d(g,E,c,y,b,u){return i(),a("div",null,r)}const m=s(k,[["render",d]]);export{o as __pageData,m as default}; diff --git a/assets/integration-and-debugging_snapdragon-profiler_index.md.13fbbd81.js b/assets/integration-and-debugging_snapdragon-profiler_index.md.13fbbd81.js deleted file mode 100644 index e6ce653a5..000000000 --- a/assets/integration-and-debugging_snapdragon-profiler_index.md.13fbbd81.js +++ /dev/null @@ -1,5 +0,0 @@ -import{_ as o,o as s,c as a,S as n}from"./chunks/framework.adbf3c9e.js";const e="/learn-wgpu-zh/assets/connect.317eedac.jpg",r="/learn-wgpu-zh/assets/realtime-left.fceb4675.jpg",t="/learn-wgpu-zh/assets/realtime.9dd2da17.jpg",p="/learn-wgpu-zh/assets/trace.b6c48e7e.png",l="/learn-wgpu-zh/assets/GMEM_load.3c3ed86f.jpg",i="/learn-wgpu-zh/assets/GMEM_store.6f15c0e3.jpg",d="/learn-wgpu-zh/assets/frame.5d75a9c3.jpg",c="/learn-wgpu-zh/assets/resource-left.1d783628.jpg",g="/learn-wgpu-zh/assets/resource-right.c404daaa.jpg",D=JSON.parse('{"title":"使用 Snapdragon Profiler 调试 wgpu 
程序","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/snapdragon-profiler/index.md","filePath":"integration-and-debugging/snapdragon-profiler/index.md","lastUpdated":1701933923000}'),u={name:"integration-and-debugging/snapdragon-profiler/index.md"},h=n('

使用 Snapdragon Profiler 调试 wgpu 程序

与 Android App 集成章节我们已经学习了 wgpu 与 Android App 的集成,现在来看看集成后的调试。

Snapdragon Profiler 工具介绍

Snapdragon Profiler 是高通公司开发的一款可运行在 Windows、Mac 和 Linux 平台上的性能分析和帧调试工具。 它通过 USB 与安卓设备连接,允许开发人员分析 CPU、GPU、内存等数据,以便我们发现并修复性能瓶颈。

Snapdragon Profiler 工具的功能特点:

  • 实时监测 GPU 性能;
  • 查看 CPU 调度和 GPU 阶段数据,了解应用程序将时间花在哪里;
  • GPU 捕获;
  • 单步调试绘制;
  • 查看和编辑着色器并在设备上预览结果;
  • 查看和调试像素历史记录;
  • 捕获和查看每次绘制调用的 GPU 指标;

上面的官网链接提供了对应平台安装包的免费下载。如果是 Mac 和 Linux 平台, 在安装 Snapdragon Profiler 之前需要先安装 momo 框架(mono 是 Windows .Net 框架的跨平台开源实现)。 在运行 Snapdragon Profiler 之前需要确保系统上安装了 Android Studio 或者 AndroidSDK,并且已将 ADB 路径添加到系统环境变量中。

实时模式查看 GPU 统计数据

USB 连接要调试的 Android 手机后打开 Snapdragon Profiler,点击窗口左边栏的 Start a Session, 此时右边出现的小弹窗里会列出当前与电脑连接的所有可调试设备,我们选中列表中的设备,勾选上弹窗左下角的 Auto connect 再点击右下角的 Connect,这样,下回再次调试同一台设备时就能自动连接到 Snapdragon Profiler 了:

连接后,有四种调试模式供我们选择:实时、追踪、帧捕获及 CPU 采样,现在选择实时(左图),在实时窗口的左边栏展示了实时指标列表,我们可以选择相应的指标项来收集 CPU、GPU、内存、网络、电源和散热的实时指标(右图):

实时模式
实时预览

上面的右图中,我选择了 GPU GeneralGPU Stalls 两个指标类别,窗口右边展示了每个细分指标的实时数据图表,要添加新的指标图表,只需双击类别(以添加类别中的所有指标)或单个指标,或者将类别或指标拖放到右侧的“图表”窗格中。

追踪模式检查片上内存装载

片上内存(on-chip memory)装载是影响移动应用中 GPU 性能的常见问题之一。在本节中,我们来学习如何使用 Snapdragon Profiler 查找和定位引起片上内存装载的应用程序代码。

Snapdragon Profiler 里将片上内存称之为图形内存(GMEM,全称 Graphic Memory),但是这里的图形内存跟显存容易混淆,它俩并不是一回事。故,下边统一使用片上内存来指代 GMEM。

什么是片上内存装载?

移动 GPU 的 Tiling 架构管线包括一个渲染通道。在渲染过程中,每个 Tile 都是先被渲染到片上内存中。按照驱动程序的默认行为,先前的帧缓冲区数据被从设备内存加载到每个 Tile 的片上内存中,即发生片上内存装载。

所谓 Tiling,本质上就是管理 GPU 内存的技术。Tiling 利用片上内存(on-chip memory)去降低设备内存的访问次数,从而降低 GPU 内存带宽的消耗及访问延迟。 正确理解并利用 Tiling 架构的内存管理特性,可以有效的提高 GPU 程序的性能。

为什么要尽可能地减少或避免片上内存装载?

因为每一次片上内存的加载都会减慢 GPU 的处理速度。
如果在 begin_render_pass 时通过设置 Clear() 来清理片上内存,驱动程序就可以避免在片上内存中装载帧缓冲区数据。虽然这涉及到一个额外的图形指令调用及其相关的开销,但它比为每个正在渲染的 Tile 将帧缓冲区数据加载回片上内存的开销要低得多。

导致片上内存装载的最主要原因是: 对驱动程序的不恰当提示。 应用程序代码使驱动程序认为需要帧缓冲区的先前内容。

检测片上内存装载

在 Snapdragon Profiler 的追踪模式下,我们可以让渲染阶段(Rendering Stages) 指标突出显示其自身通道中的片上内存装载(GMEM Loads)。

GPU 应用必须在项目的 AndroidManifest.xml 文件中包含 INTERNET 权限以启用图形 API 及 GPU 指标的追踪:

toml
<uses-permission android:name="android.permission.INTERNET" />

另外,Snapdragon Profiler 的追踪模式不允许追踪捕获超过 10 秒。也就是说,从点击 Start Capture 开始到点击 Stop Capture 结束,时长不得超过 10 秒。

启用追踪模式的操作步骤:

  • 连接好 Android 设备后,从 Start Page 界面单击左边栏的 System Trace Analysis,此时,就创建了一个新的 Trace 选项卡。
  • 选择刚创建的 Trace 选项卡,进入一个类似于实时模式的视图,然后在 Data Sources 边栏上端的应用列表中选中要追踪的应用(如果列表中找不到,就通过列表右上角的 Launch 按钮去启动要追踪的应用)。
  • Data Sources 边栏下端,选中 Process -> Vulkan -> Rendering Stages 项。

点击 Start Capture 开始追踪,在 10 秒内的任意段点击 Stop Capture,在等待 N 秒(取决于电脑性能)后就会展示出如下图表:

上图渲染阶段的设置对话框显示,这些片上内存装载消耗了总渲染时间的 23% 左右。

我们来看看源码帧渲染中的这条 begin_render_pass() 命令,颜色附件的片上操作使用了 Load:

rust
ops: wgpu::Operations {
-    // load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
-    load: wgpu::LoadOp::Load,
-    store: wgpu::StoreOp::Store
-},

但此处实际上没有装载之前的帧缓冲区数据的必要,我们改为使用 Clear() 改善性能之后,就回收了之前片上内存装载消耗的性能,下图可以看到 GMEM Load 统计项消失了(没有发生片上内存装载时就不会显示):

帧捕获模式

帧捕获模式允许捕获 GPU 应用程序的单一帧, 可以详细显示一个场景在 GPU 上的渲染情况。

启用帧捕获模式的操作与追踪模式几乎一样,唯一不同之处就是帧捕获模式在点击 Take Snapshot 捕获一帧数据后会自动结束捕获:

左侧红框区域是当前帧的着色器代码,它们是由 WGSL 自动转换而来的 SPIR-V 代码(当然,此处的着色器代码还取决于 GPU 应用所使用的图形后端,我使用的是 Vulkan 后端,如果使用 OpenGL 后端,此处就会显示 GLSL 代码)。红框下方的区域可以显示着色器的错误信息。说到这里就不得不提 WebGPU 的 WGSL 着色器语言的优势了:WGSL 在编译阶段时就得到了很好的验证,运行时的验证更是能方便地指出着色器与管线不一致的地方。所以,我们不需要依赖 Snapdragon Profiler 的着色器调试功能。

中间绿框区域是命令队列(Queue)提交给当前帧的所有 Vulkan 命令。选中某一条命令,右侧资源面板将展示出此命令涉及的所有资源:图形|计算管线,纹理,着色器等等。

右侧蓝框区域是资源面板。选中某一项资源,下方的面板将能展示出资源详情。
比如,选择纹理资源后,下方的 Image Preview 选项卡会展示可缩放的大图预览,鼠标在纹理图片上滑动可显示对应像素的 RGB 色值,Inspector 选项卡会展示纹理的格式及层次细节参数等(左图); 选择布局描述符资源后,Inspector 选项卡会展示出绑定组布局描述符(BindGroupLayoutDescriptor)详情(右图):

',43),m=[h];function f(P,b,_,y,S,C){return s(),a("div",null,m)}const G=o(u,[["render",f]]);export{D as __pageData,G as default}; diff --git a/assets/integration-and-debugging_snapdragon-profiler_index.md.13fbbd81.lean.js b/assets/integration-and-debugging_snapdragon-profiler_index.md.13fbbd81.lean.js deleted file mode 100644 index 6aa29aca5..000000000 --- a/assets/integration-and-debugging_snapdragon-profiler_index.md.13fbbd81.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as o,o as s,c as a,S as n}from"./chunks/framework.adbf3c9e.js";const e="/learn-wgpu-zh/assets/connect.317eedac.jpg",r="/learn-wgpu-zh/assets/realtime-left.fceb4675.jpg",t="/learn-wgpu-zh/assets/realtime.9dd2da17.jpg",p="/learn-wgpu-zh/assets/trace.b6c48e7e.png",l="/learn-wgpu-zh/assets/GMEM_load.3c3ed86f.jpg",i="/learn-wgpu-zh/assets/GMEM_store.6f15c0e3.jpg",d="/learn-wgpu-zh/assets/frame.5d75a9c3.jpg",c="/learn-wgpu-zh/assets/resource-left.1d783628.jpg",g="/learn-wgpu-zh/assets/resource-right.c404daaa.jpg",D=JSON.parse('{"title":"使用 Snapdragon Profiler 调试 wgpu 程序","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/snapdragon-profiler/index.md","filePath":"integration-and-debugging/snapdragon-profiler/index.md","lastUpdated":1701933923000}'),u={name:"integration-and-debugging/snapdragon-profiler/index.md"},h=n("",43),m=[h];function f(P,b,_,y,S,C){return s(),a("div",null,m)}const G=o(u,[["render",f]]);export{D as __pageData,G as default}; diff --git a/assets/integration-and-debugging_snapdragon-profiler_index.md.UnTr8zla.js b/assets/integration-and-debugging_snapdragon-profiler_index.md.UnTr8zla.js new file mode 100644 index 000000000..fd746b4a2 --- /dev/null +++ b/assets/integration-and-debugging_snapdragon-profiler_index.md.UnTr8zla.js @@ -0,0 +1,5 @@ +import{_ as s,o as a,c as n,R as r}from"./chunks/framework.bMtwhlie.js";const 
e="/learn-wgpu-zh/assets/connect.tEwLZZd_.jpg",i="/learn-wgpu-zh/assets/realtime-left.nZSwmtCo.jpg",o="/learn-wgpu-zh/assets/realtime.i8oUMXdC.jpg",t="/learn-wgpu-zh/assets/trace.PJbClu6C.png",p="/learn-wgpu-zh/assets/GMEM_load.KFQaOUnP.jpg",l="/learn-wgpu-zh/assets/GMEM_store.gbnpZX9D.jpg",d="/learn-wgpu-zh/assets/frame.tKpmLFBx.jpg",g="/learn-wgpu-zh/assets/resource-left.a2vJQEkx.jpg",c="/learn-wgpu-zh/assets/resource-right.Kw0F1aVF.jpg",E=JSON.parse('{"title":"使用 Snapdragon Profiler 调试 wgpu 程序","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/snapdragon-profiler/index.md","filePath":"integration-and-debugging/snapdragon-profiler/index.md","lastUpdated":1703303099000}'),h={name:"integration-and-debugging/snapdragon-profiler/index.md"},u=r('

使用 Snapdragon Profiler 调试 wgpu 程序

与 Android App 集成章节我们已经学习了 wgpu 与 Android App 的集成,现在来看看集成后的调试。

Snapdragon Profiler 工具介绍

Snapdragon Profiler 是高通公司开发的一款可运行在 Windows、Mac 和 Linux 平台上的性能分析和帧调试工具。 它通过 USB 与安卓设备连接,允许开发人员分析 CPU、GPU、内存等数据,以便我们发现并修复性能瓶颈。

Snapdragon Profiler 工具的功能特点:

  • 实时监测 GPU 性能;
  • 查看 CPU 调度和 GPU 阶段数据,了解应用程序将时间花在哪里;
  • GPU 捕获;
  • 单步调试绘制;
  • 查看和编辑着色器并在设备上预览结果;
  • 查看和调试像素历史记录;
  • 捕获和查看每次绘制调用的 GPU 指标;

上面的官网链接提供了对应平台安装包的免费下载。如果是 Mac 和 Linux 平台, 在安装 Snapdragon Profiler 之前需要先安装 momo 框架(mono 是 Windows .Net 框架的跨平台开源实现)。 在运行 Snapdragon Profiler 之前需要确保系统上安装了 Android Studio 或者 AndroidSDK,并且已将 ADB 路径添加到系统环境变量中。

实时模式查看 GPU 统计数据

USB 连接要调试的 Android 手机后打开 Snapdragon Profiler,点击窗口左边栏的 Start a Session, 此时右边出现的小弹窗里会列出当前与电脑连接的所有可调试设备,我们选中列表中的设备,勾选上弹窗左下角的 Auto connect 再点击右下角的 Connect,这样,下回再次调试同一台设备时就能自动连接到 Snapdragon Profiler 了:

连接后,有四种调试模式供我们选择:实时、追踪、帧捕获及 CPU 采样,现在选择实时(左图),在实时窗口的左边栏展示了实时指标列表,我们可以选择相应的指标项来收集 CPU、GPU、内存、网络、电源和散热的实时指标(右图):

实时模式
实时预览

上面的右图中,我选择了 GPU GeneralGPU Stalls 两个指标类别,窗口右边展示了每个细分指标的实时数据图表,要添加新的指标图表,只需双击类别(以添加类别中的所有指标)或单个指标,或者将类别或指标拖放到右侧的“图表”窗格中。

追踪模式检查片上内存装载

片上内存(on-chip memory)装载是影响移动应用中 GPU 性能的常见问题之一。在本节中,我们来学习如何使用 Snapdragon Profiler 查找和定位引起片上内存装载的应用程序代码。

Snapdragon Profiler 里将片上内存称之为图形内存(GMEM,全称 Graphic Memory),但是这里的图形内存跟显存容易混淆,它俩并不是一回事。故,下边统一使用片上内存来指代 GMEM。

什么是片上内存装载?

移动 GPU 的 Tiling 架构管线包括一个渲染通道。在渲染过程中,每个 Tile 都是先被渲染到片上内存中。按照驱动程序的默认行为,先前的帧缓冲区数据被从设备内存加载到每个 Tile 的片上内存中,即发生片上内存装载。

所谓 Tiling,本质上就是管理 GPU 内存的技术。Tiling 利用片上内存(on-chip memory)去降低设备内存的访问次数,从而降低 GPU 内存带宽的消耗及访问延迟。 正确理解并利用 Tiling 架构的内存管理特性,可以有效的提高 GPU 程序的性能。

为什么要尽可能地减少或避免片上内存装载?

因为每一次片上内存的加载都会减慢 GPU 的处理速度。
如果在 begin_render_pass 时通过设置 Clear() 来清理片上内存,驱动程序就可以避免在片上内存中装载帧缓冲区数据。虽然这涉及到一个额外的图形指令调用及其相关的开销,但它比为每个正在渲染的 Tile 将帧缓冲区数据加载回片上内存的开销要低得多。

导致片上内存装载的最主要原因是: 对驱动程序的不恰当提示。 应用程序代码使驱动程序认为需要帧缓冲区的先前内容。

检测片上内存装载

在 Snapdragon Profiler 的追踪模式下,我们可以让渲染阶段(Rendering Stages) 指标突出显示其自身通道中的片上内存装载(GMEM Loads)。

GPU 应用必须在项目的 AndroidManifest.xml 文件中包含 INTERNET 权限以启用图形 API 及 GPU 指标的追踪:

toml
<uses-permission android:name="android.permission.INTERNET" />

另外,Snapdragon Profiler 的追踪模式不允许追踪捕获超过 10 秒。也就是说,从点击 Start Capture 开始到点击 Stop Capture 结束,时长不得超过 10 秒。

启用追踪模式的操作步骤:

  • 连接好 Android 设备后,从 Start Page 界面单击左边栏的 System Trace Analysis,此时,就创建了一个新的 Trace 选项卡。
  • 选择刚创建的 Trace 选项卡,进入一个类似于实时模式的视图,然后在 Data Sources 边栏上端的应用列表中选中要追踪的应用(如果列表中找不到,就通过列表右上角的 Launch 按钮去启动要追踪的应用)。
  • Data Sources 边栏下端,选中 Process -> Vulkan -> Rendering Stages 项。

点击 Start Capture 开始追踪,在 10 秒内的任意段点击 Stop Capture,在等待 N 秒(取决于电脑性能)后就会展示出如下图表:

上图渲染阶段的设置对话框显示,这些片上内存装载消耗了总渲染时间的 23% 左右。

我们来看看源码帧渲染中的这条 begin_render_pass() 命令,颜色附件的片上操作使用了 Load:

rust
ops: wgpu::Operations {
+    // load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
+    load: wgpu::LoadOp::Load,
+    store: wgpu::StoreOp::Store
+},

但此处实际上没有装载之前的帧缓冲区数据的必要,我们改为使用 Clear() 改善性能之后,就回收了之前片上内存装载消耗的性能,下图可以看到 GMEM Load 统计项消失了(没有发生片上内存装载时就不会显示):

帧捕获模式

帧捕获模式允许捕获 GPU 应用程序的单一帧, 可以详细显示一个场景在 GPU 上的渲染情况。

启用帧捕获模式的操作与追踪模式几乎一样,唯一不同之处就是帧捕获模式在点击 Take Snapshot 捕获一帧数据后会自动结束捕获:

左侧红框区域是当前帧的着色器代码,它们是由 WGSL 自动转换而来的 SPIR-V 代码(当然,此处的着色器代码还取决于 GPU 应用所使用的图形后端,我使用的是 Vulkan 后端,如果使用 OpenGL 后端,此处就会显示 GLSL 代码)。红框下方的区域可以显示着色器的错误信息。说到这里就不得不提 WebGPU 的 WGSL 着色器语言的优势了:WGSL 在编译阶段时就得到了很好的验证,运行时的验证更是能方便地指出着色器与管线不一致的地方。所以,我们不需要依赖 Snapdragon Profiler 的着色器调试功能。

中间绿框区域是命令队列(Queue)提交给当前帧的所有 Vulkan 命令。选中某一条命令,右侧资源面板将展示出此命令涉及的所有资源:图形|计算管线,纹理,着色器等等。

右侧蓝框区域是资源面板。选中某一项资源,下方的面板将能展示出资源详情。
比如,选择纹理资源后,下方的 Image Preview 选项卡会展示可缩放的大图预览,鼠标在纹理图片上滑动可显示对应像素的 RGB 色值,Inspector 选项卡会展示纹理的格式及层次细节参数等(左图); 选择布局描述符资源后,Inspector 选项卡会展示出绑定组布局描述符(BindGroupLayoutDescriptor)详情(右图):

',43),k=[u];function m(P,b,f,_,S,y){return a(),n("div",null,k)}const w=s(h,[["render",m]]);export{E as __pageData,w as default}; diff --git a/assets/integration-and-debugging_snapdragon-profiler_index.md.UnTr8zla.lean.js b/assets/integration-and-debugging_snapdragon-profiler_index.md.UnTr8zla.lean.js new file mode 100644 index 000000000..0601dc38c --- /dev/null +++ b/assets/integration-and-debugging_snapdragon-profiler_index.md.UnTr8zla.lean.js @@ -0,0 +1 @@ +import{_ as s,o as a,c as n,R as r}from"./chunks/framework.bMtwhlie.js";const e="/learn-wgpu-zh/assets/connect.tEwLZZd_.jpg",i="/learn-wgpu-zh/assets/realtime-left.nZSwmtCo.jpg",o="/learn-wgpu-zh/assets/realtime.i8oUMXdC.jpg",t="/learn-wgpu-zh/assets/trace.PJbClu6C.png",p="/learn-wgpu-zh/assets/GMEM_load.KFQaOUnP.jpg",l="/learn-wgpu-zh/assets/GMEM_store.gbnpZX9D.jpg",d="/learn-wgpu-zh/assets/frame.tKpmLFBx.jpg",g="/learn-wgpu-zh/assets/resource-left.a2vJQEkx.jpg",c="/learn-wgpu-zh/assets/resource-right.Kw0F1aVF.jpg",E=JSON.parse('{"title":"使用 Snapdragon Profiler 调试 wgpu 程序","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/snapdragon-profiler/index.md","filePath":"integration-and-debugging/snapdragon-profiler/index.md","lastUpdated":1703303099000}'),h={name:"integration-and-debugging/snapdragon-profiler/index.md"},u=r("",43),k=[u];function m(P,b,f,_,S,y){return a(),n("div",null,k)}const w=s(h,[["render",m]]);export{E as __pageData,w as default}; diff --git a/assets/integration-and-debugging_xcode_index.md.8dd60914.lean.js b/assets/integration-and-debugging_xcode_index.md.8dd60914.lean.js deleted file mode 100644 index 12666b59e..000000000 --- a/assets/integration-and-debugging_xcode_index.md.8dd60914.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as e,o as a,c as s,S as n}from"./chunks/framework.adbf3c9e.js";const 
o="/learn-wgpu-zh/assets/create.2648e6ab.png",t="/learn-wgpu-zh/assets/template.3e66407c.png",r="/learn-wgpu-zh/assets/name.8d9431f3.png",l="/learn-wgpu-zh/assets/project.2c0f8089.png",p="/learn-wgpu-zh/assets/option.5b8443fb.png",c="/learn-wgpu-zh/assets/info.2afe1049.png",i="/learn-wgpu-zh/assets/triangle.a62d2783.png",d="/learn-wgpu-zh/assets/run.84b2b947.png",g="/learn-wgpu-zh/assets/result.e293ca98.png",u="/learn-wgpu-zh/assets/fps.2666c991.png",m="/learn-wgpu-zh/assets/capture.b5f12af0.png",h="/learn-wgpu-zh/assets/debug_navigator.a853a4e1.png",D="/learn-wgpu-zh/assets/buffer_data.e88484b4.png",w=JSON.parse('{"title":"使用 Xcode 调试 wgpu 程序","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/xcode/index.md","filePath":"integration-and-debugging/xcode/index.md","lastUpdated":1701933923000}'),F={name:"integration-and-debugging/xcode/index.md"},b=n("",36),y=[b];function _(f,C,P,v,A,x){return a(),s("div",null,y)}const M=e(F,[["render",_]]);export{w as __pageData,M as default}; diff --git a/assets/integration-and-debugging_xcode_index.md.8dd60914.js b/assets/integration-and-debugging_xcode_index.md.jb-X6rcJ.js similarity index 63% rename from assets/integration-and-debugging_xcode_index.md.8dd60914.js rename to assets/integration-and-debugging_xcode_index.md.jb-X6rcJ.js index cca21ff9f..4eaa2d4cf 100644 --- a/assets/integration-and-debugging_xcode_index.md.8dd60914.js +++ b/assets/integration-and-debugging_xcode_index.md.jb-X6rcJ.js @@ -1,9 +1,9 @@ -import{_ as e,o as a,c as s,S as n}from"./chunks/framework.adbf3c9e.js";const 
o="/learn-wgpu-zh/assets/create.2648e6ab.png",t="/learn-wgpu-zh/assets/template.3e66407c.png",r="/learn-wgpu-zh/assets/name.8d9431f3.png",l="/learn-wgpu-zh/assets/project.2c0f8089.png",p="/learn-wgpu-zh/assets/option.5b8443fb.png",c="/learn-wgpu-zh/assets/info.2afe1049.png",i="/learn-wgpu-zh/assets/triangle.a62d2783.png",d="/learn-wgpu-zh/assets/run.84b2b947.png",g="/learn-wgpu-zh/assets/result.e293ca98.png",u="/learn-wgpu-zh/assets/fps.2666c991.png",m="/learn-wgpu-zh/assets/capture.b5f12af0.png",h="/learn-wgpu-zh/assets/debug_navigator.a853a4e1.png",D="/learn-wgpu-zh/assets/buffer_data.e88484b4.png",w=JSON.parse('{"title":"使用 Xcode 调试 wgpu 程序","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/xcode/index.md","filePath":"integration-and-debugging/xcode/index.md","lastUpdated":1701933923000}'),F={name:"integration-and-debugging/xcode/index.md"},b=n('

使用 Xcode 调试 wgpu 程序

Xcode 与 Metal

Xcode 是苹果官方提供的运行在 macOS 系统上的 IDE,它提供了一整套工具来方便为苹果设备(比如,iPad、iPhone、Mac 等等)创建、测试、优化 App 并最终发布到 App Store。Xcode 是免费的,如果你使用的是 macOS 系统,就可以从 App Store 上下载安装。

Metal 是 wgpu 在苹果平台上使用的图形后端,Metal 框架(Framework)通过提供低开销的底层 GPU 编程 API、图形和计算管线之间的紧密集成以及一套强大的 GPU 分析和调试工具,为苹果平台上的图形硬件加速提供动力。

2014 年,苹果在全球开发者大会 (WWDC) 上宣布为 iOS 推出全新的 Metal 框架, 一年后的 WWDC 2015,苹果宣布了 macOS 上也将支持 Metal,随后是 watchOS 和 tvOS。 随着 Metal API 的不断发展,在 WWDC 2017 上推出了新版本,Metal 2。它增加了对虚拟现实 (VR)、增强现实 (AR) 和机器学习 (ML) 的支持,以及其它许多新功能.

今年(WWDC 2022)推出的 Metal 3,引入了更为强大的功能,能帮助游戏和专业应用程序充分挖掘苹果芯片的潜力:借助高性能放大和抗锯齿(anti-aliasing)功能,能在更短的时间内渲染高分辨率的图形; 使用异步 I/O 将资源数据直接从存储优化流式传输到 Metal 纹理和缓冲区,能更快地加载资源; 新的光线追踪(Metal Ray Tracing)构建加速结构所花费的 GPU 时间更少,可以通过将剔除(Culling)工作转移到 GPU 以减少 CPU 开销,并且通过直接访问原始数据来优化光线求交和着色; 还有机器学习加速及新的网格着色器(Mesh Shader)等等。

创建调试项目

首先,我们打开 Xcode,使用菜单或启动对话框中的 Create a new Xcode project 来创建一个新项目(左图),然后单击 Other 选项卡, 选中外部构建系统(External Build System)作为项目模板(右图):

创建一个新的 Xcode 项目
选择项目模板

然后,在构建工具(Build Tool)字段中填入要使用的构建工具,Xcode 在运行项目时,将会调用此处设置的构建工具。 如果希望 Xcode 运行 cargo 构建命令,就可以填写 cargo 在你的 macOS 上的安装路径(默认的路径是 ${HOME}/.cargo/bin/cargo),也可以留空来表示跳过构建步骤,以避免 Xcode 为我们构建项目。

其余的字段实际上对我们来说并不重要,因此可以随意填,但产品名称(Product Name)字段还是尽量填一个有意义的名称吧:

构建工具设置

编辑 Scheme

接着编辑项目的方案(Scheme)来启用 Metal 的 GPU 帧捕获(Frame Capture)及 接口验证(API Validation)工具集。 通常,如果 Xcode 项目里的代码调用了 Metal 框架或任何其他使用了 Metal API 的框架,Xcode 会自动启用 GPU 帧捕获 和 Metal 接口验证,但我们的这个项目使用的是外部构建系统(External Build System),只能手动设置。

手动设置其实非常简单:

1,单击项目名称,然后单击 Edit Scheme(左图)(或者,从顶部菜单栏上选择 Product -> Scheme -> Edit Scheme);

2,在弹出的方案功能面板中选中左边栏的 Run,将右边的功能配置表切换至 Options 选项卡,设置 GPU Frame Capture 栏为 Metal 来启用 GPU 帧捕获(右图);

GPU Frame Capture 栏三个选项的详细说明:
  • Automatically:自动捕获项目中的 Metal 或 OpenGL ES API 使用情况。如果项目未链接到 Metal 或 OpenGL ES 框架,则 Capture GPU Frame 按钮不会显示在调试栏中。如果项目同时使用 Metal 和 OpenGL ES API,则可以单击并按住 Capture GPU Frame 按钮来选择要捕获的 API;
  • Metal:仅捕获项目中的 Metal API 使用情况;
  • Disabled:禁用 GPU 帧捕获功能;
Edit Scheme
GPU frame capture

3,在 Info 选项卡下的 executable 栏(左图),我们来指定要运行的可执行文件:单击可选项里的 Other,然后在目标目录中找到由 cargo 创建的二进制文件(右图)。

Info 选项卡
如何找到 cargo 创建的二进制可执行文件?

我们以管线教程的示例代码为例,先在项目根目录(learn-wgpu-zh/)运行管线示例:

cargo run --bin tutorial3-pipeline

然后在 learn-wgpu-zh/target/debug/ 路径下你就能找到一个名为 tutorial3-pipeline 的可执行文件。

接下来,点击 Start 按钮,Xcode 就能运行我们指定的二进制文件了:

你应该能看三角形绘制程序正在运行,并且 Xcode 控制台的一些输出告诉我们已启用了 Metal 接口验证:

查看实时 GPU 统计数据

仅需点击 Start 按钮运行我们要调试的程序,然后将 Xcode 左边栏切换至调试导航栏(Debug Navigator,通常 Xcode 会在调试项目启动时自动跳转到调试导航栏), 就能查看到实时的内存、CPU 占用及帧率(FPS)等,选中每一栏还可查看详情,以帧率栏为例,详情内还提供了 CPU 与 GPU 的每帧耗时,GPU 顶点片元运算单元的利用率等信息,方便我们诊断出程序的性能瓶颈之所在:

GPU 帧捕获

要启动 Metal 的调试器(Debugger),在点击 Start 按钮运行程序之后,需再点击 Xcode 调试区(Debug Area)工具栏的 Capture GPU Frame 按钮(上面有个 Metal 图标的按钮)。 捕获一帧之后,我们就能够使用所有常规的 Metal 调试工具(例如 GPU 统计、着色器及缓冲区调试等)了:

调试帧数据

我们以 Uniform 缓冲区 教程的示例为例来调试 Uniform 缓冲区中的数据: 捕获一帧之后,在调试导航栏选择 Render Pass 下的 All Resources 项,右边的列表里将会列出当前程序使用到的各种资源(纹理,缓冲区等)(左图),双击 Camera Buffer 就能格式化展示此缓冲区的数据了,同时数据展示区的下边会多出来一栏工具,方便我们切换数据的格式化类型及展示列数等(右图)。

Debug 导航栏
Camera 缓冲区的数据
不知你有没有注意到

左侧的调试导航栏中的 Render Pass 及右侧资源列表里展示的名称(如,Camera Buffer)都是我们在代码里设置的 labal 参数:

rust
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-    label: Some("Render Pass"),
-    // ...
-};
-// ...
-let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-    label: Some("Camera Buffer"),
-    // ...
-});
`,36),y=[b];function _(f,C,P,v,A,x){return a(),s("div",null,y)}const M=e(F,[["render",_]]);export{w as __pageData,M as default}; +import{_ as s,o as a,c as e,R as i}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/create.PI67xtCk.png",n="/learn-wgpu-zh/assets/template.ZaQEvDur.png",r="/learn-wgpu-zh/assets/name.E7OVs5to.png",l="/learn-wgpu-zh/assets/project.OJieJOtf.png",p="/learn-wgpu-zh/assets/option.tgsWiNAn.png",o="/learn-wgpu-zh/assets/info.zIhwR3lv.png",d="/learn-wgpu-zh/assets/triangle.swPHx-oi.png",h="/learn-wgpu-zh/assets/run.anexA9XQ.png",c="/learn-wgpu-zh/assets/result.Ny0pk3iA.png",g="/learn-wgpu-zh/assets/fps.KHtAZo6h.png",k="/learn-wgpu-zh/assets/capture.UPiFxBju.png",u="/learn-wgpu-zh/assets/debug_navigator.H_YJMyMQ.png",m="/learn-wgpu-zh/assets/buffer_data.6SFbM26w.png",D=JSON.parse('{"title":"使用 Xcode 调试 wgpu 程序","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/xcode/index.md","filePath":"integration-and-debugging/xcode/index.md","lastUpdated":1703303099000}'),E={name:"integration-and-debugging/xcode/index.md"},b=i('

使用 Xcode 调试 wgpu 程序

Xcode 与 Metal

Xcode 是苹果官方提供的运行在 macOS 系统上的 IDE,它提供了一整套工具来方便为苹果设备(比如,iPad、iPhone、Mac 等等)创建、测试、优化 App 并最终发布到 App Store。Xcode 是免费的,如果你使用的是 macOS 系统,就可以从 App Store 上下载安装。

Metal 是 wgpu 在苹果平台上使用的图形后端,Metal 框架(Framework)通过提供低开销的底层 GPU 编程 API、图形和计算管线之间的紧密集成以及一套强大的 GPU 分析和调试工具,为苹果平台上的图形硬件加速提供动力。

2014 年,苹果在全球开发者大会 (WWDC) 上宣布为 iOS 推出全新的 Metal 框架, 一年后的 WWDC 2015,苹果宣布了 macOS 上也将支持 Metal,随后是 watchOS 和 tvOS。 随着 Metal API 的不断发展,在 WWDC 2017 上推出了新版本,Metal 2。它增加了对虚拟现实 (VR)、增强现实 (AR) 和机器学习 (ML) 的支持,以及其它许多新功能.

今年(WWDC 2022)推出的 Metal 3,引入了更为强大的功能,能帮助游戏和专业应用程序充分挖掘苹果芯片的潜力:借助高性能放大和抗锯齿(anti-aliasing)功能,能在更短的时间内渲染高分辨率的图形; 使用异步 I/O 将资源数据直接从存储优化流式传输到 Metal 纹理和缓冲区,能更快地加载资源; 新的光线追踪(Metal Ray Tracing)构建加速结构所花费的 GPU 时间更少,可以通过将剔除(Culling)工作转移到 GPU 以减少 CPU 开销,并且通过直接访问原始数据来优化光线求交和着色; 还有机器学习加速及新的网格着色器(Mesh Shader)等等。

创建调试项目

首先,我们打开 Xcode,使用菜单或启动对话框中的 Create a new Xcode project 来创建一个新项目(左图),然后单击 Other 选项卡, 选中外部构建系统(External Build System)作为项目模板(右图):

创建一个新的 Xcode 项目
选择项目模板

然后,在构建工具(Build Tool)字段中填入要使用的构建工具,Xcode 在运行项目时,将会调用此处设置的构建工具。 如果希望 Xcode 运行 cargo 构建命令,就可以填写 cargo 在你的 macOS 上的安装路径(默认的路径是 ${HOME}/.cargo/bin/cargo),也可以留空来表示跳过构建步骤,以避免 Xcode 为我们构建项目。

其余的字段实际上对我们来说并不重要,因此可以随意填,但产品名称(Product Name)字段还是尽量填一个有意义的名称吧:

构建工具设置

编辑 Scheme

接着编辑项目的方案(Scheme)来启用 Metal 的 GPU 帧捕获(Frame Capture)及 接口验证(API Validation)工具集。 通常,如果 Xcode 项目里的代码调用了 Metal 框架或任何其他使用了 Metal API 的框架,Xcode 会自动启用 GPU 帧捕获 和 Metal 接口验证,但我们的这个项目使用的是外部构建系统(External Build System),只能手动设置。

手动设置其实非常简单:

1,单击项目名称,然后单击 Edit Scheme(左图)(或者,从顶部菜单栏上选择 Product -> Scheme -> Edit Scheme);

2,在弹出的方案功能面板中选中左边栏的 Run,将右边的功能配置表切换至 Options 选项卡,设置 GPU Frame Capture 栏为 Metal 来启用 GPU 帧捕获(右图);

GPU Frame Capture 栏三个选项的详细说明:
  • Automatically:自动捕获项目中的 Metal 或 OpenGL ES API 使用情况。如果项目未链接到 Metal 或 OpenGL ES 框架,则 Capture GPU Frame 按钮不会显示在调试栏中。如果项目同时使用 Metal 和 OpenGL ES API,则可以单击并按住 Capture GPU Frame 按钮来选择要捕获的 API;
  • Metal:仅捕获项目中的 Metal API 使用情况;
  • Disabled:禁用 GPU 帧捕获功能;
Edit Scheme
GPU frame capture

3,在 Info 选项卡下的 executable 栏(左图),我们来指定要运行的可执行文件:单击可选项里的 Other,然后在目标目录中找到由 cargo 创建的二进制文件(右图)。

Info 选项卡
如何找到 cargo 创建的二进制可执行文件?

我们以管线教程的示例代码为例,先在项目根目录(learn-wgpu-zh/)运行管线示例:

cargo run --bin tutorial3-pipeline

然后在 learn-wgpu-zh/target/debug/ 路径下你就能找到一个名为 tutorial3-pipeline 的可执行文件。

接下来,点击 Start 按钮,Xcode 就能运行我们指定的二进制文件了:

你应该能看三角形绘制程序正在运行,并且 Xcode 控制台的一些输出告诉我们已启用了 Metal 接口验证:

查看实时 GPU 统计数据

仅需点击 Start 按钮运行我们要调试的程序,然后将 Xcode 左边栏切换至调试导航栏(Debug Navigator,通常 Xcode 会在调试项目启动时自动跳转到调试导航栏), 就能查看到实时的内存、CPU 占用及帧率(FPS)等,选中每一栏还可查看详情,以帧率栏为例,详情内还提供了 CPU 与 GPU 的每帧耗时,GPU 顶点片元运算单元的利用率等信息,方便我们诊断出程序的性能瓶颈之所在:

GPU 帧捕获

要启动 Metal 的调试器(Debugger),在点击 Start 按钮运行程序之后,需再点击 Xcode 调试区(Debug Area)工具栏的 Capture GPU Frame 按钮(上面有个 Metal 图标的按钮)。 捕获一帧之后,我们就能够使用所有常规的 Metal 调试工具(例如 GPU 统计、着色器及缓冲区调试等)了:

调试帧数据

我们以 Uniform 缓冲区 教程的示例为例来调试 Uniform 缓冲区中的数据: 捕获一帧之后,在调试导航栏选择 Render Pass 下的 All Resources 项,右边的列表里将会列出当前程序使用到的各种资源(纹理,缓冲区等)(左图),双击 Camera Buffer 就能格式化展示此缓冲区的数据了,同时数据展示区的下边会多出来一栏工具,方便我们切换数据的格式化类型及展示列数等(右图)。

Debug 导航栏
Camera 缓冲区的数据
不知你有没有注意到

左侧的调试导航栏中的 Render Pass 及右侧资源列表里展示的名称(如,Camera Buffer)都是我们在代码里设置的 labal 参数:

rust
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+    label: Some("Render Pass"),
+    // ...
+};
+// ...
+let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+    label: Some("Camera Buffer"),
+    // ...
+});
`,36),y=[b];function _(P,v,f,F,A,x){return a(),e("div",null,y)}const w=s(E,[["render",_]]);export{D as __pageData,w as default}; diff --git a/assets/integration-and-debugging_xcode_index.md.jb-X6rcJ.lean.js b/assets/integration-and-debugging_xcode_index.md.jb-X6rcJ.lean.js new file mode 100644 index 000000000..7f503ad22 --- /dev/null +++ b/assets/integration-and-debugging_xcode_index.md.jb-X6rcJ.lean.js @@ -0,0 +1 @@ +import{_ as s,o as a,c as e,R as i}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/create.PI67xtCk.png",n="/learn-wgpu-zh/assets/template.ZaQEvDur.png",r="/learn-wgpu-zh/assets/name.E7OVs5to.png",l="/learn-wgpu-zh/assets/project.OJieJOtf.png",p="/learn-wgpu-zh/assets/option.tgsWiNAn.png",o="/learn-wgpu-zh/assets/info.zIhwR3lv.png",d="/learn-wgpu-zh/assets/triangle.swPHx-oi.png",h="/learn-wgpu-zh/assets/run.anexA9XQ.png",c="/learn-wgpu-zh/assets/result.Ny0pk3iA.png",g="/learn-wgpu-zh/assets/fps.KHtAZo6h.png",k="/learn-wgpu-zh/assets/capture.UPiFxBju.png",u="/learn-wgpu-zh/assets/debug_navigator.H_YJMyMQ.png",m="/learn-wgpu-zh/assets/buffer_data.6SFbM26w.png",D=JSON.parse('{"title":"使用 Xcode 调试 wgpu 程序","description":"","frontmatter":{},"headers":[],"relativePath":"integration-and-debugging/xcode/index.md","filePath":"integration-and-debugging/xcode/index.md","lastUpdated":1703303099000}'),E={name:"integration-and-debugging/xcode/index.md"},b=i("",36),y=[b];function _(P,v,f,F,A,x){return a(),e("div",null,y)}const w=s(E,[["render",_]]);export{D as __pageData,w as default}; diff --git a/assets/inter-italic-cyrillic-ext.33bd5a8e.woff2 b/assets/inter-italic-cyrillic-ext.OVycGSDq.woff2 similarity index 100% rename from assets/inter-italic-cyrillic-ext.33bd5a8e.woff2 rename to assets/inter-italic-cyrillic-ext.OVycGSDq.woff2 diff --git a/assets/inter-italic-cyrillic.ea42a392.woff2 b/assets/inter-italic-cyrillic.-nLMcIwj.woff2 similarity index 100% rename from assets/inter-italic-cyrillic.ea42a392.woff2 rename to 
assets/inter-italic-cyrillic.-nLMcIwj.woff2 diff --git a/assets/inter-italic-greek-ext.4fbe9427.woff2 b/assets/inter-italic-greek-ext.hznxWNZO.woff2 similarity index 100% rename from assets/inter-italic-greek-ext.4fbe9427.woff2 rename to assets/inter-italic-greek-ext.hznxWNZO.woff2 diff --git a/assets/inter-italic-greek.8f4463c4.woff2 b/assets/inter-italic-greek.PSfer2Kc.woff2 similarity index 100% rename from assets/inter-italic-greek.8f4463c4.woff2 rename to assets/inter-italic-greek.PSfer2Kc.woff2 diff --git a/assets/inter-italic-latin-ext.bd8920cc.woff2 b/assets/inter-italic-latin-ext.RnFly65-.woff2 similarity index 100% rename from assets/inter-italic-latin-ext.bd8920cc.woff2 rename to assets/inter-italic-latin-ext.RnFly65-.woff2 diff --git a/assets/inter-italic-latin.bd3b6f56.woff2 b/assets/inter-italic-latin.27E69YJn.woff2 similarity index 100% rename from assets/inter-italic-latin.bd3b6f56.woff2 rename to assets/inter-italic-latin.27E69YJn.woff2 diff --git a/assets/inter-italic-vietnamese.6ce511fb.woff2 b/assets/inter-italic-vietnamese.xzQHe1q1.woff2 similarity index 100% rename from assets/inter-italic-vietnamese.6ce511fb.woff2 rename to assets/inter-italic-vietnamese.xzQHe1q1.woff2 diff --git a/assets/inter-roman-cyrillic-ext.e75737ce.woff2 b/assets/inter-roman-cyrillic-ext.8T9wMG5w.woff2 similarity index 100% rename from assets/inter-roman-cyrillic-ext.e75737ce.woff2 rename to assets/inter-roman-cyrillic-ext.8T9wMG5w.woff2 diff --git a/assets/inter-roman-cyrillic.5f2c6c8c.woff2 b/assets/inter-roman-cyrillic.jIZ9REo5.woff2 similarity index 100% rename from assets/inter-roman-cyrillic.5f2c6c8c.woff2 rename to assets/inter-roman-cyrillic.jIZ9REo5.woff2 diff --git a/assets/inter-roman-greek-ext.ab0619bc.woff2 b/assets/inter-roman-greek-ext.9JiNzaSO.woff2 similarity index 100% rename from assets/inter-roman-greek-ext.ab0619bc.woff2 rename to assets/inter-roman-greek-ext.9JiNzaSO.woff2 diff --git a/assets/inter-roman-greek.d5a6d92a.woff2 
b/assets/inter-roman-greek.Cb5wWeGA.woff2 similarity index 100% rename from assets/inter-roman-greek.d5a6d92a.woff2 rename to assets/inter-roman-greek.Cb5wWeGA.woff2 diff --git a/assets/inter-roman-latin-ext.0030eebd.woff2 b/assets/inter-roman-latin-ext.GZWE-KO4.woff2 similarity index 100% rename from assets/inter-roman-latin-ext.0030eebd.woff2 rename to assets/inter-roman-latin-ext.GZWE-KO4.woff2 diff --git a/assets/inter-roman-latin.2ed14f66.woff2 b/assets/inter-roman-latin.bvIUbFQP.woff2 similarity index 100% rename from assets/inter-roman-latin.2ed14f66.woff2 rename to assets/inter-roman-latin.bvIUbFQP.woff2 diff --git a/assets/inter-roman-vietnamese.14ce25a6.woff2 b/assets/inter-roman-vietnamese.paY3CzEB.woff2 similarity index 100% rename from assets/inter-roman-vietnamese.14ce25a6.woff2 rename to assets/inter-roman-vietnamese.paY3CzEB.woff2 diff --git a/assets/intermediate_compute-pipeline_index.md.96e3a5dc.js b/assets/intermediate_compute-pipeline_index.md.96e3a5dc.js deleted file mode 100644 index 1526f4797..000000000 --- a/assets/intermediate_compute-pipeline_index.md.96e3a5dc.js +++ /dev/null @@ -1,65 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/workgroups.e0cdd028.png",c="/learn-wgpu-zh/assets/global_invocation_id.88d705bb.png",D="/learn-wgpu-zh/assets/local_invocation_id.2af66c96.png",_=JSON.parse('{"title":"计算管线","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/compute-pipeline/index.md","filePath":"intermediate/compute-pipeline/index.md","lastUpdated":1701933923000}'),F={name:"intermediate/compute-pipeline/index.md"},y=r(`

计算管线

计算管线ComputePipeline)是 WebGPU 中控制计算着色器(Compute Shader)阶段的管线。

计算管线适用于各种 GPU 通用计算场景,这是 WebGL 所不具备的。我们通过绑定的存储缓冲区(Storage Buffer)及存储纹理(Storage Texture)来获得计算输出。

创建一个计算管线

计算管线的创建相比于渲染管线简单得多,甚至我们都不需要显式创建并指定绑定组布局(BindGroupLayout):

rust
let compute_shader = device.create_shader_module(...);
-let compute_pipeline = device
-    .create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
-        layout: None, // 此处使用了隐式布局
-        module: &compute_shader,
-        entry_point: "cs_main",
-        label: None,
-    });

使用隐式绑定组布局有一个小小的局限:绑定的每个资源必须在入口点(Entry Point)中有被访问到。如果有没被访问的绑定资源,就必须显式指定布局。

使用存储缓冲区与存储纹理

存储缓冲区存储纹理都是 WGSL 中的资源类型。

而我们在 wgpu 中创建及绑定它们时,与其它缓冲区及纹理的创建是一样的,只需要在 usage 字段中标记出用途:

rust
let buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-        // ...
-        // VERTEX | STORAGE 表示此缓冲区可以做为顶点缓冲区以及存储缓冲区来使用
-        usage: BufferUsages::VERTEX | BufferUsages::STORAGE,
-    });
-
-let tex = app.device.create_texture(&wgpu::TextureDescriptor {
-        // ...
-        // TEXTURE_BINDING | STORAGE_BINDING 表示此纹理可以做为采样纹理以及存储纹理来使用
-        usage: TextureUsages::TEXTURE_BINDING | TextureUsages::STORAGE_BINDING,
-    });

在 WGSL 中,存储缓冲区存储纹理有一些使用上的区别:

  • 存储缓冲区:默认访问模式是只读,可以通过 read_write 声明为可读可写, 读写操作类似于数组的访问与赋值;
  • 存储纹理:默认访问模式是只写,而且在 Web 端只能用只写模式,在 Native 端我们可以使用 TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES feature 来打开可读可写的访问模式。还有,存储纹理必须明确声明纹素的格式,且不支持带 Srgb 后缀的格式。从这里可以查阅到所有受支持的格式: WGSL 标准:存储纹素格式
rust
struct Particle {
-  pos : vec2f,
-  vel : vec2f,
-};
-// 存储缓冲区
-@group(0) @binding(0) var<storage, read_write> particles: array<Particle>;
-@group(0) @binding(1) var from_tex: texture_2d<f32>;
-// 存储纹理
-@group(0) @binding(2) var to_tex: texture_storage_2d<rgba8unorm, write>;
-
-@compute @workgroup_size(16, 16)
-fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
-    let uv = vec2<i32>(global_id.xy);
-    // 读取存储缓冲区
-    let particle = particles[vu.x * uv.y];
-
-    var texel: vec4f;
-    // ...
-    // 写入纹素数据到存储纹理
-    textureStore(to_tex, uv, texel);
-}

计算通道

与创建渲染通道类似,我们需要使用 encoder 来创建计算通道ComputePass),然后用计算通道来编码所有的计算命令

rust
// let encoder = ...
-{
-    let mut cpass = encoder.begin_compute_pass(&ComputePassDescriptor::default());
-    cpass.set_pipeline(&self.compute_pipeline);
-    cpass.set_bind_group(0, &self.bind_group, &[]);
-    cpass.dispatch_workgroups(self.workgroup_count.0, self.workgroup_count.1, 1);
-}
-queue.submit(iter::once(encoder.finish()));

展示平面章节已讲解过 {} 开辟块空间的用途,这里就不再赘述。

dispatch_workgroups() 就是调度计算任务的命令,接受 3 个 u32 类型的值做为参数。这些参数具体是什么意思呢?那就要说到计算管线里最重要的一个概念了:

工作组

GPU 通过同时运行大量线程来实现并行处理的能力,而工作组(Workgroup)就是用于指定 GPU 如何组织这些线程。
一个工作组实质上就是一组调用,同一工作组中的线程同时分别执行一个计算着色器实例,并共享对工作组地址空间中着色器变量的访问。计算着色器通常被设计成线程相互独立运行,但线程在其工作组上进行协作也很常见。

可以将工作组理解为一个三维网格,网格中的每个点就代表一个线程。在图像处理中,网格通常是一个二维的线程矩阵(二维就是其中一个维度为 1 的特殊三维形式),代表整个图像,每个线程对应于正在处理的图像的一个像素。

我们通过(x, y, z)三个维度来声明计算着色器的工作组大小,每个维度上的默认值都是 1:

rust
@workgroup_size(32, 16) // x = 32, y = 16, z = 1

下图显示了正在被着色器处理的图像如何被划分为工作组,以及每个工作组是如何由单个线程组成:

工作组网格

那么,线程与工作组在计算着色器网格中的位置是如何被标识的呢?

内建输入量

WGSL 计算着色器有 5 个内建输入量(Buit-in Input Values)用于标识当前线程及工作组:

  • global_invocation_id:当前线程在计算着色器网格中的全局三维坐标;
  • local_invocation_id:当前线程在所处的工作组中的局部三维坐标;
  • local_invocation_index:当前线程在所处的工作组中的线性化索引;
  • workgroup_id:当前工作组在工作组网格中的三维坐标;
  • num_workgroups:当前调度(dispatch)的工作组维度量;

最常用的是前三个内建输入量

例如,给定一个由 16 * 16 * 1 个线程组成的网格,将其划分为 2 * 4 * 1 个工作组,8 * 4 * 1 个线程。 那么:

  • 一个线程在网格中的 global_invocation_id 全局三维坐标是 (9, 10)(左图);
  • 此线程在所处工作组中的 local_invocation_id 局部三维坐标是 (1, 2)local_invocation_index 线性化索引是 17(右图);
  • 所处工作组在工作组网格中的 workgroup_id 三维坐标就是 (1, 2) (右图的蓝绿色块):
global_invocation_id
local_invocation_id

这些内建输入量在着色器中具体如何使用呢?下边的着色器中演示了如何直接通过 global_invocation_id 来获取纹素的坐标:

rust
struct UniformParams {
-  img_size: vec2<i32>,
-  uv_offset: vec2<i32>,
-};
-@group(0) @binding(0) var<uniform> params: UniformParams;
-@group(0) @binding(1) var from_tex: texture_2d<f32>;
-
-@compute @workgroup_size(16, 16)
-fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
-  // 纹素(图片的像素)坐标
-  let uv = vec2<i32>(global_id.xy);
-  // 判断当前坐标是否超出了纹素坐标范围
-  if (uv.x >= params.img_size.x || uv.y >= params.img_size.y) {
-    return;
-  }
-
-  // 从纹理图像中读取纹素
-  var texel = textureLoad(from_tex, uv, 0)
-  // ...
-}

使用计算着色器需要注意避免坐标越界问题,因为通常纹理图像的分辨率与我们的工作组大小不是整除关系。

确定工作组大小与数量

工作组的最佳大小(指 x, y, z 三个维度的大小)并没有固定的值,需要结合实际使用场景来确定,而且能支持的最大值还与目标硬件有关。

我们从 adapter.limits() 里,能获取到当前设备支持的最大工作组大小 (maxComputeWorkgroupSizeX,maxComputeWorkgroupSizeY,maxComputeWorkgroupSizeZ),它们的默认值分别为 (256, 256, 64)

这三个维度的最大值容易让人误解,以为可以在计算着色器中设置 @workgroup_size(256, 256, 64)

事实上 WebGPU spec 的验证规则是:x * y * z <= max(x, max(y, z)), 也就是说,设置的 @workgroup_size 三个维度的乘积不能大于 maxComputeWorkgroupSizeX,Y,Z 三个维度中的最大值。

通常,当只需要在计算着色器中操作存储缓冲区时,使用一维工作组 @workgroup_size(x) 是合适的, y、z 维度保持默认值 1; 当需要操作纹理,使用二维或三维工作组 @workgroup_size(x,y) 会更便利。

现在我们可以来回答开头的问题了:

rust
cpass.dispatch_workgroups(workgroup_count.0, workgroup_count.1, workgroup_count.2);

上面计算通道的调度命令接收的参数具体是什么意思呢?

它们就是工作组网格的 3 个维度量。
1000 * 768 个线程组成的网格为例,假设工作组大小为@workgroup_size(32,16),那么:

rust
// 计算维度值并向上取整
-workgroup_count = ((1000 + (32 -1)) / 32, (768 + (16 -1)) / 16, 1);

CPU 端读取计算管线输出

案例展示/离屏渲染章节已讲解过如何从缓冲区中读取数据,存储纹理的读取也是一样的,这里不再赘述。

实战:实现高斯模糊

要掌握 WebGPU 计算管线,核心内容就是上边讲解的 “使用存储缓冲区与存储纹理” 及 “工作组”,某个具体实现的着色器逻辑代码并不重要,因为它们与顶点及片元着色器代码没有太大的区别。

点击下方的查看源码就能看到所有实现代码。如对图片模糊算法的细节感兴趣,可以查看这里

运行示例代码

此示例可在桌面端及 Chrome/Edge 113+、Chrome/Edge Canary、Firefox Nightly 浏览器中运行(如何开启浏览器 webgpu 试验功能)

`,54);function i(C,A,u,d,b,m){const a=s("WebGPUExample"),l=s("AutoGithubLink");return o(),e("div",null,[y,n(a,{example:"compute_pipeline"}),n(l)])}const h=p(F,[["render",i]]);export{_ as __pageData,h as default}; diff --git a/assets/intermediate_compute-pipeline_index.md.96e3a5dc.lean.js b/assets/intermediate_compute-pipeline_index.md.96e3a5dc.lean.js deleted file mode 100644 index 2980f6801..000000000 --- a/assets/intermediate_compute-pipeline_index.md.96e3a5dc.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/workgroups.e0cdd028.png",c="/learn-wgpu-zh/assets/global_invocation_id.88d705bb.png",D="/learn-wgpu-zh/assets/local_invocation_id.2af66c96.png",_=JSON.parse('{"title":"计算管线","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/compute-pipeline/index.md","filePath":"intermediate/compute-pipeline/index.md","lastUpdated":1701933923000}'),F={name:"intermediate/compute-pipeline/index.md"},y=r("",54);function i(C,A,u,d,b,m){const a=s("WebGPUExample"),l=s("AutoGithubLink");return o(),e("div",null,[y,n(a,{example:"compute_pipeline"}),n(l)])}const h=p(F,[["render",i]]);export{_ as __pageData,h as default}; diff --git a/assets/intermediate_compute-pipeline_index.md.I_Ricl1V.js b/assets/intermediate_compute-pipeline_index.md.I_Ricl1V.js new file mode 100644 index 000000000..27f682ff4 --- /dev/null +++ b/assets/intermediate_compute-pipeline_index.md.I_Ricl1V.js @@ -0,0 +1,65 @@ +import{_ as p,D as s,o as l,c as h,I as i,R as t}from"./chunks/framework.bMtwhlie.js";const 
k="/learn-wgpu-zh/assets/workgroups.ZXZigntI.png",e="/learn-wgpu-zh/assets/global_invocation_id.WloB-8Kb.png",r="/learn-wgpu-zh/assets/local_invocation_id.JaKnNG8K.png",C=JSON.parse('{"title":"计算管线","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/compute-pipeline/index.md","filePath":"intermediate/compute-pipeline/index.md","lastUpdated":1703303099000}'),d={name:"intermediate/compute-pipeline/index.md"},E=t(`

计算管线

计算管线ComputePipeline)是 WebGPU 中控制计算着色器(Compute Shader)阶段的管线。

计算管线适用于各种 GPU 通用计算场景,这是 WebGL 所不具备的。我们通过绑定的存储缓冲区(Storage Buffer)及存储纹理(Storage Texture)来获得计算输出。

创建一个计算管线

计算管线的创建相比于渲染管线简单得多,甚至我们都不需要显式创建并指定绑定组布局(BindGroupLayout):

rust
let compute_shader = device.create_shader_module(...);
+let compute_pipeline = device
+    .create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
+        layout: None, // 此处使用了隐式布局
+        module: &compute_shader,
+        entry_point: "cs_main",
+        label: None,
+    });

使用隐式绑定组布局有一个小小的局限:绑定的每个资源必须在入口点(Entry Point)中有被访问到。如果有没被访问的绑定资源,就必须显式指定布局。

使用存储缓冲区与存储纹理

存储缓冲区存储纹理都是 WGSL 中的资源类型。

而我们在 wgpu 中创建及绑定它们时,与其它缓冲区及纹理的创建是一样的,只需要在 usage 字段中标记出用途:

rust
let buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+        // ...
+        // VERTEX | STORAGE 表示此缓冲区可以做为顶点缓冲区以及存储缓冲区来使用
+        usage: BufferUsages::VERTEX | BufferUsages::STORAGE,
+    });
+
+let tex = app.device.create_texture(&wgpu::TextureDescriptor {
+        // ...
+        // TEXTURE_BINDING | STORAGE_BINDING 表示此纹理可以做为采样纹理以及存储纹理来使用
+        usage: TextureUsages::TEXTURE_BINDING | TextureUsages::STORAGE_BINDING,
+    });

在 WGSL 中,存储缓冲区存储纹理有一些使用上的区别:

  • 存储缓冲区:默认访问模式是只读,可以通过 read_write 声明为可读可写, 读写操作类似于数组的访问与赋值;
  • 存储纹理:默认访问模式是只写,而且在 Web 端只能用只写模式,在 Native 端我们可以使用 TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES feature 来打开可读可写的访问模式。还有,存储纹理必须明确声明纹素的格式,且不支持带 Srgb 后缀的格式。从这里可以查阅到所有受支持的格式: WGSL 标准:存储纹素格式
rust
struct Particle {
+  pos : vec2f,
+  vel : vec2f,
+};
+// 存储缓冲区
+@group(0) @binding(0) var<storage, read_write> particles: array<Particle>;
+@group(0) @binding(1) var from_tex: texture_2d<f32>;
+// 存储纹理
+@group(0) @binding(2) var to_tex: texture_storage_2d<rgba8unorm, write>;
+
+@compute @workgroup_size(16, 16)
+fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
+    let uv = vec2<i32>(global_id.xy);
+    // 读取存储缓冲区
+    let particle = particles[vu.x * uv.y];
+
+    var texel: vec4f;
+    // ...
+    // 写入纹素数据到存储纹理
+    textureStore(to_tex, uv, texel);
+}

计算通道

与创建渲染通道类似,我们需要使用 encoder 来创建计算通道ComputePass),然后用计算通道来编码所有的计算命令

rust
// let encoder = ...
+{
+    let mut cpass = encoder.begin_compute_pass(&ComputePassDescriptor::default());
+    cpass.set_pipeline(&self.compute_pipeline);
+    cpass.set_bind_group(0, &self.bind_group, &[]);
+    cpass.dispatch_workgroups(self.workgroup_count.0, self.workgroup_count.1, 1);
+}
+queue.submit(iter::once(encoder.finish()));

展示平面章节已讲解过 {} 开辟块空间的用途,这里就不再赘述。

dispatch_workgroups() 就是调度计算任务的命令,接受 3 个 u32 类型的值做为参数。这些参数具体是什么意思呢?那就要说到计算管线里最重要的一个概念了:

工作组

GPU 通过同时运行大量线程来实现并行处理的能力,而工作组(Workgroup)就是用于指定 GPU 如何组织这些线程。
一个工作组实质上就是一组调用,同一工作组中的线程同时分别执行一个计算着色器实例,并共享对工作组地址空间中着色器变量的访问。计算着色器通常被设计成线程相互独立运行,但线程在其工作组上进行协作也很常见。

可以将工作组理解为一个三维网格,网格中的每个点就代表一个线程。在图像处理中,网格通常是一个二维的线程矩阵(二维就是其中一个维度为 1 的特殊三维形式),代表整个图像,每个线程对应于正在处理的图像的一个像素。

我们通过(x, y, z)三个维度来声明计算着色器的工作组大小,每个维度上的默认值都是 1:

rust
@workgroup_size(32, 16) // x = 32, y = 16, z = 1

下图显示了正在被着色器处理的图像如何被划分为工作组,以及每个工作组是如何由单个线程组成:

工作组网格

那么,线程与工作组在计算着色器网格中的位置是如何被标识的呢?

内建输入量

WGSL 计算着色器有 5 个内建输入量(Buit-in Input Values)用于标识当前线程及工作组:

  • global_invocation_id:当前线程在计算着色器网格中的全局三维坐标;
  • local_invocation_id:当前线程在所处的工作组中的局部三维坐标;
  • local_invocation_index:当前线程在所处的工作组中的线性化索引;
  • workgroup_id:当前工作组在工作组网格中的三维坐标;
  • num_workgroups:当前调度(dispatch)的工作组维度量;

最常用的是前三个内建输入量

例如,给定一个由 16 * 16 * 1 个线程组成的网格,将其划分为 2 * 4 * 1 个工作组,8 * 4 * 1 个线程。 那么:

  • 一个线程在网格中的 global_invocation_id 全局三维坐标是 (9, 10)(左图);
  • 此线程在所处工作组中的 local_invocation_id 局部三维坐标是 (1, 2)local_invocation_index 线性化索引是 17(右图);
  • 所处工作组在工作组网格中的 workgroup_id 三维坐标就是 (1, 2) (右图的蓝绿色块):
global_invocation_id
local_invocation_id

这些内建输入量在着色器中具体如何使用呢?下边的着色器中演示了如何直接通过 global_invocation_id 来获取纹素的坐标:

rust
struct UniformParams {
+  img_size: vec2<i32>,
+  uv_offset: vec2<i32>,
+};
+@group(0) @binding(0) var<uniform> params: UniformParams;
+@group(0) @binding(1) var from_tex: texture_2d<f32>;
+
+@compute @workgroup_size(16, 16)
+fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
+  // 纹素(图片的像素)坐标
+  let uv = vec2<i32>(global_id.xy);
+  // 判断当前坐标是否超出了纹素坐标范围
+  if (uv.x >= params.img_size.x || uv.y >= params.img_size.y) {
+    return;
+  }
+
+  // 从纹理图像中读取纹素
+  var texel = textureLoad(from_tex, uv, 0);
+  // ...
+}

使用计算着色器需要注意避免坐标越界问题,因为通常纹理图像的分辨率与我们的工作组大小不是整除关系。

确定工作组大小与数量

工作组的最佳大小(指 x, y, z 三个维度的大小)并没有固定的值,需要结合实际使用场景来确定,而且能支持的最大值还与目标硬件有关。

我们从 adapter.limits() 里,能获取到当前设备支持的最大工作组大小 (maxComputeWorkgroupSizeX,maxComputeWorkgroupSizeY,maxComputeWorkgroupSizeZ),它们的默认值分别为 (256, 256, 64)

这三个维度的最大值容易让人误解,以为可以在计算着色器中设置 @workgroup_size(256, 256, 64)

事实上 WebGPU spec 的验证规则是:x * y * z <= max(x, max(y, z)), 也就是说,设置的 @workgroup_size 三个维度的乘积不能大于 maxComputeWorkgroupSizeX,Y,Z 三个维度中的最大值。

通常,当只需要在计算着色器中操作存储缓冲区时,使用一维工作组 @workgroup_size(x) 是合适的, y、z 维度保持默认值 1; 当需要操作纹理,使用二维或三维工作组 @workgroup_size(x,y) 会更便利。

现在我们可以来回答开头的问题了:

rust
cpass.dispatch_workgroups(workgroup_count.0, workgroup_count.1, workgroup_count.2);

上面计算通道的调度命令接收的参数具体是什么意思呢?

它们就是工作组网格的 3 个维度量。
1000 * 768 个线程组成的网格为例,假设工作组大小为@workgroup_size(32,16),那么:

rust
// 计算维度值并向上取整
+workgroup_count = ((1000 + (32 -1)) / 32, (768 + (16 -1)) / 16, 1);

CPU 端读取计算管线输出

案例展示/离屏渲染章节已讲解过如何从缓冲区中读取数据,存储纹理的读取也是一样的,这里不再赘述。

实战:实现高斯模糊

要掌握 WebGPU 计算管线,核心内容就是上边讲解的 “使用存储缓冲区与存储纹理” 及 “工作组”,某个具体实现的着色器逻辑代码并不重要,因为它们与顶点及片元着色器代码没有太大的区别。

点击下方的查看源码就能看到所有实现代码。如对图片模糊算法的细节感兴趣,可以查看这里

运行示例代码

此示例可在桌面端及 Chrome/Edge 113+、Chrome/Edge Canary、Firefox Nightly 浏览器中运行(如何开启浏览器 webgpu 试验功能)

`,54);function g(o,y,c,F,u,b){const a=s("WebGPUExample"),n=s("AutoGithubLink");return l(),h("div",null,[E,i(a,{example:"compute_pipeline"}),i(n)])}const A=p(d,[["render",g]]);export{C as __pageData,A as default}; diff --git a/assets/intermediate_compute-pipeline_index.md.I_Ricl1V.lean.js b/assets/intermediate_compute-pipeline_index.md.I_Ricl1V.lean.js new file mode 100644 index 000000000..62ee9154c --- /dev/null +++ b/assets/intermediate_compute-pipeline_index.md.I_Ricl1V.lean.js @@ -0,0 +1 @@ +import{_ as p,D as s,o as l,c as h,I as i,R as t}from"./chunks/framework.bMtwhlie.js";const k="/learn-wgpu-zh/assets/workgroups.ZXZigntI.png",e="/learn-wgpu-zh/assets/global_invocation_id.WloB-8Kb.png",r="/learn-wgpu-zh/assets/local_invocation_id.JaKnNG8K.png",C=JSON.parse('{"title":"计算管线","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/compute-pipeline/index.md","filePath":"intermediate/compute-pipeline/index.md","lastUpdated":1703303099000}'),d={name:"intermediate/compute-pipeline/index.md"},E=t("",54);function g(o,y,c,F,u,b){const a=s("WebGPUExample"),n=s("AutoGithubLink");return l(),h("div",null,[E,i(a,{example:"compute_pipeline"}),i(n)])}const A=p(d,[["render",g]]);export{C as __pageData,A as default}; diff --git a/assets/intermediate_pbr-notes.md.fc667d08.js b/assets/intermediate_pbr-notes.md.TZH05UCG.js similarity index 81% rename from assets/intermediate_pbr-notes.md.fc667d08.js rename to assets/intermediate_pbr-notes.md.TZH05UCG.js index 770d7e7e5..2f1afd3f3 100644 --- a/assets/intermediate_pbr-notes.md.fc667d08.js +++ b/assets/intermediate_pbr-notes.md.TZH05UCG.js @@ -1 +1 @@ -import{_ as t,o as r,c as s,k as e,a}from"./chunks/framework.adbf3c9e.js";const b=JSON.parse('{"title":"Sources","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/pbr-notes.md","filePath":"intermediate/pbr-notes.md","lastUpdated":1701933923000}'),n={name:"intermediate/pbr-notes.md"},o=e("h1",{id:"sources",tabindex:"-1"},[a("Sources 
"),e("a",{class:"header-anchor",href:"#sources","aria-label":'Permalink to "Sources"'},"​")],-1),i=e("ul",null,[e("li",null,[e("a",{href:"https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/diffuse-lambertian-shading",target:"_blank",rel:"noreferrer"},"https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/diffuse-lambertian-shading")]),e("li",null,[e("a",{href:"http://jimmiejohnsson84.me/pages/rendering_pbr.html",target:"_blank",rel:"noreferrer"},"http://jimmiejohnsson84.me/pages/rendering_pbr.html")])],-1),c=[o,i];function d(l,h,p,m,_,u){return r(),s("div",null,c)}const g=t(n,[["render",d]]);export{b as __pageData,g as default}; +import{_ as t,o as r,c as s,k as e,a}from"./chunks/framework.bMtwhlie.js";const b=JSON.parse('{"title":"Sources","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/pbr-notes.md","filePath":"intermediate/pbr-notes.md","lastUpdated":1703303099000}'),n={name:"intermediate/pbr-notes.md"},o=e("h1",{id:"sources",tabindex:"-1"},[a("Sources "),e("a",{class:"header-anchor",href:"#sources","aria-label":'Permalink to "Sources"'},"​")],-1),i=e("ul",null,[e("li",null,[e("a",{href:"https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/diffuse-lambertian-shading",target:"_blank",rel:"noreferrer"},"https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/diffuse-lambertian-shading")]),e("li",null,[e("a",{href:"http://jimmiejohnsson84.me/pages/rendering_pbr.html",target:"_blank",rel:"noreferrer"},"http://jimmiejohnsson84.me/pages/rendering_pbr.html")])],-1),c=[o,i];function d(l,h,p,m,_,u){return r(),s("div",null,c)}const g=t(n,[["render",d]]);export{b as __pageData,g as default}; diff --git a/assets/intermediate_pbr-notes.md.fc667d08.lean.js b/assets/intermediate_pbr-notes.md.TZH05UCG.lean.js similarity index 81% rename from assets/intermediate_pbr-notes.md.fc667d08.lean.js rename to 
assets/intermediate_pbr-notes.md.TZH05UCG.lean.js index 770d7e7e5..2f1afd3f3 100644 --- a/assets/intermediate_pbr-notes.md.fc667d08.lean.js +++ b/assets/intermediate_pbr-notes.md.TZH05UCG.lean.js @@ -1 +1 @@ -import{_ as t,o as r,c as s,k as e,a}from"./chunks/framework.adbf3c9e.js";const b=JSON.parse('{"title":"Sources","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/pbr-notes.md","filePath":"intermediate/pbr-notes.md","lastUpdated":1701933923000}'),n={name:"intermediate/pbr-notes.md"},o=e("h1",{id:"sources",tabindex:"-1"},[a("Sources "),e("a",{class:"header-anchor",href:"#sources","aria-label":'Permalink to "Sources"'},"​")],-1),i=e("ul",null,[e("li",null,[e("a",{href:"https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/diffuse-lambertian-shading",target:"_blank",rel:"noreferrer"},"https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/diffuse-lambertian-shading")]),e("li",null,[e("a",{href:"http://jimmiejohnsson84.me/pages/rendering_pbr.html",target:"_blank",rel:"noreferrer"},"http://jimmiejohnsson84.me/pages/rendering_pbr.html")])],-1),c=[o,i];function d(l,h,p,m,_,u){return r(),s("div",null,c)}const g=t(n,[["render",d]]);export{b as __pageData,g as default}; +import{_ as t,o as r,c as s,k as e,a}from"./chunks/framework.bMtwhlie.js";const b=JSON.parse('{"title":"Sources","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/pbr-notes.md","filePath":"intermediate/pbr-notes.md","lastUpdated":1703303099000}'),n={name:"intermediate/pbr-notes.md"},o=e("h1",{id:"sources",tabindex:"-1"},[a("Sources "),e("a",{class:"header-anchor",href:"#sources","aria-label":'Permalink to 
"Sources"'},"​")],-1),i=e("ul",null,[e("li",null,[e("a",{href:"https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/diffuse-lambertian-shading",target:"_blank",rel:"noreferrer"},"https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/diffuse-lambertian-shading")]),e("li",null,[e("a",{href:"http://jimmiejohnsson84.me/pages/rendering_pbr.html",target:"_blank",rel:"noreferrer"},"http://jimmiejohnsson84.me/pages/rendering_pbr.html")])],-1),c=[o,i];function d(l,h,p,m,_,u){return r(),s("div",null,c)}const g=t(n,[["render",d]]);export{b as __pageData,g as default}; diff --git a/assets/intermediate_tutorial10-lighting_index.md.bf469bcb.js b/assets/intermediate_tutorial10-lighting_index.md.bf469bcb.js deleted file mode 100644 index 06dba724e..000000000 --- a/assets/intermediate_tutorial10-lighting_index.md.bf469bcb.js +++ /dev/null @@ -1,575 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/light-in-scene.630f9dca.png",t="/learn-wgpu-zh/assets/ambient_lighting.d9ea7212.png",F="/learn-wgpu-zh/assets/normal_diagram.35def195.png",D="/learn-wgpu-zh/assets/ambient_diffuse_wrong.a2837930.png",y="/learn-wgpu-zh/assets/diffuse_wrong.70c1d359.png",C="/learn-wgpu-zh/assets/normal_not_rotated.7327fe66.png",A="/learn-wgpu-zh/assets/diffuse_right.33406428.png",i="/learn-wgpu-zh/assets/ambient_diffuse_lighting.14acf617.png",b="/learn-wgpu-zh/assets/normal-scale-issue.0959834f.png",u="/learn-wgpu-zh/assets/specular_diagram.417ae155.png",m="/learn-wgpu-zh/assets/ambient_diffuse_specular_lighting.4ab44d3e.png",B="/learn-wgpu-zh/assets/specular_lighting.8a1c656e.png",d="/learn-wgpu-zh/assets/half_dir.96da210f.png",V=JSON.parse('{"title":"光照","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial10-lighting/index.md","filePath":"intermediate/tutorial10-lighting/index.md","lastUpdated":1701933923000}'),g={name:"intermediate/tutorial10-lightin
g/index.md"},_=r(`

光照

虽然我们的场景是 3D 的,但它们看起来像是平的,对象表面缺乏现实光照环境中的明暗变化,所以无法体现模型的三维特性。这是因为我们的模型没有考虑光线和对象表面之间的相互作用,无论如何摆放都会保持着相同的着色。

如果想修正这一点,就需要在我们的场景中添加光照(Lighting)。

在现实世界中,光源发出的光子会四处反射,最后进入我们的眼睛。 当观察对象上的一点时,我们所看到的颜色取决于多个光源和多个反射表面之间的多次相互作用。

在计算机图形学领域,为单个光子建模的计算成本极高。一个 100 瓦的灯泡每秒钟发出大约 3.27×10^20 个光子,再试想一下太阳每秒发出的光子的数量级。为了解决这个问题,我们要用数学来 “作弊”(也就是模拟。严格来说,这不是作弊,计算机图形学里有这么一句名言:"If it looks right, it is right.", 意思就是,如果它看起来是对的,那么它就是对的)。

我们来看看计算机图形学里常用的几个光照模型。

光线/路径追踪

光线/路径追踪(Ray/Path tracing)以虚拟摄像机模型为基础,但是对于每条与某个三角形相交的投影线,在计算光源对交点处明暗值的直接贡献之前,还要确定是否有一个或者多个光源能够照射到这个交点。

它是最接近光的真实工作方式的模型,所以我觉得必须提到它。但这是一个高级话题,我们不会在这里深入讨论。

Blinn-Phong 反射模型

对于大多数实时(real-time)应用来说,光线/路径追踪的计算成本实在太高了(尽管这种情况已经开始改变),所以通常使用一种更有效的,精度较低的 Phong 反射模型 来解决光照问题。它考虑了光线与材质的 3 种相互作用:环境光反射、漫反射和镜面反射。我们将学习 Blinn-Phong 反射模型,它能加速镜面反射的计算。

在开始学习之前,需要在我们的场景中添加一个光源:

rust
// lib.rs
-#[repr(C)]
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct LightUniform {
-    position: [f32; 3],
-    // 由于 Uniform 需要字段按 16 字节对齐,我们需要在这里使用一个填充字段
-    _padding: u32,
-    color: [f32; 3],
-    _padding2: u32,
-}

LightUniform 代表空间中的一个彩色点光源。虽然通常是使用纯白色的光,但使用其它颜色的光也是可以的。

使 WGSL 结构体内存字节对齐的经验法则是:字段保持按 2 的 N 次幂来对齐。 例如,一个 vec3 如果是 3 个单精度浮点数,它的大小为 12 字节,对齐后将被提升到 2 的下一个次幂,即 16 字节. 这意味着必须更加小心地布局你的结构体。

一些开发者会选择使用 vec4 而不是 vec3 来避免对齐问题。 你可以在 wgsl spec 中了解更多关于对齐规则的信息。

接下来,创建一个 Uniform 缓冲区来存储我们的光源:

rust
let light_uniform = LightUniform {
-    position: [2.0, 2.0, 2.0],
-    _padding: 0,
-    color: [1.0, 1.0, 1.0],
-    _padding2: 0,
-};
-
- // 我们希望能更新光源位置,所以用了 COPY_DST 这个使用范围标志
-let light_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Light VB"),
-        contents: bytemuck::cast_slice(&[light_uniform]),
-        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-    }
-);

别忘记把 light_uniformlight_buffer 添加到 State。之后,我们为光源创建一个绑定组的布局绑定组

rust
let light_bind_group_layout =
-    device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-        entries: &[wgpu::BindGroupLayoutEntry {
-            binding: 0,
-            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
-            ty: wgpu::BindingType::Buffer {
-                ty: wgpu::BufferBindingType::Uniform,
-                has_dynamic_offset: false,
-                min_binding_size: None,
-            },
-            count: None,
-        }],
-        label: None,
-    });
-
-let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-    layout: &light_bind_group_layout,
-    entries: &[wgpu::BindGroupEntry {
-        binding: 0,
-        resource: light_buffer.as_entire_binding(),
-    }],
-    label: None,
-});

把它们添加到 State 中,同时更新 render_pipeline_layout

rust
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-    bind_group_layouts: &[
-        &texture_bind_group_layout,
-        &camera_bind_group_layout,
-        &light_bind_group_layout,
-    ],
-});

update() 函数中更新光源的位置,这样便能看到对象在不同角度下的光照效果:

rust
// 更新光源
-let old_position = glam::Vec3::from_array(self.light_uniform.position);
-self.light_uniform.position =
-    (glam::Quat::from_axis_angle(glam::Vec3::Y, consts::PI / 180.)
-        * old_position).into();
-self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light_uniform]));

上面的代码使光源围绕原点以每 1 度的速度旋转。

查看光源

出于调试的目的,如果能够查看光源本身的位置,以确保场景的光照效果是正确的,那就太好了。

尽管可以直接调整现有的渲染管线来绘制光源,但这可能不利于代码的维护。所以我们把创建渲染管线的代码提取到一个叫做 create_render_pipeline() 的新函数中:

rust
fn create_render_pipeline(
-    device: &wgpu::Device,
-    layout: &wgpu::PipelineLayout,
-    color_format: wgpu::TextureFormat,
-    depth_format: Option<wgpu::TextureFormat>,
-    vertex_layouts: &[wgpu::VertexBufferLayout],
-    shader: wgpu::ShaderModuleDescriptor,
-) -> wgpu::RenderPipeline {
-    let shader = device.create_shader_module(shader);
-
-    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-        label: Some("Render Pipeline"),
-        layout: Some(layout),
-        vertex: wgpu::VertexState {
-            module: &shader,
-            entry_point: "vs_main",
-            buffers: vertex_layouts,
-        },
-        fragment: Some(wgpu::FragmentState {
-            module: &shader,
-            entry_point: "fs_main",
-            targets: &[Some(wgpu::ColorTargetState {
-                format: color_format,
-                blend: Some(wgpu::BlendState {
-                    alpha: wgpu::BlendComponent::REPLACE,
-                    color: wgpu::BlendComponent::REPLACE,
-                }),
-                write_mask: wgpu::ColorWrites::ALL,
-            })],
-        }),
-        primitive: wgpu::PrimitiveState {
-            topology: wgpu::PrimitiveTopology::TriangleList,
-            strip_index_format: None,
-            front_face: wgpu::FrontFace::Ccw,
-            cull_mode: Some(wgpu::Face::Back),
-            // 此处设置为 Fill 以外的任何值都需要开启 Feature::NON_FILL_POLYGON_MODE
-            polygon_mode: wgpu::PolygonMode::Fill,
-            unclipped_depth: false,
-            conservative: false,
-        },
-        depth_stencil: depth_format.map(|format| wgpu::DepthStencilState {
-            format,
-            depth_write_enabled: true,
-            depth_compare: wgpu::CompareFunction::Less,
-            stencil: wgpu::StencilState::default(),
-            bias: wgpu::DepthBiasState::default(),
-        }),
-        multisample: wgpu::MultisampleState {
-            count: 1,
-            mask: !0,
-            alpha_to_coverage_enabled: false,
-        },
-    })
-}

修改 State::new() 中的代码来调用 create_render_pipeline 函数:

rust
let render_pipeline = {
-    let shader = wgpu::ShaderModuleDescriptor {
-        label: Some("Normal Shader"),
-        source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
-    };
-    create_render_pipeline(
-        &device,
-        &render_pipeline_layout,
-        config.format,
-        Some(texture::Texture::DEPTH_FORMAT),
-        &[model::ModelVertex::desc(), InstanceRaw::desc()],
-        shader,
-    )
-};

修改 model::DrawModel 以使用 light_bind_group

rust
// model.rs
-pub trait DrawModel<'a> {
-    fn draw_mesh(
-        &mut self,
-        mesh: &'a Mesh,
-        material: &'a Material,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'a Mesh,
-        material: &'a Material,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-
-    fn draw_model(
-        &mut self,
-        model: &'a Model,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-    fn draw_model_instanced(
-        &mut self,
-        model: &'a Model,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-}
-
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    fn draw_mesh(
-        &mut self,
-        mesh: &'b Mesh,
-        material: &'b Material,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group, light_bind_group);
-    }
-
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'b Mesh,
-        material: &'b Material,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
-        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
-        self.set_bind_group(0, &material.bind_group, &[]);
-        self.set_bind_group(1, camera_bind_group, &[]);
-        self.set_bind_group(2, light_bind_group, &[]);
-        self.draw_indexed(0..mesh.num_elements, 0, instances);
-    }
-
-    fn draw_model(
-        &mut self,
-        model: &'b Model,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.draw_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
-    }
-
-    fn draw_model_instanced(
-        &mut self,
-        model: &'b Model,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        for mesh in &model.meshes {
-            let material = &model.materials[mesh.material];
-            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
-        }
-    }
-}

完成这些后,就可以为我们的光源创建另一条渲染管线了:

rust
// lib.rs
-let light_render_pipeline = {
-    let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-        label: Some("Light Pipeline Layout"),
-        bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout],
-        push_constant_ranges: &[],
-    });
-    let shader = wgpu::ShaderModuleDescriptor {
-        label: Some("Light Shader"),
-        source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()),
-    };
-    create_render_pipeline(
-        &device,
-        &layout,
-        config.format,
-        Some(texture::Texture::DEPTH_FORMAT),
-        &[model::ModelVertex::desc()],
-        shader,
-    )
-};

我选择为 light_render_pipeline 创建一个单独的布局,因为它不需要常规渲染管线所需要的资源(主要是纹理)。

之后,我们来编写实际的着色器代码:

rust
// light.wgsl
-// 顶点着色器
-
-struct Camera {
-    view_proj: mat4x4f,
-}
-@group(0) @binding(0)
-var<uniform> camera: Camera;
-
-struct Light {
-    position: vec3f,
-    color: vec3f,
-}
-@group(1) @binding(0)
-var<uniform> light: Light;
-
-struct VertexInput {
-    @location(0) position: vec3f,
-};
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) color: vec3f,
-};
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-) -> VertexOutput {
-    let scale = 0.25;
-    var out: VertexOutput;
-    out.clip_position = camera.view_proj * vec4f(model.position * scale + light.position, 1.0);
-    out.color = light.color;
-    return out;
-}
-
-// 片元着色器
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return vec4f(in.color, 1.0);
-}

现在就能在 render() 函数中手动实现光源的绘制代码了,但是为了保持之前开发的绘制模式,让我们来创建一个名为 DrawLight 的新 trait:

rust
// model.rs
-pub trait DrawLight<'a> {
-    fn draw_light_mesh(
-        &mut self,
-        mesh: &'a Mesh,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-    fn draw_light_mesh_instanced(
-        &mut self,
-        mesh: &'a Mesh,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-
-    fn draw_light_model(
-        &mut self,
-        model: &'a Model,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-    fn draw_light_model_instanced(
-        &mut self,
-        model: &'a Model,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-}
-
-impl<'a, 'b> DrawLight<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    fn draw_light_mesh(
-        &mut self,
-        mesh: &'b Mesh,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.draw_light_mesh_instanced(mesh, 0..1, camera_bind_group, light_bind_group);
-    }
-
-    fn draw_light_mesh_instanced(
-        &mut self,
-        mesh: &'b Mesh,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
-        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
-        self.set_bind_group(0, camera_bind_group, &[]);
-        self.set_bind_group(1, light_bind_group, &[]);
-        self.draw_indexed(0..mesh.num_elements, 0, instances);
-    }
-
-    fn draw_light_model(
-        &mut self,
-        model: &'b Model,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.draw_light_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
-    }
-    fn draw_light_model_instanced(
-        &mut self,
-        model: &'b Model,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        for mesh in &model.meshes {
-            self.draw_light_mesh_instanced(mesh, instances.clone(), camera_bind_group, light_bind_group);
-        }
-    }
-}

最后,在渲染通道中加入光源的渲染:

rust
impl State {
-    // ...
-   fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
-        // ...
-        render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-
-        use crate::model::DrawLight; // 新增!
-        render_pass.set_pipeline(&self.light_render_pipeline); // 新增!
-        render_pass.draw_light_model(
-            &self.obj_model,
-            &self.camera_bind_group,
-            &self.light_bind_group,
-        ); // 新增!
-
-        render_pass.set_pipeline(&self.render_pipeline);
-        render_pass.draw_model_instanced(
-            &self.obj_model,
-            0..self.instances.len() as u32,
-            &self.camera_bind_group,
-            &self.light_bind_group, // 新增
-        );
-}

完成上面这些后,我们将看到如下渲染效果:

./light-in-scene.png

环境光反射

现实世界中,光线在进入我们的眼睛之前往往在物体表面之间经历了多次反射。这就是为什么你能看见阴影区域的东西。在计算机上实现这种互动模型很昂贵,所以需要“作弊”(模拟)。

环境光反射(Ambient Reflection)定义了对象表面所有点的环境光强度相同,代表从场景的其他部分反射过来的光照亮我们的对象。 环境光反射值 = 光源颜色 × 环境光强度 × 片元的颜色。

请在 shader.wgsl 中的纹理 Uniform 之下添加以下代码:

rust
struct Light {
-    position: vec3f,
-    color: vec3f,
-}
-@group(2) @binding(0)
-var<uniform> light: Light;

然后更新片元着色器代码来计算和使用环境光的色值:

rust
@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    let object_color: vec4f = textureSample(t_diffuse, s_diffuse, in.tex_coords);
-
-    // 我们不需要太强的环境光,强度设置为 0.1 就够了
-    let ambient_strength = 0.1;
-    let ambient_color = light.color * ambient_strength;
-
-    let result = ambient_color * object_color.rgb;
-
-    return vec4f(result, object_color.a);
-}

完成上面的修改后,我们将得到如下渲染效果:

./ambient_lighting.png

漫反射

理想的漫反射(Diffuse Reflection)表面将光线向所有方向均匀地散射,因此,这样的表面在所有的观察者看来亮度都一样。不过,反射出去的光线强度依赖于材质以及光源相对于表面的位置。

还记得我们的模型中包含的法向量(Normal Vector)吗?现在终于要使用它们了。 法向量(也叫做法线)代表一个表面的朝向。通过计算片元的法向量和它指向光源的向量之间的夹角,可以得到该片元漫反射强度值。我们使用点积来计算向量之间夹角的余弦值:

./normal_diagram.png

如果法向量和光源方向向量的点积为 1.0,则表示当前片元与光源对齐,将反射光线的全部强度。值为 0 或更低表示表面垂直于或远离光源,因此反射强度小。

我们将法向量加入到 shader.wgsl 中:

rust
struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) tex_coords: vec2f,
-    @location(2) normal: vec3f, // 新增!
-};

接着定义该值以及顶点的位置来传递给片元着色器:

rust
struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) tex_coords: vec2f,
-    @location(1) world_normal: vec3f,
-    @location(2) world_position: vec3f,
-};

我们先按原样传递法向量的值。这是错误的,稍后会修复它:

rust
@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    let model_matrix = mat4x4f(
-        instance.model_matrix_0,
-        instance.model_matrix_1,
-        instance.model_matrix_2,
-        instance.model_matrix_3,
-    );
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.world_normal = model.normal;
-    var world_position: vec4f = model_matrix * vec4f(model.position, 1.0);
-    out.world_position = world_position.xyz;
-    out.clip_position = camera.view_proj * world_position;
-    return out;
-}

现在来进行实际的计算,在 ambient_colorresult 代码行之间,添加如下代码:

rust
let light_dir = normalize(light.position - in.world_position);
-
-let diffuse_strength = max(dot(in.world_normal, light_dir), 0.0);
-let diffuse_color = light.color * diffuse_strength;

然后在 result 中包含漫反射光(diffuse_color):

rust
let result = (ambient_color + diffuse_color) * object_color.xyz;

完成后,我们将获得如下渲染效果:

./ambient_diffuse_wrong.png

法线矩阵

还记得我说过将顶点法向量直接传递给片元着色器是错误的吗?我们通过只在场景中保留一个在 y 轴上旋转了 180 度的立方体来探索这一点:

rust
const NUM_INSTANCES_PER_ROW: u32 = 1;
-
-// In the loop we create the instances in
-let rotation = glam::Quat::from_axis_angle(glam::Vec3::Y, (180.0).to_radians());

同时从 result 中移除环境光 ambient_color

rust
let result = (diffuse_color) * object_color.xyz;

我们将得到如下渲染效果:

./diffuse_wrong.png

渲染结果显然是错误的,因为光线照亮了立方体的背光侧。这是由于法向量并没有随对象一起旋转,因此无论对象转向哪个方向,法向量的方向始终没变:

./normal_not_rotated.png

我们将使用法线矩阵(Normal Matrix)将法向量变换为正确的方向。需要注意的是,法向量表示一个方向,它应该做为单位向量(Unit Vector)来参与整个计算过程。

虽然可以在顶点着色器中计算法线矩阵,但这涉及到反转模型矩阵 model_matrix,而 WGSL 实际上没有矩阵求逆的函数,必须自己编写此代码。更重要的是,矩阵求逆的计算在着色器里实际上非常昂贵,特别是每个顶点都要计算一遍。

我们的替代方案是,向 InstanceRaw 结构体添加一个 normal 字段。不用去反转模型矩阵,而是使用模型实例的旋转来创建一个 Matrix3 类型的法线矩阵。

我们只需要用到矩阵的旋转分量,故法线矩阵的类型是 Matrix3 而不是 Matrix4

rust
#[repr(C)]
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-#[allow(dead_code)]
-struct InstanceRaw {
-    model: [[f32; 4]; 4],
-    normal: [[f32; 3]; 3],
-}
-
-impl model::Vertex for InstanceRaw {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
-            // step_mode 的值需要从 Vertex 改为 Instance
-            // 这意味着只有着色器开始处理一次新实例化绘制时,才会使用下一个实例数据
-            step_mode: wgpu::VertexStepMode::Instance,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    // 虽然顶点着色器现在只使用了插槽 0 和 1,但在后面的教程中将会使用 2、3 和 4
-                    // 此处从插槽 5 开始,确保与后面的教程不会有冲突
-                    shader_location: 5,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                // mat4 从技术的角度来看是由 4 个 vec4 构成,占用 4 个插槽。
-                // 我们需要为每个 vec4 定义一个插槽,然后在着色器中重新组装出 mat4。
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
-                    shader_location: 6,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
-                    shader_location: 7,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
-                    shader_location: 8,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                // 新增!
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress,
-                    shader_location: 9,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress,
-                    shader_location: 10,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress,
-                    shader_location: 11,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-            ],
-        }
-    }
-}

然后修改 Instance 以创建法线矩阵:

rust
struct Instance {
-    position: glam::Vec3,
-    rotation: glam::Quat,
-}
-
-impl Instance {
-    fn to_raw(&self) -> InstanceRaw {
-        let model =
-            glam::Mat4::from_translation(self.position) * glam::Mat4::from_quat(self.rotation);
-        InstanceRaw {
-            model: model.to_cols_array_2d(),
-            // 新增!
-            normal: glam::Mat3::from_mat4(glam::Mat4::from_quat(self.rotation)).to_cols_array_2d(),
-        }
-    }
-}

现在,我们在顶点着色器中重构法线矩阵:

rust
struct InstanceInput {
-    @location(5) model_matrix_0: vec4f,
-    @location(6) model_matrix_1: vec4f,
-    @location(7) model_matrix_2: vec4f,
-    @location(8) model_matrix_3: vec4f,
-    // 新增!
-    @location(9) normal_matrix_0: vec3f,
-    @location(10) normal_matrix_1: vec3f,
-    @location(11) normal_matrix_2: vec3f,
-};
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) tex_coords: vec2f,
-    @location(1) world_normal: vec3f,
-    @location(2) world_position: vec3f,
-};
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    let model_matrix = mat4x4f(
-        instance.model_matrix_0,
-        instance.model_matrix_1,
-        instance.model_matrix_2,
-        instance.model_matrix_3,
-    );
-    // 新增!
-    let normal_matrix = mat3x3f(
-        instance.normal_matrix_0,
-        instance.normal_matrix_1,
-        instance.normal_matrix_2,
-    );
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.world_normal = normal_matrix * model.normal; // UPDATED!
-    var world_position: vec4f = model_matrix * vec4f(model.position, 1.0);
-    out.world_position = world_position.xyz;
-    out.clip_position = camera.view_proj * world_position;
-    return out;
-}

上边的实现是基于 世界空间 的。在视图空间(view-space),也就是眼空间(eye-space)来实现是更标准的做法,因为对象在离原点较远的地方会产生光照问题。 如果改为使用视图空间,就需要包括由视图矩阵产生的旋转。还须使用 view_matrix * model_matrix * light_position 来变换光源的位置,以防止摄像机移动后产生计算错误。

使用视图空间的最大优势是:能避免在大规模的场景中进行光照和其他计算时,由于对象之间的空间间距导致的问题。 因为当数字变得非常大时,浮点数精度会下降。视图空间使摄像机保持在原点,这意味着所有的计算都会使用较小的数字。 最终的光照计算过程是一样的,只是需要多一点点设置。

经过以上修改,光照效果现在看起来已经正确了:

./diffuse_right.png

现在把场景中其他对象加回来,再加上环境光反射,我们就会得到如下渲染效果:

./ambient_diffuse_lighting.png

如果能保证模型矩阵总是对对象应用统一的缩放因子,你就可以只使用模型矩阵了。Github 用户 @julhe 与我分享的这段代码可以做到这一点:

rust
out.world_normal = (model_matrix * vec4f(model.normal, 0.0)).xyz;

他利用的是这样一个事实:即用一个 4x4 矩阵乘以一个 w 分量为 0 的向量时,只有旋转和缩放将被应用于向量。 不过你需要对这个向量进行归一化(Normalize)处理,因为法向量必须是单位向量

模型矩阵的缩放因子必须是统一的才能适用。否则产生的法向量将是倾斜于表面的,如下图片所示:

./normal-scale-issue.png

镜面反射

镜面反射(Specular Reflection)模拟了现实世界中从特定角度观察物体时出现的高光(Highlights,亮点)。 如果曾在阳光下观察过汽车,定会注意到车身出现的高亮部分。基本上来说,我们在观察有光泽的物体时就会看到高光。 从表面光滑的物体上反射出去的光线会倾向于集中在一个角度的附近,所以高光的位置会根据你观察的角度而变化。

./specular_diagram.png

因为镜面反射是相对于视角而言的,所以我们需要将摄像机的位置传入顶点及片元着色器中:

rust
struct Camera {
-    view_pos: vec4f,
-    view_proj: mat4x4f,
-}
-@group(1) @binding(0)
-var<uniform> camera: Camera;

别忘了也要更新 light.wgsl 中的 Camera 结构体,一旦它与 Rust 中的 CameraUniform 结构体不匹配,光照效果就会渲染错误。

同时也需要更新 CameraUniform 结构体:

rust
// lib.rs
-#[repr(C)]
-#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct CameraUniform {
-    view_position: [f32; 4],
-    view_proj: [[f32; 4]; 4],
-}
-
-impl CameraUniform {
-    fn new() -> Self {
-        Self {
-            view_position: [0.0; 4],
-            view_proj: glam::Mat4::IDENTITY.into(),
-        }
-    }
-
-    fn update_view_proj(&mut self, camera: &Camera) {
-        // 使用 vec4 纯粹是因为 Uniform 的 16 字节对齐要求
-        self.view_position = camera.eye.extend(1.0).into();
-        self.view_proj = (camera.build_view_projection_matrix()).into();
-    }
-}

由于现在要在片元着色器中使用 Uniform,得修改它的可见性:

rust
// lib.rs
-let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-    entries: &[
-        wgpu::BindGroupLayoutBinding {
-            // ...
-            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, // 更新!
-            // ...
-        },
-        // ...
-    ],
-    label: None,
-});

计算从片元位置到摄像机的方向向量,并用此向量和法向量来计算反射方向 reflect_dir

rust
// shader.wgsl
-// 片元着色器内...
-let view_dir = normalize(camera.view_pos.xyz - in.world_position);
-let reflect_dir = reflect(-light_dir, in.world_normal);

然后使用点积来计算镜面反射的强度 specular_strength,并用它算出高光颜色 specular_color

rust
let specular_strength = pow(max(dot(view_dir, reflect_dir), 0.0), 32.0);
-let specular_color = specular_strength * light.color;

最后,将高光颜色合成到片元输出结果中:

rust
let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;

完成全部代码之后,就能得到如下渲染效果:

./ambient_diffuse_specular_lighting.png

假如只查看镜面反射得到的高光颜色 specular_color 本身,渲染效果如下:

./specular_lighting.png

半程向量

所谓的半程向量(Halfway Vector)也是一个单位向量,它正好在视图方向和光源方向的中间。

到目前为止,我们实际上只实现了 Blinn-Phong 的 Phong 部分。Phong 反射模型很好用,但在某些情况下会产生 bug。 Blinn-Phong 的 Blinn 部分来自于这样的事实:如果把 view_dirlight_dir 加在一起,对结果进行归一化处理后得到一个半程向量,然后再与法向量 normal 求点积,就会得到大致相同的渲染结果,且不会有使用反射方向 reflect_dir 可能产生的问题。

rust
let view_dir = normalize(camera.view_pos.xyz - in.world_position);
-let half_dir = normalize(view_dir + light_dir);
-
-let specular_strength = pow(max(dot(in.world_normal, half_dir), 0.0), 32.0);

在我们这个场景下很难看出有何不同,但以下就是改进了光照计算后的渲染效果:

./half_dir.png

',119);function f(h,w,v,x,E,k){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[_,n(a,{example:"tutorial10_lighting"}),n(l)])}const P=p(g,[["render",f]]);export{V as __pageData,P as default}; diff --git a/assets/intermediate_tutorial10-lighting_index.md.bf469bcb.lean.js b/assets/intermediate_tutorial10-lighting_index.md.bf469bcb.lean.js deleted file mode 100644 index da04a52f7..000000000 --- a/assets/intermediate_tutorial10-lighting_index.md.bf469bcb.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/light-in-scene.630f9dca.png",t="/learn-wgpu-zh/assets/ambient_lighting.d9ea7212.png",F="/learn-wgpu-zh/assets/normal_diagram.35def195.png",D="/learn-wgpu-zh/assets/ambient_diffuse_wrong.a2837930.png",y="/learn-wgpu-zh/assets/diffuse_wrong.70c1d359.png",C="/learn-wgpu-zh/assets/normal_not_rotated.7327fe66.png",A="/learn-wgpu-zh/assets/diffuse_right.33406428.png",i="/learn-wgpu-zh/assets/ambient_diffuse_lighting.14acf617.png",b="/learn-wgpu-zh/assets/normal-scale-issue.0959834f.png",u="/learn-wgpu-zh/assets/specular_diagram.417ae155.png",m="/learn-wgpu-zh/assets/ambient_diffuse_specular_lighting.4ab44d3e.png",B="/learn-wgpu-zh/assets/specular_lighting.8a1c656e.png",d="/learn-wgpu-zh/assets/half_dir.96da210f.png",V=JSON.parse('{"title":"光照","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial10-lighting/index.md","filePath":"intermediate/tutorial10-lighting/index.md","lastUpdated":1701933923000}'),g={name:"intermediate/tutorial10-lighting/index.md"},_=r("",119);function f(h,w,v,x,E,k){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[_,n(a,{example:"tutorial10_lighting"}),n(l)])}const P=p(g,[["render",f]]);export{V as __pageData,P as default}; diff --git a/assets/intermediate_tutorial10-lighting_index.md.sSfFtIbh.js b/assets/intermediate_tutorial10-lighting_index.md.sSfFtIbh.js new file mode 100644 
index 000000000..44aee8126 --- /dev/null +++ b/assets/intermediate_tutorial10-lighting_index.md.sSfFtIbh.js @@ -0,0 +1,575 @@ +import{_ as h,D as s,o as p,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/light-in-scene.J0TJXHw7.png",e="/learn-wgpu-zh/assets/ambient_lighting.Yz5DfKXU.png",r="/learn-wgpu-zh/assets/normal_diagram._dkbBVWT.png",E="/learn-wgpu-zh/assets/ambient_diffuse_wrong.a6ZMIR1H.png",d="/learn-wgpu-zh/assets/diffuse_wrong.-cOTaPSJ.png",g="/learn-wgpu-zh/assets/normal_not_rotated.X_bk5vl-.png",y="/learn-wgpu-zh/assets/diffuse_right.Lc1TfbXm.png",F="/learn-wgpu-zh/assets/ambient_diffuse_lighting.Aqd8YxwU.png",c="/learn-wgpu-zh/assets/normal-scale-issue.o_duVY_I.png",b="/learn-wgpu-zh/assets/specular_diagram.q_G0tMuB.png",u="/learn-wgpu-zh/assets/ambient_diffuse_specular_lighting.5nUogza3.png",o="/learn-wgpu-zh/assets/specular_lighting.YjF8bpkw.png",m="/learn-wgpu-zh/assets/half_dir.ayFab_R-.png",V=JSON.parse('{"title":"光照","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial10-lighting/index.md","filePath":"intermediate/tutorial10-lighting/index.md","lastUpdated":1703303099000}'),C={name:"intermediate/tutorial10-lighting/index.md"},D=k(`

光照

虽然我们的场景是 3D 的,但它们看起来像是平的,对象表面缺乏现实光照环境中的明暗变化,所以无法体现模型的三维特性。这是因为我们的模型没有考虑光线和对象表面之间的相互作用,无论如何摆放都会保持着相同的着色。

如果想修正这一点,就需要在我们的场景中添加光照(Lighting)。

在现实世界中,光源发出的光子会四处反射,最后进入我们的眼睛。 当观察对象上的一点时,我们所看到的颜色取决于多个光源和多个反射表面之间的多次相互作用。

在计算机图形学领域,为单个光子建模的计算成本极高。一个 100 瓦的灯泡每秒钟发出大约 3.27×10^20 个光子,再试想一下太阳每秒发出的光子的数量级。为了解决这个问题,我们要用数学来 “作弊”(也就是模拟。严格来说,这不是作弊,计算机图形学里有这么一句名言:"If it looks right, it is right.", 意思就是,如果它看起来是对的,那么它就是对的)。

我们来看看计算机图形学里常用的几个光照模型。

光线/路径追踪

光线/路径追踪(Ray/Path tracing)以虛拟摄像机模型为基础,但是对于每条与某个三角形相交的投影线,在计算光源对交点处明暗值的直接贡献之前,还要确定是否有一个或者多个光源能够照射到这个交点。

它是最接近光的真实工作方式的模型,所以我觉得必须提到它。但这是一个高级话题,我们不会在这里深入讨论。

Blinn-Phong 反射模型

对于大多数实时(real-time)应用来说,光线/路径追踪的计算成本十在太高了(尽管这种情况已经开始改变),所以通常使用一种更有效的,精度较低的 Phong 反射模型 来解决光照问题。它考虑了光线与材质的 3 种相互作用:环境光反射、漫反射和镜面反射。我们将学习 Blinn-Phong 反射模型,它能加速镜面反射的计算。

在开始学习之前,需要在我们的场景中添加一个光源:

rust
// lib.rs
+#[repr(C)]
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct LightUniform {
+    position: [f32; 3],
+    // 由于 Uniform 需要字段按 16 字节对齐,我们需要在这里使用一个填充字段
+    _padding: u32,
+    color: [f32; 3],
+    _padding2: u32,
+}

LightUniform 代表空间中的一个彩色点光源。虽然通常是使用纯白色的光,但使用其它颜色的光也是可以的。

使 WGSL 结构体内存字节对齐的经验法则是:字段保持按 2 的 N 次幂来对齐。 例如,一个 vec3 如果是 3 个单精度浮点数,它的大小为 12 字节,对齐后将被提升到 2 的下一个次幂,即 16 字节. 这意味着必须更加小心地布局你的结构体。

一些开发者会选择使用 vec4 而不是 vec3 来避免对齐问题。 你可以在 wgsl spec 中了解更多关于对齐规则的信息。

接下来,创建一个 Uniform 缓冲区来存储我们的光源:

rust
let light_uniform = LightUniform {
+    position: [2.0, 2.0, 2.0],
+    _padding: 0,
+    color: [1.0, 1.0, 1.0],
+    _padding2: 0,
+};
+
+ // 我们希望能更新光源位置,所以用了 COPY_DST 这个使用范围标志
+let light_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Light VB"),
+        contents: bytemuck::cast_slice(&[light_uniform]),
+        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
+    }
+);

别忘记把 light_uniformlight_buffer 添加到 State。之后,我们为光源创建一个绑定组的布局绑定组

rust
let light_bind_group_layout =
+    device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+        entries: &[wgpu::BindGroupLayoutEntry {
+            binding: 0,
+            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
+            ty: wgpu::BindingType::Buffer {
+                ty: wgpu::BufferBindingType::Uniform,
+                has_dynamic_offset: false,
+                min_binding_size: None,
+            },
+            count: None,
+        }],
+        label: None,
+    });
+
+let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+    layout: &light_bind_group_layout,
+    entries: &[wgpu::BindGroupEntry {
+        binding: 0,
+        resource: light_buffer.as_entire_binding(),
+    }],
+    label: None,
+});

把它们添加到 State 中,同时更新 render_pipeline_layout

rust
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+    bind_group_layouts: &[
+        &texture_bind_group_layout,
+        &camera_bind_group_layout,
+        &light_bind_group_layout,
+    ],
+});

update() 函数中更新光源的位置,这样便能看到对象在不同角度下的光照效果:

rust
// 更新光源
+let old_position = glam::Vec3::from_array(self.light_uniform.position);
+self.light_uniform.position =
+    (glam::Quat::from_axis_angle(glam::Vec3::Y, consts::PI / 180.)
+        * old_position).into();
+self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light_uniform]));

上面的代码使光源围绕原点以每 1 度的速度旋转。

查看光源

出于调试的目的,如果能够查看光源本身的位置,以确保场景的光照效果是正确的,那就太好了。

尽管可以直接调整现有的渲染管线来绘制光源,但这可能不利于代码的维护。所以我们把创建渲染管线的代码提取到一个叫做 create_render_pipeline() 的新函数中:

rust
fn create_render_pipeline(
+    device: &wgpu::Device,
+    layout: &wgpu::PipelineLayout,
+    color_format: wgpu::TextureFormat,
+    depth_format: Option<wgpu::TextureFormat>,
+    vertex_layouts: &[wgpu::VertexBufferLayout],
+    shader: wgpu::ShaderModuleDescriptor,
+) -> wgpu::RenderPipeline {
+    let shader = device.create_shader_module(shader);
+
+    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+        label: Some("Render Pipeline"),
+        layout: Some(layout),
+        vertex: wgpu::VertexState {
+            module: &shader,
+            entry_point: "vs_main",
+            buffers: vertex_layouts,
+        },
+        fragment: Some(wgpu::FragmentState {
+            module: &shader,
+            entry_point: "fs_main",
+            targets: &[Some(wgpu::ColorTargetState {
+                format: color_format,
+                blend: Some(wgpu::BlendState {
+                    alpha: wgpu::BlendComponent::REPLACE,
+                    color: wgpu::BlendComponent::REPLACE,
+                }),
+                write_mask: wgpu::ColorWrites::ALL,
+            })],
+        }),
+        primitive: wgpu::PrimitiveState {
+            topology: wgpu::PrimitiveTopology::TriangleList,
+            strip_index_format: None,
+            front_face: wgpu::FrontFace::Ccw,
+            cull_mode: Some(wgpu::Face::Back),
+            // 此处设置为 Fill 以外的任何值都需要开启 Feature::NON_FILL_POLYGON_MODE
+            polygon_mode: wgpu::PolygonMode::Fill,
+            unclipped_depth: false,
+            conservative: false,
+        },
+        depth_stencil: depth_format.map(|format| wgpu::DepthStencilState {
+            format,
+            depth_write_enabled: true,
+            depth_compare: wgpu::CompareFunction::Less,
+            stencil: wgpu::StencilState::default(),
+            bias: wgpu::DepthBiasState::default(),
+        }),
+        multisample: wgpu::MultisampleState {
+            count: 1,
+            mask: !0,
+            alpha_to_coverage_enabled: false,
+        },
+    })
+}

修改 State::new() 中的代码来调用 create_render_pipeline 函数:

rust
let render_pipeline = {
+    let shader = wgpu::ShaderModuleDescriptor {
+        label: Some("Normal Shader"),
+        source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
+    };
+    create_render_pipeline(
+        &device,
+        &render_pipeline_layout,
+        config.format,
+        Some(texture::Texture::DEPTH_FORMAT),
+        &[model::ModelVertex::desc(), InstanceRaw::desc()],
+        shader,
+    )
+};

修改 model::DrawModel 以使用 light_bind_group

rust
// model.rs
+pub trait DrawModel<'a> {
+    fn draw_mesh(
+        &mut self,
+        mesh: &'a Mesh,
+        material: &'a Material,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'a Mesh,
+        material: &'a Material,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+
+    fn draw_model(
+        &mut self,
+        model: &'a Model,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+    fn draw_model_instanced(
+        &mut self,
+        model: &'a Model,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+}
+
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    fn draw_mesh(
+        &mut self,
+        mesh: &'b Mesh,
+        material: &'b Material,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group, light_bind_group);
+    }
+
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'b Mesh,
+        material: &'b Material,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
+        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        self.set_bind_group(0, &material.bind_group, &[]);
+        self.set_bind_group(1, camera_bind_group, &[]);
+        self.set_bind_group(2, light_bind_group, &[]);
+        self.draw_indexed(0..mesh.num_elements, 0, instances);
+    }
+
+    fn draw_model(
+        &mut self,
+        model: &'b Model,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.draw_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
+    }
+
+    fn draw_model_instanced(
+        &mut self,
+        model: &'b Model,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            let material = &model.materials[mesh.material];
+            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
+        }
+    }
+}

完成这些后,就可以为我们的光源创建另一条渲染管线了:

rust
// lib.rs
+let light_render_pipeline = {
+    let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+        label: Some("Light Pipeline Layout"),
+        bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout],
+        push_constant_ranges: &[],
+    });
+    let shader = wgpu::ShaderModuleDescriptor {
+        label: Some("Light Shader"),
+        source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()),
+    };
+    create_render_pipeline(
+        &device,
+        &layout,
+        config.format,
+        Some(texture::Texture::DEPTH_FORMAT),
+        &[model::ModelVertex::desc()],
+        shader,
+    )
+};

我选择为 light_render_pipeline 创建一个单独的布局,因为它不需要常规渲染管线所需要的资源(主要是纹理)。

之后,我们来编写实际的着色器代码:

rust
// light.wgsl
+// 顶点着色器
+
+struct Camera {
+    view_proj: mat4x4f,
+}
+@group(0) @binding(0)
+var<uniform> camera: Camera;
+
+struct Light {
+    position: vec3f,
+    color: vec3f,
+}
+@group(1) @binding(0)
+var<uniform> light: Light;
+
+struct VertexInput {
+    @location(0) position: vec3f,
+};
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) color: vec3f,
+};
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+) -> VertexOutput {
+    let scale = 0.25;
+    var out: VertexOutput;
+    out.clip_position = camera.view_proj * vec4f(model.position * scale + light.position, 1.0);
+    out.color = light.color;
+    return out;
+}
+
+// 片元着色器
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return vec4f(in.color, 1.0);
+}

现在就能在 render() 函数中手动实现光源的绘制代码了,但是为了保持之前开发的绘制模式,让我们来创建一个名为 DrawLight 的新 trait:

rust
// model.rs
+pub trait DrawLight<'a> {
+    fn draw_light_mesh(
+        &mut self,
+        mesh: &'a Mesh,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+    fn draw_light_mesh_instanced(
+        &mut self,
+        mesh: &'a Mesh,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+
+    fn draw_light_model(
+        &mut self,
+        model: &'a Model,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+    fn draw_light_model_instanced(
+        &mut self,
+        model: &'a Model,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+}
+
+impl<'a, 'b> DrawLight<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    fn draw_light_mesh(
+        &mut self,
+        mesh: &'b Mesh,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.draw_light_mesh_instanced(mesh, 0..1, camera_bind_group, light_bind_group);
+    }
+
+    fn draw_light_mesh_instanced(
+        &mut self,
+        mesh: &'b Mesh,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
+        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        self.set_bind_group(0, camera_bind_group, &[]);
+        self.set_bind_group(1, light_bind_group, &[]);
+        self.draw_indexed(0..mesh.num_elements, 0, instances);
+    }
+
+    fn draw_light_model(
+        &mut self,
+        model: &'b Model,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.draw_light_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
+    }
+    fn draw_light_model_instanced(
+        &mut self,
+        model: &'b Model,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            self.draw_light_mesh_instanced(mesh, instances.clone(), camera_bind_group, light_bind_group);
+        }
+    }
+}

最后,在渲染通道中加入光源的渲染:

rust
impl State {
+    // ...
+   fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
+        // ...
+        render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+
+        use crate::model::DrawLight; // 新增!
+        render_pass.set_pipeline(&self.light_render_pipeline); // 新增!
+        render_pass.draw_light_model(
+            &self.obj_model,
+            &self.camera_bind_group,
+            &self.light_bind_group,
+        ); // 新增!
+
+        render_pass.set_pipeline(&self.render_pipeline);
+        render_pass.draw_model_instanced(
+            &self.obj_model,
+            0..self.instances.len() as u32,
+            &self.camera_bind_group,
+            &self.light_bind_group, // 新增
+        );
+}

完成上面这些后,我们将看到如下渲染效果:

./light-in-scene.png

环境光反射

现实世界中,光线在进入我们的眼睛之前往往在物体表面之间经历了多次反射。这就是为什么你能看见阴影区域的东西。在计算机上实现这种互动模型很昂贵,所以需要“作弊”(模拟)。

环境光反射(Ambient Reflection)定义了对象表面所有点的环境光强度相同,代表从场景的其他部分反射过来的光照亮我们的对象。 环境光反射值 = 光源颜色 _ 环境光强度 _ 片元的颜色。

请在 shader.wgsl 中的纹理 Uniform 之下添加以下代码:

rust
struct Light {
+    position: vec3f,
+    color: vec3f,
+}
+@group(2) @binding(0)
+var<uniform> light: Light;

然后更新片元色器代码来计算和使用环境光的色值:

rust
@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    let object_color: vec4f = textureSample(t_diffuse, s_diffuse, in.tex_coords);
+
+    // 我们不需要太强的环境光,强度设置为 0.1 就够了
+    let ambient_strength = 0.1;
+    let ambient_color = light.color * ambient_strength;
+
+    let result = ambient_color * object_color.rgb;
+
+    return vec4f(result, object_color.a);
+}

完成上面的修改后,我们将得到如下渲染效果:

./ambient_lighting.png

漫反射

理想的漫反射(Diffuse Reflection)表面将光线向所有方向均匀地散射,因此,这样的表面在所有的观察者看来亮度都一样。不过,反射出去的光线强度依赖于材质以及光源相对于表面的位置。

还记得我们的模型中包含的法向量(Normal Vector)吗?现在终于要使用它们了。 法向量(也叫做法线)代表一个表面的朝向。通过计算片元的法向量和它指向光源的向量之间的夹角,可以得到该片元漫反射强度值。我们使用点积来计算向量之间夹角的余弦值:

./normal_diagram.png

如果法向量和光源方向向量的点积为 1.0,则表示当前片元与光源对齐,将反射光线的全部强度。值为 0 或更低表示表面垂直于或远离光源,因此反射强度小。

我们将法向量加入到 shader.wgsl 中:

rust
struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) tex_coords: vec2f,
+    @location(2) normal: vec3f, // 新增!
+};

接着定义该值以及顶点的位置来传递给片元着色器:

rust
struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) tex_coords: vec2f,
+    @location(1) world_normal: vec3f,
+    @location(2) world_position: vec3f,
+};

我们先按原样传递法向量的值。这是错误的,稍后会修复它:

rust
@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    let model_matrix = mat4x4f(
+        instance.model_matrix_0,
+        instance.model_matrix_1,
+        instance.model_matrix_2,
+        instance.model_matrix_3,
+    );
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.world_normal = model.normal;
+    var world_position: vec4f = model_matrix * vec4f(model.position, 1.0);
+    out.world_position = world_position.xyz;
+    out.clip_position = camera.view_proj * world_position;
+    return out;
+}

现在来进行实际的计算,在 ambient_colorresult 代码行之间,添加如下代码:

rust
let light_dir = normalize(light.position - in.world_position);
+
+let diffuse_strength = max(dot(in.world_normal, light_dir), 0.0);
+let diffuse_color = light.color * diffuse_strength;

然后在 result 中包含漫反射光(diffuse_color):

rust
let result = (ambient_color + diffuse_color) * object_color.xyz;

完成后,我们将获得如下渲染效果:

./ambient_diffuse_wrong.png

法线矩阵

还记得我说过将顶点法向量直接传递给片元着色器是错误的吗?我们通过只在场景中保留一个在 y 轴上旋转了 180 度的立方体来探索这一点:

rust
const NUM_INSTANCES_PER_ROW: u32 = 1;
+
+// In the loop we create the instances in
+let rotation = glam::Quat::from_axis_angle(glam::Vec3::Y, (180.0).to_radians());

同时从 result 中移除环境光 ambient_color

rust
let result = (diffuse_color) * object_color.xyz;

我们将得到如下渲染效果:

./diffuse_wrong.png

渲染结果显然是错误的,因为光线照亮了立方体的背光侧。这是由于法向量并没有随对象一起旋转,因此无论对象转向哪个方向,法向量的方向始终没变:

./normal_not_rotated.png

我们将使用法线矩阵(Normal Matrix)将法向量变换为正确的方向。需要注意的是,法向量表示一个方向,它应该做为单位向量(Unit Vector)来参与整个计算过程。

虽然可以在顶点着色器中计算法线矩阵,但这涉及到反转模型矩阵 model_matrix,而 WGSL 实际上没有矩阵求逆的函数,必须自己编写此代码。更重要的是,矩阵求逆的计算在着色器里实际上非常昂贵,特别是每个顶点都要计算一遍。

我们的替代方案是,向 InstanceRaw 结构体添加一个 normal 字段。不用去反转模型矩阵,而是使用模型实例的旋转来创建一个 Matrix3 类型的法线矩阵。

我们只需要用到矩阵的旋转分量,故法线矩阵的类型是 Matrix3 而不是 Matrix4

rust
#[repr(C)]
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+#[allow(dead_code)]
+struct InstanceRaw {
+    model: [[f32; 4]; 4],
+    normal: [[f32; 3]; 3],
+}
+
+impl model::Vertex for InstanceRaw {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
+            // step_mode 的值需要从 Vertex 改为 Instance
+            // 这意味着只有着色器开始处理一次新实例化绘制时,才会使用下一个实例数据
+            step_mode: wgpu::VertexStepMode::Instance,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    // 虽然顶点着色器现在只使用了插槽 0 和 1,但在后面的教程中将会使用 2、3 和 4
+                    // 此处从插槽 5 开始,确保与后面的教程不会有冲突
+                    shader_location: 5,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                // mat4 从技术的角度来看是由 4 个 vec4 构成,占用 4 个插槽。
+                // 我们需要为每个 vec4 定义一个插槽,然后在着色器中重新组装出 mat4。
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
+                    shader_location: 6,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
+                    shader_location: 7,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
+                    shader_location: 8,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                // 新增!
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress,
+                    shader_location: 9,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress,
+                    shader_location: 10,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress,
+                    shader_location: 11,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+            ],
+        }
+    }
+}

然后修改 Instance 以创建法线矩阵:

rust
struct Instance {
+    position: glam::Vec3,
+    rotation: glam::Quat,
+}
+
+impl Instance {
+    fn to_raw(&self) -> InstanceRaw {
+        let model =
+            glam::Mat4::from_translation(self.position) * glam::Mat4::from_quat(self.rotation);
+        InstanceRaw {
+            model: model.to_cols_array_2d(),
+            // 新增!
+            normal: glam::Mat3::from_mat4(glam::Mat4::from_quat(self.rotation)).to_cols_array_2d(),
+        }
+    }
+}

现在,我们在顶点着色器中重构法线矩阵:

rust
struct InstanceInput {
+    @location(5) model_matrix_0: vec4f,
+    @location(6) model_matrix_1: vec4f,
+    @location(7) model_matrix_2: vec4f,
+    @location(8) model_matrix_3: vec4f,
+    // 新增!
+    @location(9) normal_matrix_0: vec3f,
+    @location(10) normal_matrix_1: vec3f,
+    @location(11) normal_matrix_2: vec3f,
+};
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) tex_coords: vec2f,
+    @location(1) world_normal: vec3f,
+    @location(2) world_position: vec3f,
+};
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    let model_matrix = mat4x4f(
+        instance.model_matrix_0,
+        instance.model_matrix_1,
+        instance.model_matrix_2,
+        instance.model_matrix_3,
+    );
+    // 新增!
+    let normal_matrix = mat3x3f(
+        instance.normal_matrix_0,
+        instance.normal_matrix_1,
+        instance.normal_matrix_2,
+    );
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.world_normal = normal_matrix * model.normal; // UPDATED!
+    var world_position: vec4f = model_matrix * vec4f(model.position, 1.0);
+    out.world_position = world_position.xyz;
+    out.clip_position = camera.view_proj * world_position;
+    return out;
+}

上边的实现是基于 世界空间 的。在视图空间(view-space),也就是眼空间(eye-space)来实现是更标准的做法,因为对象在离原点较远的地方会产生光照问题。 如果改为使用视图空间,就需要包括由视图矩阵产生的旋转。还须使用 view_matrix * model_matrix * light_position 来变换光源的位置,以防止摄像机移动后产生计算错误。

使用视图空间的最大优势是:能避免在大规模的场景中进行光照和其他计算时,由于对象之间的空间间距导致的问题。 因为当数字变得非常大时,浮点数精度会下降。视图空间使摄像机保持在原点,这意味着所有的计算都会使用较小的数字。 最终的光照计算过程是一样的,只是需要多一点点设置。

经过以上修改,光照效果现在看起来已经正确了:

./diffuse_right.png

现在把场景中其他对象加回来,再加上环境光反射,我们就会得到如下渲染效果:

./ambient_diffuse_lighting.png

如果能保证模型矩阵总是对对象应用统一的缩放因子,你就可以只使用模型矩阵了。Github 用户 @julhe 与我分享的这段代码可以做到这一点:

rust
out.world_normal = (model_matrix * vec4f(model.normal, 0.0)).xyz;

他利用的是这样一个事实:即用一个 4x4 矩阵乘以一个 w 分量为 0 的向量时,只有旋转和缩放将被应用于向量。 不过你需要对这个向量进行归一化(Normalize)处理,因为法向量必须是单位向量

模型矩阵的缩放因子必须是统一的才能适用。否则产生的法向量将是倾斜于表面的,如下图片所示:

./normal-scale-issue.png

镜面反射

镜面反射(Specular Reflection)模拟了现实世界中从特定角度观察物体时出现的高光(Highlights,亮点)。 如果曾在阳光下观察过汽车,定会注意到车身出现的高亮部分。基本上来说,我们在观察有光泽的物体时就会看到高光。 从表面光滑的物体上反射出去的光线会倾向于集中在一个角度的附近,所以高光的位置会根据你观察的角度而变化。

./specular_diagram.png

因为镜面反射是相对于视角而言的,所以我们需要将摄像机的位置传入顶点及片元着色器中:

rust
struct Camera {
+    view_pos: vec4f,
+    view_proj: mat4x4f,
+}
+@group(1) @binding(0)
+var<uniform> camera: Camera;

别忘了也要更新 light.wgsl 中的 Camera 结构体,一旦它与 Rust 中的 CameraUniform 结构体不匹配,光照效果就会渲染错误。

同时也需要更新 CameraUniform 结构体:

rust
// lib.rs
+#[repr(C)]
+#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct CameraUniform {
+    view_position: [f32; 4],
+    view_proj: [[f32; 4]; 4],
+}
+
+impl CameraUniform {
+    fn new() -> Self {
+        Self {
+            view_position: [0.0; 4],
+            view_proj: glam::Mat4::IDENTITY.into(),
+        }
+    }
+
+    fn update_view_proj(&mut self, camera: &Camera) {
+        // 使用 vec4 纯粹是因为 Uniform 的 16 字节对齐要求
+        self.view_position = camera.eye.extend(1.0).into();
+        self.view_proj = (camera.build_view_projection_matrix()).into();
+    }
+}

由于现在要在片元着色器中使用 Uniform,得修改它的可见性:

rust
// lib.rs
+let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+    entries: &[
+        wgpu::BindGroupLayoutBinding {
+            // ...
+            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, // 更新!
+            // ...
+        },
+        // ...
+    ],
+    label: None,
+});

计算从片元位置到摄像机的方向向量,并用此向量和法向量来计算反射方向 reflect_dir

rust
// shader.wgsl
+// 片元着色器内...
+let view_dir = normalize(camera.view_pos.xyz - in.world_position);
+let reflect_dir = reflect(-light_dir, in.world_normal);

然后使用点积来计算镜面反射的强度 specular_strength,并用它算出高光颜色 specular_color

rust
let specular_strength = pow(max(dot(view_dir, reflect_dir), 0.0), 32.0);
+let specular_color = specular_strength * light.color;

最后,将高光颜色合成到片元输出结果中:

rust
let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;

完成全部代码之后,就能得到如下渲染效果:

./ambient_diffuse_specular_lighting.png

假如只查看镜面反射得到的高光颜色 specular_color 本身,渲染效果如下:

./specular_lighting.png

半程向量

所谓的半程向量(Halfway Vector)也是一个单位向量,它正好在视图方向和光源方向的中间。

到目前为止,我们实际上只实现了 Blinn-Phong 的 Phong 部分。Phong 反射模型很好用,但在某些情况下会产生 bug。 Blinn-Phong 的 Blinn 部分来自于这样的事实:如果把 view_dirlight_dir 加在一起,对结果进行归一化处理后得到一个半程向量,然后再与法向量 normal 求点积,就会得到大致相同的渲染结果,且不会有使用反射方向 reflect_dir 可能产生的问题。

rust
let view_dir = normalize(camera.view_pos.xyz - in.world_position);
+let half_dir = normalize(view_dir + light_dir);
+
+let specular_strength = pow(max(dot(in.world_normal, half_dir), 0.0), 32.0);

在我们这个场景下很难看出有何不同,但以下就是改进了光照计算后的渲染效果:

./half_dir.png

',119);function A(B,_,f,v,w,x){const a=s("WasmExample"),n=s("AutoGithubLink");return p(),l("div",null,[D,i(a,{example:"tutorial10_lighting"}),i(n)])}const P=h(C,[["render",A]]);export{V as __pageData,P as default}; diff --git a/assets/intermediate_tutorial10-lighting_index.md.sSfFtIbh.lean.js b/assets/intermediate_tutorial10-lighting_index.md.sSfFtIbh.lean.js new file mode 100644 index 000000000..69a9fbf2e --- /dev/null +++ b/assets/intermediate_tutorial10-lighting_index.md.sSfFtIbh.lean.js @@ -0,0 +1 @@ +import{_ as h,D as s,o as p,c as l,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/light-in-scene.J0TJXHw7.png",e="/learn-wgpu-zh/assets/ambient_lighting.Yz5DfKXU.png",r="/learn-wgpu-zh/assets/normal_diagram._dkbBVWT.png",E="/learn-wgpu-zh/assets/ambient_diffuse_wrong.a6ZMIR1H.png",d="/learn-wgpu-zh/assets/diffuse_wrong.-cOTaPSJ.png",g="/learn-wgpu-zh/assets/normal_not_rotated.X_bk5vl-.png",y="/learn-wgpu-zh/assets/diffuse_right.Lc1TfbXm.png",F="/learn-wgpu-zh/assets/ambient_diffuse_lighting.Aqd8YxwU.png",c="/learn-wgpu-zh/assets/normal-scale-issue.o_duVY_I.png",b="/learn-wgpu-zh/assets/specular_diagram.q_G0tMuB.png",u="/learn-wgpu-zh/assets/ambient_diffuse_specular_lighting.5nUogza3.png",o="/learn-wgpu-zh/assets/specular_lighting.YjF8bpkw.png",m="/learn-wgpu-zh/assets/half_dir.ayFab_R-.png",V=JSON.parse('{"title":"光照","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial10-lighting/index.md","filePath":"intermediate/tutorial10-lighting/index.md","lastUpdated":1703303099000}'),C={name:"intermediate/tutorial10-lighting/index.md"},D=k("",119);function A(B,_,f,v,w,x){const a=s("WasmExample"),n=s("AutoGithubLink");return p(),l("div",null,[D,i(a,{example:"tutorial10_lighting"}),i(n)])}const P=h(C,[["render",A]]);export{V as __pageData,P as default}; diff --git a/assets/intermediate_tutorial11-normals_index.md.0360ba19.js b/assets/intermediate_tutorial11-normals_index.md.0360ba19.js deleted file mode 
100644 index be66881d4..000000000 --- a/assets/intermediate_tutorial11-normals_index.md.0360ba19.js +++ /dev/null @@ -1,432 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/cube-normal.076c62a3.png",t="/learn-wgpu-zh/assets/normal_mapping_wrong.a2c5f143.png",D="/learn-wgpu-zh/assets/ambient_diffuse_specular_lighting.4ab44d3e.png",F="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAAGQCAYAAACAvzbMAAABg2lDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV9TS0UqDmaQ4pChOlkQFXHUKhShQqgVWnUwufQLmrQkKS6OgmvBwY/FqoOLs64OroIg+AHi5Oik6CIl/i8ptIjx4Lgf7+497t4BQrPCdKtnHNAN20wnE1I2tyqFXxFCECKigMKs2pwsp+A7vu4R4OtdnGf5n/tz9Gt5iwEBiXiW1UybeIN4etOucd4nFllJ0YjPicdMuiDxI9dVj984F10WeKZoZtLzxCKxVOxitYtZydSJp4hjmm5QvpD1WOO8xVmv1Fn7nvyFkbyxssx1msNIYhFLkCFBRR1lVGAjTqtBioU07Sd8/FHXL5NLJVcZjBwLqEKH4vrB/+B3t1ZhcsJLiiSA0IvjfIwA4V2g1XCc72PHaZ0AwWfgyuj4q01g5pP0RkeLHQED28DFdUdT94DLHWDoqaaYiisFaQqFAvB+Rt+UAwZvgb41r7f2Pk4fgAx1lboBDg6B0SJlr/u8u7e7t3/PtPv7AfEscnMV1+LuAAAABmJLR0QAAACSAP8q1jhsAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH5AUDFCAS5kaN7AAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAApfSURBVHja7d1PbhNnHMfh14YjINTsuEXUayCx8BFKRGRlWXVZdYkiV0mP4EUkroFyi+yool4huCsH1409M55/7++d51khSAMYaT79vjOJZ2m12SQAaGjuJQBAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAAMjXay9BbN8uZ88/PlttvCDAYGZptXHVCR6OfUICCAiN4yEkwFDcAxEaAAERBQABQXAAAUFEAAEhbAhEBBAQ8RARQEAY/sIvIoCAiIeIAALC8Bd6EQEERDxEBBAQhr+wiwggIOIhIoCAACAgBFkCVgggIAAICAACQg1jvgGUN58CBAQAAbFCrA8gX94TPWN9PxklHICAiItQAINzhAWAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAICAACAgAAgIAAgKAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAkJXXXgKA8S0eb59/vH77UUAAqB+O/Z/LPSSOsAAyisf+r1d9jIAAEC4kAgKQ4fqIEBIBAZhAfAQEgGzWiIAACImAAAiJgAAUra+v8RgyJAICUOgiERCAAi/uQ1zg+/49fCsTgID/99/kz9LXcZmAAASNw9gEBKDwQPS1QgQEEAcEBBAIBARAHDriJjogDlgggEAQe30ICCAOBVvOn9LaAgEEgqbx6JuAgDgw8kX++vurXj7v+ZtPAgIIhBWQ3+8nIJTv179//PiPn8SBkBfrpn+evteHgDCdcOz/XICQCMR0ArG
cP7U6xhojHgLCtOKx/+sjRkQcrIc+/i5DhUNAoMc1IhACUXo8BIRpro+Wa0QcxKGt8zef0v0/f/7v71X3GGvscAgIHIiIQIhD3/bjEWl1CAgciMhi+cXrIBCjvj7HVkhO8UgppVlabTb+2eL5djmr/bFnq+n8E2/Xw/r6fbvPIyTiMLLdkOQWDgERkLBxqNI2HiIiELnKKR4pOcIiWByGtL5+P5mIiINwCAiTD0RX62P/80UPiUCIh4BgPVgj4iAcAoJA1FkROV20x4qIQIhHrtxED6qzm+g7X3TX9uLY9sJ/7Pipzufr+viqzZ9FHCg9HhbIhBfESxfbU8/7u/hcVRf/qDe0BYISwyEgE9blxbrUC3+dv5M4MOV4pJTS3D8duQet6cdZFkQKR9R4CIiLdauPi3jhB6ujO46woKH7qzsvApMOh4CQjcXyS62Vsr2Xcuj4aO2lRDwG5Qhrghfrrj6uy89Vx3L+dDAe558/WB+IhwVCHWerTa2vBbn67SarP/ehACyv7o5GYOwLt3AgHAJCqj4yarIYqj5Xkwvv9mN3QyIciEe+fCV65BA83qbPv1+0Wh9Nvnq86tHVMS/8XR9hCQfCISBFx2PXbkhOObaK/nUNXQVEOBCP+hxhFaIqGr7wTTgQDgHh2cPFj9Xx7uZm0vG4r7gJLxyIh4CQUvr5wyw9HIjJsZAgHAhHl9wDCRiPKvsRmdLxVdUKEQ7EwwKBo4HYD4lwIB4WiPVR0+4KcQMdhKMPvpUJgHicxBEWgHBYIKX7elfvtNHxFYiHBQIgHBYIw6wQXwcC4mGBUBmJul+JDoiHgGBtgHBkwxFWMPvfhRcQDwsEQDgsEPLgEV4QDwsEQDgsEADxsEAYmcd4QTwEhJPDsf9zQgLCMRRHWIEsHm9fjEdVXEA8sEAAhCMb3lAqkCZvKHX918oLhnhggQAIR37cAwHEAwEpXd0nrDyJxVTCIR4CAmB1CAh92X4X3qp1YX0gHgzFTfSAvKEUwoGA0ElI9vkuvIgHAgIIB9lyDwQQDywQQDiwQADxQEBoa/sIL4gHOXGEBQgHFgge4UU8sEAA4UBA2Gryfh5f77xNC+KBgAhHg3C89N881PxvfDsThIMheUfCDOPRlncjRDwQEAFpHZHlL5fCgnggIOJhoSAc5MNjvBO0u0pAPDiVm+iAcGCB5CbnR3GtEMQDCwQQDiwQKwTEAwuEzHkSC+HAArFCQDwQEKwPxIM4HGENuEJy/cJCEA4skAARsT4QDywQwhEPhAMLxAoRD8QDCwQQDiwQMl0h1gfigYAgHogH2XCENeIK8VgvwoEFwskRsT4QDywQsiIeCAcWiBUiHogHFgggHFggZLpCrA/EAwEREfFAPMiaIywQDrBAprxCrA/EAwsE8UA4sEDof4WIB+KBBUJKKaXF42168DIgHFggnOLdzU2nH4d4gICIyH9+fTl/8kKJB4zGEVaAiDxcXFgdCAcCQvdrBPEAAQGEgzDcAwHxAAskusXjrRcB4cACAcQDC4QMeYRXOMACAcQDAQHEg+lwhAXCARZIZJ7AEg+wQADhwAIhT57AEg+wQEA4wAIBxAMBAcQDKjnCAuEACyQqj/CKB1ggIBxggZAnj/CKB1ggIBxggYB4gIAA4gG1OMIC4QALJCKP8IoHWCAgHGCBkCeP8IoHWCAgHGCBgHiABQLCAVggIB4gIMXyCK94QGSOsBAOwAIpmUd4xQMsEBAOsEBAPMACAeEALBAQDxCQYnmEVzwgOkdYCAdggZTKI7ziARYICAdYICAeYIGAcAAWCOIBCEixPMIrHlACR1gIB2CBlMgjvOIBFggIB1ggIB6ABYJwABYI4gFYIMXyCK9wgAUC4gECQp5Kf4RXPCA2R1gIB2CBIB6ABYJwABYI4iEeYIHQytQe4RUOsEBAPAABiSLyI7ziAdPgCAvhACwQxAOwQBAOwAJBPAALhJOV9AivcAAWCOIBCEhJcn2EVzyAXY6wWjh0LLV
++9HqAASE+uGo++viAQiIeBRLOAABCej6+6vnH49xP0Q8gDpmabXZeBnirI8+gyIcQBOewgq4TnYXingAY3GEFTgkXawR4QAskAF4PBdAQIpZIeIBCIgVMtjqEA9AQLA6gNF4jPdEkR7pFQ7AAslIlKMs8QAsEEuk0foQDsACQTwAASnR0EdZ4gHkwhFWR/o+yhIOwAJBPAALhP5XyLF4CAcgICJidQDh+G68GbI6gAjcA+lY26eyxAOIwhFWT5oeZTmyAiwQOlkiu+EQD0BAaLQ+hAMQECvE6gCK4x7IAI7dD9ldH6IBCAiVISnt/dUBAQGAWtwDAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQABAQAAQFAQAAQEAAEBAAEBAABAUBAABAQAAQEAAQEAAEBQEAAEBAABAQABAQAAQFAQAAQEAAEBAAEBAABAUBAABAQAAQEAAQEAAEBQEAAEBAABAQABAQAAQFAQAAQEAAExEsAgIAAICAACAgAAgIAAgKAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAICAACAgAU/EvfiluCemVcXgAAAAASUVORK5CYII=",y="/learn-wgpu-zh/assets/normal_mapping_correct.c594f67a.png",C="/learn-wgpu-zh/assets/no_srgb.c60c78b6.png",A="/learn-wgpu-zh/assets/debug_material.49f1d418.png",h=JSON.parse('{"title":"法线映射","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial11-normals/index.md","filePath":"intermediate/tutorial11-normals/index.md","lastUpdated":1701933923000}'),i={name:"intermediate/tutorial11-normals/index.md"},b=r('

法线映射

添加光照后,我们的场景已经看起来很不错了。不过,对象表面还缺少实物的那种凹凸细节。如果使用的纹理是光滑的就不是问题,但是砖块纹理应该是比较粗糙的。 我们可以给模型添加更多的三角形来雕刻出表面的凹凸细节,但这将使得顶点数据倍增而渲染变慢,而且也很难知道在哪里添加新的三角形。这,就是法线映射(Normal Mapping)的用武之地了。

法线映射也叫凹凸映射(Bump Mapping),是一种纹理映射技术,它不用增加模型的几何复杂度就能够模拟具有复杂凹凸细节的表面。与简单的纹理映射不同,当绘制对象的表面时,法线映射技术通过扰动表面的法向量来改变它的形状,使得着色处理后的颜色能反映出表面几何特性的变化。

还记得在实例化绘制教程中,我们尝试了在纹理中存储实例数据吗?法线贴图(Normal Map)就是存储着法向量数据的纹理!除了顶点法向量外,我们还将在光照计算中使用法线贴图中的法向量。

我们的砖块纹理对应的法线贴图(也就是法线纹理)长这样:

./cube-normal.png

纹理的 r、g、b 分量对应于法向量的 x、y 和 z 坐标分量。所有的 z 值都应该是正的,这就是为什么法线贴图有一个蓝色的色调。

我们来修改 model.rs 中的材质 Material 结构体,新增一个法线纹理 normal_texture 字段:

rust
pub struct Material {
-    pub name: String,
-    pub diffuse_texture: texture::Texture,
-    pub normal_texture: texture::Texture, // 更新!
-    pub bind_group: wgpu::BindGroup,
-}

还得更新纹理绑定组布局 texture_bind_group_layout 以包括法线贴图:

rust
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-    entries: &[
-        // ...
-        // 法线贴图
-        wgpu::BindGroupLayoutEntry {
-            binding: 2,
-            visibility: wgpu::ShaderStages::FRAGMENT,
-            ty: wgpu::BindingType::Texture {
-                multisampled: false,
-                sample_type: wgpu::TextureSampleType::Float { filterable: true },
-                view_dimension: wgpu::TextureViewDimension::D2,
-            },
-            count: None,
-        },
-        wgpu::BindGroupLayoutEntry {
-            binding: 3,
-            visibility: wgpu::ShaderStages::FRAGMENT,
-            ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
-            count: None,
-        },
-    ],
-    label: Some("texture_bind_group_layout"),
-});

resources.rsload_model() 函数中创建材质的循环里,添加以下代码来实际加载法线贴图:

rust
// resources.rs
-let mut materials = Vec::new();
-for m in obj_materials? {
-    let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
-    // 新增!
-    let normal_texture = load_texture(&m.normal_texture, device, queue).await?;
-
-    materials.push(model::Material::new(
-        device,
-        &m.name,
-        diffuse_texture,
-        normal_texture, // 新增!
-        layout,
-    ));
-}

上面使用的 Material::new() 函数的具体代码如下:

rust
impl Material {
-    pub fn new(
-        device: &wgpu::Device,
-        name: &str,
-        diffuse_texture: texture::Texture,
-        normal_texture: texture::Texture, // 新增!
-        layout: &wgpu::BindGroupLayout,
-    ) -> Self {
-        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-            layout,
-            entries: &[
-                wgpu::BindGroupEntry {
-                    binding: 0,
-                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
-                },
-                wgpu::BindGroupEntry {
-                    binding: 1,
-                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
-                },
-                // 新增!
-                wgpu::BindGroupEntry {
-                    binding: 2,
-                    resource: wgpu::BindingResource::TextureView(&normal_texture.view),
-                },
-                wgpu::BindGroupEntry {
-                    binding: 3,
-                    resource: wgpu::BindingResource::Sampler(&normal_texture.sampler),
-                },
-            ],
-            label: Some(name),
-        });
-
-        Self {
-            name: String::from(name),
-            diffuse_texture,
-            normal_texture, // 新增!
-            bind_group,
-        }
-    }
-}

现在我们可以在片元着色器中使用纹理了:

rust
// 片元着色器
-
-@group(0) @binding(0)
-var t_diffuse: texture_2d<f32>;
-@group(0)@binding(1)
-var s_diffuse: sampler;
-@group(0)@binding(2)
-var t_normal: texture_2d<f32>;
-@group(0) @binding(3)
-var s_normal: sampler;
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    let object_color: vec4f = textureSample(t_diffuse, s_diffuse, in.tex_coords);
-    let object_normal: vec4f = textureSample(t_normal, s_normal, in.tex_coords);
-    // 环境光强度
-    let ambient_strength = 0.1;
-    let ambient_color = light.color * ambient_strength;
-
-    // Create the lighting vectors
-    let tangent_normal = object_normal.xyz * 2.0 - 1.0;
-    let light_dir = normalize(light.position - in.world_position);
-    let view_dir = normalize(camera.view_pos.xyz - in.world_position);
-    let half_dir = normalize(view_dir + light_dir);
-
-    let diffuse_strength = max(dot(tangent_normal, light_dir), 0.0);
-    let diffuse_color = light.color * diffuse_strength;
-
-    let specular_strength = pow(max(dot(tangent_normal, half_dir), 0.0), 32.0);
-    let specular_color = specular_strength * light.color;
-
-    let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
-
-    return vec4f(result, object_color.a);
-}

如果现在运行代码,你会发现渲染效果看起来不太对劲。让我们将效果与上一个教程比较一下:

场景中应该被点亮的部分是黑暗的,反之亦然。

从切空间到世界空间

光照教程的法线矩阵 部分有提到:我们是在世界空间中进行光照计算的。也就是说,整个场景的方向是相对于世界坐标系而言的。 从法线纹理中提取的法向量都处在正 Z 方向上,也就是说我们的光照计算认为模型的所有表面都朝向大致相同的方向。这被称为切空间(Tangent Space,也叫做切向量空间)。

光照教程 中我们用顶点法向量来表示表面的方向。现在,可以用它来将法线贴图中的法向量从切空间变换到世界空间。实现此变换需要用到一点点线性代数。

我们将创建一个矩阵,代表相对于顶点法向量的坐标空间(Coordinate Space)。然后使用它来变换法线贴图数据,使其处于世界空间:

rust
let coordinate_system = mat3x3f(
-    vec3(1, 0, 0), // x axis (右)
-    vec3(0, 1, 0), // y axis (上)
-    vec3(0, 0, 1)  // z axis (前)
-);

切向量与副切向量

我们已经有了需要的 3 个向量中的一个,即法向量。另外两个是切向量(Tangent Vector)与副切向量(Bitangent Vector, 也被叫作副法向量(Binormal))。切向量是与法向量垂直且表面平行的向量(也就是不与表面相交)。副切向量是同时垂直于由法向量与切向量的向量,所以可以由法向量与切向量的叉积计算得出。切向量、副切向量和法向量一起分别代表坐标空间 x、y 和 z 轴。

一些模型格式会在顶点数据中包括切向量副切向量,但 OBJ 没有。我们得手动计算,可以从现有的顶点数据中推导出切向量与副切向量。请看下图:

可以使用三角形的边和法线来计算切向量与副切向量。首先,我们需要更新在 model.rs 中的顶点 ModelVertex 结构体:

rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-pub struct ModelVertex {
-    position: [f32; 3],
-    tex_coords: [f32; 2],
-    normal: [f32; 3],
-    // 新增!
-    tangent: [f32; 3],
-    bitangent: [f32; 3],
-}

同时也需要更新顶点缓冲区布局 VertexBufferLayout:

rust
impl Vertex for ModelVertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &[
-                // ...
-
-                // Tangent and bitangent
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
-                    shader_location: 3,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 11]>() as wgpu::BufferAddress,
-                    shader_location: 4,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-            ],
-        }
-    }
-}

现在可以计算新的切向量副切向量了, 用以下代码来更新 resource.rsload_model() 函数的网格生成:

rust
let meshes = models
-    .into_iter()
-    .map(|m| {
-        let mut vertices = (0..m.mesh.positions.len() / 3)
-            .map(|i| model::ModelVertex {
-                position: [
-                    m.mesh.positions[i * 3],
-                    m.mesh.positions[i * 3 + 1],
-                    m.mesh.positions[i * 3 + 2],
-                ],
-                tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
-                normal: [
-                    m.mesh.normals[i * 3],
-                    m.mesh.normals[i * 3 + 1],
-                    m.mesh.normals[i * 3 + 2],
-                ],
-                // 随后会计算实际值来替换
-                tangent: [0.0; 3],
-                bitangent: [0.0; 3],
-            })
-            .collect::<Vec<_>>();
-
-        let indices = &m.mesh.indices;
-        let mut triangles_included = vec![0; vertices.len()];
-
-        // 遍历三角形的三个顶点来计算切向量与副切向量.
-        for c in indices.chunks(3) {
-            let v0 = vertices[c[0] as usize];
-            let v1 = vertices[c[1] as usize];
-            let v2 = vertices[c[2] as usize];
-
-            let pos0: glam::Vec3 = v0.position.into();
-            let pos1: glam::Vec3 = v1.position.into();
-            let pos2: glam::Vec3 = v2.position.into();
-
-            let uv0: glam::Vec2 = v0.tex_coords.into();
-            let uv1: glam::Vec2 = v1.tex_coords.into();
-            let uv2: glam::Vec2 = v2.tex_coords.into();
-
-            // 计算三角形的边
-            let delta_pos1 = pos1 - pos0;
-            let delta_pos2 = pos2 - pos0;
-
-            // 计算切向量/副切向量需要用到的两个方向向量
-            let delta_uv1 = uv1 - uv0;
-            let delta_uv2 = uv2 - uv0;
-
-            // 求解以下方程组
-            //     delta_pos1 = delta_uv1.x * T + delta_u.y * B
-            //     delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
-            // 幸运的是,在我发现这个方程的地方提供了如下求解方案!
-            let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
-            let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
-            // 我们翻转副切向量以启用具有 wgpu 纹理坐标系的右手标架的法线贴图
-            let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * -r;
-
-            // 我们为三角形中的每个顶点使用相同的切向量/副切向量
-            vertices[c[0] as usize].tangent =
-                (tangent + glam::Vec3::from_array(vertices[c[0] as usize].tangent)).into();
-            vertices[c[1] as usize].tangent =
-                (tangent + glam::Vec3::from_array(vertices[c[1] as usize].tangent)).into();
-            vertices[c[2] as usize].tangent =
-                (tangent + glam::Vec3::from_array(vertices[c[2] as usize].tangent)).into();
-            vertices[c[0] as usize].bitangent =
-                (bitangent + glam::Vec3::from_array(vertices[c[0] as usize].bitangent)).into();
-            vertices[c[1] as usize].bitangent =
-                (bitangent + glam::Vec3::from_array(vertices[c[1] as usize].bitangent)).into();
-            vertices[c[2] as usize].bitangent =
-                (bitangent + glam::Vec3::from_array(vertices[c[2] as usize].bitangent)).into();
-
-            // 用于计算顶点上切向量/副切向量的平均值
-            triangles_included[c[0] as usize] += 1;
-            triangles_included[c[1] as usize] += 1;
-            triangles_included[c[2] as usize] += 1;
-        }
-
-        // 计算切向量/副切向量的平均值
-        for (i, n) in triangles_included.into_iter().enumerate() {
-            let denom = 1.0 / n as f32;
-            let mut v = &mut vertices[i];
-            v.tangent = (glam::Vec3::from_array(v.tangent) * denom).into();
-            v.bitangent = (glam::Vec3::from_array(v.bitangent) * denom).into();
-        }
-
-        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-            label: Some(&format!("{:?} Vertex Buffer", file_name)),
-            contents: bytemuck::cast_slice(&vertices),
-            usage: wgpu::BufferUsages::VERTEX,
-        });
-        let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-            label: Some(&format!("{:?} Index Buffer", file_name)),
-            contents: bytemuck::cast_slice(&m.mesh.indices),
-            usage: wgpu::BufferUsages::INDEX,
-        });
-
-        model::Mesh {
-            name: file_name.to_string(),
-            vertex_buffer,
-            index_buffer,
-            num_elements: m.mesh.indices.len() as u32,
-            material: m.mesh.material_id.unwrap_or(0),
-        }
-    })
-    .collect::<Vec<_>>();

从世界空间到切空间

由于法线贴图默认是在切空间中,该计算中使用的所有其他变量也得变换为切空间。我们需要在顶点着色器中构建切向量矩阵,首先,修改 VertexInput 来包括之前计算的切向量与副切向量:

rust
struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) tex_coords: vec2f;
-    @location(2) normal: vec3f;
-    @location(3) tangent: vec3f;
-    @location(4) bitangent: vec3f;
-};

接下来构建切向量矩阵 tangent_matrix,然后将顶点,光源和视图坐标变换到切空间:

rust
struct VertexOutput {
-    @builtin(position) clip_position: vec4f;
-    @location(0) tex_coords: vec2f;
-    // 更新!
-    @location(1) tangent_position: vec3f;
-    @location(2) tangent_light_position: vec3f;
-    @location(3) tangent_view_position: vec3f;
-};
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    // ...
-    let normal_matrix = mat3x3f(
-        instance.normal_matrix_0,
-        instance.normal_matrix_1,
-        instance.normal_matrix_2,
-    );
-
-    // 构建切向量矩阵
-    let world_normal = normalize(normal_matrix * model.normal);
-    let world_tangent = normalize(normal_matrix * model.tangent);
-    let world_bitangent = normalize(normal_matrix * model.bitangent);
-    let tangent_matrix = transpose(mat3x3f(
-        world_tangent,
-        world_bitangent,
-        world_normal,
-    ));
-
-    let world_position = model_matrix * vec4f(model.position, 1.0);
-
-    var out: VertexOutput;
-    out.clip_position = camera.view_proj * world_position;
-    out.tex_coords = model.tex_coords;
-    out.tangent_position = tangent_matrix * world_position.xyz;
-    out.tangent_view_position = tangent_matrix * camera.view_pos.xyz;
-    out.tangent_light_position = tangent_matrix * light.position;
-    return out;
-}

最后,更新片元着色器以使用这些转换后的光照值:

rust
@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    // Sample textures..
-
-    // 光照计算需要的向量
-    let tangent_normal = object_normal.xyz * 2.0 - 1.0;
-    let light_dir = normalize(in.tangent_light_position - in.tangent_position);
-    let view_dir = normalize(in.tangent_view_position - in.tangent_position);
-
-    // 执行光照计算...
-}

完成上边的计算,我们会得到如下渲染效果:

sRGB 与法线纹理

光线的强度是对其能量的物理度量,而亮度 (brightness) 度量的是人眼所感知到的光线强度。 由于人眼中的光感受器对不同波长的光线能量的响应不同,即使红光和绿光的物理强度相同,在我们看来它们也并不具有相同的亮度,事实上,人眼是按对数关系来感知光线强度的。根据人类视觉系统所具有的这种特性,如果希望亮度看起来按等间隔的步长递增,那么赋给像素的光强值应该按指数的形式递增。显示设备可以根据所能产生的最小和最大光强值通过计算得到亮度变化的步长。

sRGB 色彩空间是一种于计算机显示设备和打印机等设备的标准颜色系统,包括 WebGPU 在内的大部分图形绘制系统都支持 sRGB。它通过对色值的 𝛄 (gamma) 编码,实现了图像在有限的色值范围(红、绿、蓝每个颜色通道的取值都在 [0, 255] 范围内)内隐藏人眼对色彩的感知差异。

GPU 硬件对 sRGB 色彩空间提供了特殊支持,可以将颜色值从线性值转换到 𝛄 编码,并通过 𝛄 校正(Gamma Correction)解码回线性值。 我们一直在使用 Rgba8UnormSrgb 格式来制作所有的纹理。Srgb 位就是指示 wgpu:

  • 当着色器代码对 sRGB 格式的纹理进行采样时,GPU 硬件要将其从 sRGB 采样值解码为线性值再返回给着色器;
  • 当着色器代码写入线性颜色值到 sRGB 格式的纹理时,GPU 硬件要对其进行 𝛄 编码后再写入;

如果纹理数据不是基于 sRGB 色彩空间制作的,但指定了 RgbaUnormSrgb 格式,会由于改变了 GPU 对纹理的采样方式而导致渲染结果与预期不符。 这可以通过在创建纹理时使用 Rgba8Unorm 来避免。让我们给 Texture 结构体添加一个 is_normal_map 参数。

rust
pub fn from_image(
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-    img: &image::DynamicImage,
-    label: Option<&str>,
-    is_normal_map: bool, // 新增!
-) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
-    // ...
-    let texture = device.create_texture(&wgpu::TextureDescriptor {
-        label,
-        size,
-        mip_level_count: 1,
-        sample_count: 1,
-        dimension: wgpu::TextureDimension::D2,
-        // 更新!
-        format: if is_normal_map {
-            wgpu::TextureFormat::Rgba8Unorm
-        } else {
-            wgpu::TextureFormat::Rgba8UnormSrgb
-        },
-        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
-        view_formats: &[],
-    });
-
-    // ...
-
-    Ok((Self { texture, view, sampler }, cmd_buffer))
-}

并将这一修改同步到其他有关的函数:

rust
pub fn from_bytes(
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-    bytes: &[u8],
-    label: &str,
-    is_normal_map: bool, // 新增!
-) -> Result<Self> {
-    let img = image::load_from_memory(bytes)?;
-    Self::from_image(device, queue, &img, Some(label), is_normal_map) // 更新!
-}

同时也还要更新 resource.rs

rust
pub async fn load_texture(
-    file_name: &str,
-    is_normal_map: bool,
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-) -> anyhow::Result<texture::Texture> {
-    let data = load_binary(file_name).await?;
-    texture::Texture::from_bytes(device, queue, &data, file_name, is_normal_map)
-}
-
-pub async fn load_model(
-    file_name: &str,
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-    layout: &wgpu::BindGroupLayout,
-) -> anyhow::Result<model::Model> {
-    // ...
-
-    let mut materials = Vec::new();
-    for m in obj_materials? {
-        let diffuse_texture = load_texture(&m.diffuse_texture, false, device, queue).await?; // 更新!
-        let normal_texture = load_texture(&m.normal_texture, true, device, queue).await?; // 更新!
-
-        materials.push(model::Material::new(
-            device,
-            &m.name,
-            diffuse_texture,
-            normal_texture,
-            layout,
-        ));
-    }
-}

现在的渲染效果如下:

试试其他材质

现在改用其他材质来试试效果,在 DrawModel trait 中添加了一个 draw_model_instanced_with_material() 接口并在渲染通道对象上实现此接口:

rust
pub trait DrawModel<'a> {
-    // ...
-    fn draw_model_instanced_with_material(
-        &mut self,
-        model: &'a Model,
-        material: &'a Material,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-}
-
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    // ...
-    fn draw_model_instanced_with_material(
-        &mut self,
-        model: &'b Model,
-        material: &'b Material,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        for mesh in &model.meshes {
-            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
-        }
-    }
-}

我找到了一个鹅卵石纹理及匹配的法线贴图,并为它创建一个叫 debug_material 的材质实例:

rust
// lib.rs
-impl State {
-    async fn new(window: &Window) -> Result<Self> {
-        // ...
-        let debug_material = {
-            let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
-            let normal_bytes = include_bytes!("../res/cobble-normal.png");
-
-            let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "res/alt-diffuse.png", false).unwrap();
-            let normal_texture = texture::Texture::from_bytes(&device, &queue, normal_bytes, "res/alt-normal.png", true).unwrap();
-
-            model::Material::new(&device, "alt-material", diffuse_texture, normal_texture, &texture_bind_group_layout)
-        };
-        Self {
-            // ...
-            #[allow(dead_code)]
-            debug_material,
-        }
-    }
-}

然后调用刚实现的 draw_model_instanced_with_material() 函数来使用 debug_material 渲染:

rust
render_pass.set_pipeline(&self.render_pipeline);
-render_pass.draw_model_instanced_with_material(
-    &self.obj_model,
-    &self.debug_material,
-    0..self.instances.len() as u32,
-    &self.camera_bind_group,
-    &self.light_bind_group,
-);

得到的渲染效果如下:

上面使用的纹理可以在 Github 源码库中找到。

',67);function u(m,B,g,d,_,f){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[b,n(a,{example:"tutorial11_normals"}),n(l)])}const v=p(i,[["render",u]]);export{h as __pageData,v as default}; diff --git a/assets/intermediate_tutorial11-normals_index.md.0360ba19.lean.js b/assets/intermediate_tutorial11-normals_index.md.0360ba19.lean.js deleted file mode 100644 index 8b1d02f8c..000000000 --- a/assets/intermediate_tutorial11-normals_index.md.0360ba19.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/cube-normal.076c62a3.png",t="/learn-wgpu-zh/assets/normal_mapping_wrong.a2c5f143.png",D="/learn-wgpu-zh/assets/ambient_diffuse_specular_lighting.4ab44d3e.png",F="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAAGQCAYAAACAvzbMAAABg2lDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV9TS0UqDmaQ4pChOlkQFXHUKhShQqgVWnUwufQLmrQkKS6OgmvBwY/FqoOLs64OroIg+AHi5Oik6CIl/i8ptIjx4Lgf7+497t4BQrPCdKtnHNAN20wnE1I2tyqFXxFCECKigMKs2pwsp+A7vu4R4OtdnGf5n/tz9Gt5iwEBiXiW1UybeIN4etOucd4nFllJ0YjPicdMuiDxI9dVj984F10WeKZoZtLzxCKxVOxitYtZydSJp4hjmm5QvpD1WOO8xVmv1Fn7nvyFkbyxssx1msNIYhFLkCFBRR1lVGAjTqtBioU07Sd8/FHXL5NLJVcZjBwLqEKH4vrB/+B3t1ZhcsJLiiSA0IvjfIwA4V2g1XCc72PHaZ0AwWfgyuj4q01g5pP0RkeLHQED28DFdUdT94DLHWDoqaaYiisFaQqFAvB+Rt+UAwZvgb41r7f2Pk4fgAx1lboBDg6B0SJlr/u8u7e7t3/PtPv7AfEscnMV1+LuAAAABmJLR0QAAACSAP8q1jhsAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH5AUDFCAS5kaN7AAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAApfSURBVHja7d1PbhNnHMfh14YjINTsuEXUayCx8BFKRGRlWXVZdYkiV0mP4EUkroFyi+yool4huCsH1409M55/7++d51khSAMYaT79vjOJZ2m12SQAaGjuJQBAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAAMjXay9BbN8uZ88/PlttvCDAYGZptXHVCR6OfUICCAiN4yEkwFDcAxEaAAERBQABQXAAAUFEAAEhbAhEBBAQ8RARQEAY/sIvIoCAiIeIAALC8Bd6EQEERDxEBBAQhr+wiwggIOIhIoCAACAgBFkCVgggIAAICAACQg1jvgGUN58CBAQAAbFCrA8gX94TPWN9PxklHICAiItQAINzhAWAgAAgIAAICAACAgACAoCAAC
AgAAgIAAICAAICgIAAICAACAgAAgIAAgKAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAkJXXXgKA8S0eb59/vH77UUAAqB+O/Z/LPSSOsAAyisf+r1d9jIAAEC4kAgKQ4fqIEBIBAZhAfAQEgGzWiIAACImAAAiJgAAUra+v8RgyJAICUOgiERCAAi/uQ1zg+/49fCsTgID/99/kz9LXcZmAAASNw9gEBKDwQPS1QgQEEAcEBBAIBARAHDriJjogDlgggEAQe30ICCAOBVvOn9LaAgEEgqbx6JuAgDgw8kX++vurXj7v+ZtPAgIIhBWQ3+8nIJTv179//PiPn8SBkBfrpn+evteHgDCdcOz/XICQCMR0ArGcP7U6xhojHgLCtOKx/+sjRkQcrIc+/i5DhUNAoMc1IhACUXo8BIRpro+Wa0QcxKGt8zef0v0/f/7v71X3GGvscAgIHIiIQIhD3/bjEWl1CAgciMhi+cXrIBCjvj7HVkhO8UgppVlabTb+2eL5djmr/bFnq+n8E2/Xw/r6fbvPIyTiMLLdkOQWDgERkLBxqNI2HiIiELnKKR4pOcIiWByGtL5+P5mIiINwCAiTD0RX62P/80UPiUCIh4BgPVgj4iAcAoJA1FkROV20x4qIQIhHrtxED6qzm+g7X3TX9uLY9sJ/7Pipzufr+viqzZ9FHCg9HhbIhBfESxfbU8/7u/hcVRf/qDe0BYISwyEgE9blxbrUC3+dv5M4MOV4pJTS3D8duQet6cdZFkQKR9R4CIiLdauPi3jhB6ujO46woKH7qzsvApMOh4CQjcXyS62Vsr2Xcuj4aO2lRDwG5Qhrghfrrj6uy89Vx3L+dDAe558/WB+IhwVCHWerTa2vBbn67SarP/ehACyv7o5GYOwLt3AgHAJCqj4yarIYqj5Xkwvv9mN3QyIciEe+fCV65BA83qbPv1+0Wh9Nvnq86tHVMS/8XR9hCQfCISBFx2PXbkhOObaK/nUNXQVEOBCP+hxhFaIqGr7wTTgQDgHh2cPFj9Xx7uZm0vG4r7gJLxyIh4CQUvr5wyw9HIjJsZAgHAhHl9wDCRiPKvsRmdLxVdUKEQ7EwwKBo4HYD4lwIB4WiPVR0+4KcQMdhKMPvpUJgHicxBEWgHBYIKX7elfvtNHxFYiHBQIgHBYIw6wQXwcC4mGBUBmJul+JDoiHgGBtgHBkwxFWMPvfhRcQDwsEQDgsEPLgEV4QDwsEQDgsEADxsEAYmcd4QTwEhJPDsf9zQgLCMRRHWIEsHm9fjEdVXEA8sEAAhCMb3lAqkCZvKHX918oLhnhggQAIR37cAwHEAwEpXd0nrDyJxVTCIR4CAmB1CAh92X4X3qp1YX0gHgzFTfSAvKEUwoGA0ElI9vkuvIgHAgIIB9lyDwQQDywQQDiwQADxQEBoa/sIL4gHOXGEBQgHFgge4UU8sEAA4UBA2Gryfh5f77xNC+KBgAhHg3C89N881PxvfDsThIMheUfCDOPRlncjRDwQEAFpHZHlL5fCgnggIOJhoSAc5MNjvBO0u0pAPDiVm+iAcGCB5CbnR3GtEMQDCwQQDiwQKwTEAwuEzHkSC+HAArFCQDwQEKwPxIM4HGENuEJy/cJCEA4skAARsT4QDywQwhEPhAMLxAoRD8QDCwQQDiwQMl0h1gfigYAgHogH2XCENeIK8VgvwoEFwskRsT4QDywQsiIeCAcWiBUiHogHFgggHFggZLpCrA/EAwEREfFAPMiaIywQDrBAprxCrA/EAwsE8UA4sEDof4WIB+KBBUJKKaXF42168DIgHFggnOLdzU2nH4d4gICIyH9+fTl/8kKJB4zGEVaAiDxcXFgdCAcCQvdrBPEAAQGEgzDcAwHxAAskusXjrRcB4cACAcQDC4QMeYRXOMACAcQDAQHEg+lwhAXCARZIZJ7AEg+wQADhwAIhT57AEg+wQEA4wAIBxAMBAcQDKjnCAuEACyQqj/CKB1ggIBxggZAnj/
CKB1ggIBxggYB4gIAA4gG1OMIC4QALJCKP8IoHWCAgHGCBkCeP8IoHWCAgHGCBgHiABQLCAVggIB4gIMXyCK94QGSOsBAOwAIpmUd4xQMsEBAOsEBAPMACAeEALBAQDxCQYnmEVzwgOkdYCAdggZTKI7ziARYICAdYICAeYIGAcAAWCOIBCEixPMIrHlACR1gIB2CBlMgjvOIBFggIB1ggIB6ABYJwABYI4gFYIMXyCK9wgAUC4gECQp5Kf4RXPCA2R1gIB2CBIB6ABYJwABYI4iEeYIHQytQe4RUOsEBAPAABiSLyI7ziAdPgCAvhACwQxAOwQBAOwAJBPAALhJOV9AivcAAWCOIBCEhJcn2EVzyAXY6wWjh0LLV++9HqAASE+uGo++viAQiIeBRLOAABCej6+6vnH49xP0Q8gDpmabXZeBnirI8+gyIcQBOewgq4TnYXingAY3GEFTgkXawR4QAskAF4PBdAQIpZIeIBCIgVMtjqEA9AQLA6gNF4jPdEkR7pFQ7AAslIlKMs8QAsEEuk0foQDsACQTwAASnR0EdZ4gHkwhFWR/o+yhIOwAJBPAALhP5XyLF4CAcgICJidQDh+G68GbI6gAjcA+lY26eyxAOIwhFWT5oeZTmyAiwQOlkiu+EQD0BAaLQ+hAMQECvE6gCK4x7IAI7dD9ldH6IBCAiVISnt/dUBAQGAWtwDAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQABAQAAQFAQAAQEAAEBAAEBAABAUBAABAQAAQEAAQEAAEBQEAAEBAABAQABAQAAQFAQAAQEAAEBAAEBAABAUBAABAQAAQEAAQEAAEBQEAAEBAABAQABAQAAQFAQAAQEAAExEsAgIAAICAACAgAAgIAAgKAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAICAACAgAU/EvfiluCemVcXgAAAAASUVORK5CYII=",y="/learn-wgpu-zh/assets/normal_mapping_correct.c594f67a.png",C="/learn-wgpu-zh/assets/no_srgb.c60c78b6.png",A="/learn-wgpu-zh/assets/debug_material.49f1d418.png",h=JSON.parse('{"title":"法线映射","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial11-normals/index.md","filePath":"intermediate/tutorial11-normals/index.md","lastUpdated":1701933923000}'),i={name:"intermediate/tutorial11-normals/index.md"},b=r("",67);function u(m,B,g,d,_,f){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[b,n(a,{example:"tutorial11_normals"}),n(l)])}const v=p(i,[["render",u]]);export{h as __pageData,v as default}; diff --git a/assets/intermediate_tutorial11-normals_index.md.JKRpqUo4.js b/assets/intermediate_tutorial11-normals_index.md.JKRpqUo4.js new file mode 100644 index 000000000..9eafbebf6 --- /dev/null +++ b/assets/intermediate_tutorial11-normals_index.md.JKRpqUo4.js 
@@ -0,0 +1,432 @@ +import{_ as h,D as s,o as l,c as k,I as i,R as p}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/cube-normal.ojipvyDD.png",e="/learn-wgpu-zh/assets/normal_mapping_wrong.1lc7KaJ-.png",r="/learn-wgpu-zh/assets/ambient_diffuse_specular_lighting.5nUogza3.png",E="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAAGQCAYAAACAvzbMAAABg2lDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV9TS0UqDmaQ4pChOlkQFXHUKhShQqgVWnUwufQLmrQkKS6OgmvBwY/FqoOLs64OroIg+AHi5Oik6CIl/i8ptIjx4Lgf7+497t4BQrPCdKtnHNAN20wnE1I2tyqFXxFCECKigMKs2pwsp+A7vu4R4OtdnGf5n/tz9Gt5iwEBiXiW1UybeIN4etOucd4nFllJ0YjPicdMuiDxI9dVj984F10WeKZoZtLzxCKxVOxitYtZydSJp4hjmm5QvpD1WOO8xVmv1Fn7nvyFkbyxssx1msNIYhFLkCFBRR1lVGAjTqtBioU07Sd8/FHXL5NLJVcZjBwLqEKH4vrB/+B3t1ZhcsJLiiSA0IvjfIwA4V2g1XCc72PHaZ0AwWfgyuj4q01g5pP0RkeLHQED28DFdUdT94DLHWDoqaaYiisFaQqFAvB+Rt+UAwZvgb41r7f2Pk4fgAx1lboBDg6B0SJlr/u8u7e7t3/PtPv7AfEscnMV1+LuAAAABmJLR0QAAACSAP8q1jhsAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH5AUDFCAS5kaN7AAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAApfSURBVHja7d1PbhNnHMfh14YjINTsuEXUayCx8BFKRGRlWXVZdYkiV0mP4EUkroFyi+yool4huCsH1409M55/7++d51khSAMYaT79vjOJZ2m12SQAaGjuJQBAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAAMjXay9BbN8uZ88/PlttvCDAYGZptXHVCR6OfUICCAiN4yEkwFDcAxEaAAERBQABQXAAAUFEAAEhbAhEBBAQ8RARQEAY/sIvIoCAiIeIAALC8Bd6EQEERDxEBBAQhr+wiwggIOIhIoCAACAgBFkCVgggIAAICAACQg1jvgGUN58CBAQAAbFCrA8gX94TPWN9PxklHICAiItQAINzhAWAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAICAACAgAAgIAAgKAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAkJXXXgKA8S0eb59/vH77UUAAqB+O/Z/LPSSOsAAyisf+r1d9jIAAEC4kAgKQ4fqIEBIBAZhAfAQEgGzWiIAACImAAAiJgAAUra+v8RgyJAICUOgiERCAAi/uQ1zg+/49fCsTgID/99/kz9LXcZmAAASNw9gEBKDwQPS1QgQEEAcEBBAIBARAHDriJjogDlgggEAQe30ICCAOBVvOn9LaAgEEgqbx6JuAgDgw8kX++vurXj7v+ZtPAgIIhBWQ3+8nIJTv179//PiPn8SBkBfrpn+evteHgDCdcOz/XICQCMR0ArGcP7U6xhojHgLCtOKx/+sjRkQcrIc+/i5DhUNAoMc1IhACUXo8BIRpro+Wa0QcxKGt8zef0v0/f/7v71X3GGvscAgIHIiIQIhD3/bjEWl1CAgc
iMhi+cXrIBCjvj7HVkhO8UgppVlabTb+2eL5djmr/bFnq+n8E2/Xw/r6fbvPIyTiMLLdkOQWDgERkLBxqNI2HiIiELnKKR4pOcIiWByGtL5+P5mIiINwCAiTD0RX62P/80UPiUCIh4BgPVgj4iAcAoJA1FkROV20x4qIQIhHrtxED6qzm+g7X3TX9uLY9sJ/7Pipzufr+viqzZ9FHCg9HhbIhBfESxfbU8/7u/hcVRf/qDe0BYISwyEgE9blxbrUC3+dv5M4MOV4pJTS3D8duQet6cdZFkQKR9R4CIiLdauPi3jhB6ujO46woKH7qzsvApMOh4CQjcXyS62Vsr2Xcuj4aO2lRDwG5Qhrghfrrj6uy89Vx3L+dDAe558/WB+IhwVCHWerTa2vBbn67SarP/ehACyv7o5GYOwLt3AgHAJCqj4yarIYqj5Xkwvv9mN3QyIciEe+fCV65BA83qbPv1+0Wh9Nvnq86tHVMS/8XR9hCQfCISBFx2PXbkhOObaK/nUNXQVEOBCP+hxhFaIqGr7wTTgQDgHh2cPFj9Xx7uZm0vG4r7gJLxyIh4CQUvr5wyw9HIjJsZAgHAhHl9wDCRiPKvsRmdLxVdUKEQ7EwwKBo4HYD4lwIB4WiPVR0+4KcQMdhKMPvpUJgHicxBEWgHBYIKX7elfvtNHxFYiHBQIgHBYIw6wQXwcC4mGBUBmJul+JDoiHgGBtgHBkwxFWMPvfhRcQDwsEQDgsEPLgEV4QDwsEQDgsEADxsEAYmcd4QTwEhJPDsf9zQgLCMRRHWIEsHm9fjEdVXEA8sEAAhCMb3lAqkCZvKHX918oLhnhggQAIR37cAwHEAwEpXd0nrDyJxVTCIR4CAmB1CAh92X4X3qp1YX0gHgzFTfSAvKEUwoGA0ElI9vkuvIgHAgIIB9lyDwQQDywQQDiwQADxQEBoa/sIL4gHOXGEBQgHFgge4UU8sEAA4UBA2Gryfh5f77xNC+KBgAhHg3C89N881PxvfDsThIMheUfCDOPRlncjRDwQEAFpHZHlL5fCgnggIOJhoSAc5MNjvBO0u0pAPDiVm+iAcGCB5CbnR3GtEMQDCwQQDiwQKwTEAwuEzHkSC+HAArFCQDwQEKwPxIM4HGENuEJy/cJCEA4skAARsT4QDywQwhEPhAMLxAoRD8QDCwQQDiwQMl0h1gfigYAgHogH2XCENeIK8VgvwoEFwskRsT4QDywQsiIeCAcWiBUiHogHFgggHFggZLpCrA/EAwEREfFAPMiaIywQDrBAprxCrA/EAwsE8UA4sEDof4WIB+KBBUJKKaXF42168DIgHFggnOLdzU2nH4d4gICIyH9+fTl/8kKJB4zGEVaAiDxcXFgdCAcCQvdrBPEAAQGEgzDcAwHxAAskusXjrRcB4cACAcQDC4QMeYRXOMACAcQDAQHEg+lwhAXCARZIZJ7AEg+wQADhwAIhT57AEg+wQEA4wAIBxAMBAcQDKjnCAuEACyQqj/CKB1ggIBxggZAnj/CKB1ggIBxggYB4gIAA4gG1OMIC4QALJCKP8IoHWCAgHGCBkCeP8IoHWCAgHGCBgHiABQLCAVggIB4gIMXyCK94QGSOsBAOwAIpmUd4xQMsEBAOsEBAPMACAeEALBAQDxCQYnmEVzwgOkdYCAdggZTKI7ziARYICAdYICAeYIGAcAAWCOIBCEixPMIrHlACR1gIB2CBlMgjvOIBFggIB1ggIB6ABYJwABYI4gFYIMXyCK9wgAUC4gECQp5Kf4RXPCA2R1gIB2CBIB6ABYJwABYI4iEeYIHQytQe4RUOsEBAPAABiSLyI7ziAdPgCAvhACwQxAOwQBAOwAJBPAALhJOV9AivcAAWCOIBCEhJcn2EVzyAXY6wWjh0LLV++9HqAASE+uGo++viAQiIeBRLOAABCej6+6vnH49xP0Q8gDpmabXZeBnirI8+gyIcQBOewgq4TnYXingAY3GEFTgkXawR4QAskAF4PBdAQIpZ
IeIBCIgVMtjqEA9AQLA6gNF4jPdEkR7pFQ7AAslIlKMs8QAsEEuk0foQDsACQTwAASnR0EdZ4gHkwhFWR/o+yhIOwAJBPAALhP5XyLF4CAcgICJidQDh+G68GbI6gAjcA+lY26eyxAOIwhFWT5oeZTmyAiwQOlkiu+EQD0BAaLQ+hAMQECvE6gCK4x7IAI7dD9ldH6IBCAiVISnt/dUBAQGAWtwDAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQABAQAAQFAQAAQEAAEBAAEBAABAUBAABAQAAQEAAQEAAEBQEAAEBAABAQABAQAAQFAQAAQEAAEBAAEBAABAUBAABAQAAQEAAQEAAEBQEAAEBAABAQABAQAAQFAQAAQEAAExEsAgIAAICAACAgAAgIAAgKAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAICAACAgAU/EvfiluCemVcXgAAAAASUVORK5CYII=",d="/learn-wgpu-zh/assets/normal_mapping_correct.EKHmPX5_.png",g="/learn-wgpu-zh/assets/no_srgb.TzdPNppd.png",y="/learn-wgpu-zh/assets/debug_material.knbjh1ZS.png",_=JSON.parse('{"title":"法线映射","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial11-normals/index.md","filePath":"intermediate/tutorial11-normals/index.md","lastUpdated":1703303099000}'),F={name:"intermediate/tutorial11-normals/index.md"},A=p('

法线映射

添加光照后,我们的场景已经看起来很不错了。不过,对象表面还缺少实物的那种凹凸细节。如果使用的纹理是光滑的就不是问题,但是砖块纹理应该是比较粗糙的。 我们可以给模型添加更多的三角形来雕刻出表面的凹凸细节,但这将使得顶点数据倍增而渲染变慢,而且也很难知道在哪里添加新的三角形。这,就是法线映射(Normal Mapping)的用武之地了。

法线映射也叫凹凸映射(Bump Mapping),是一种纹理映射技术,它不用增加模型的几何复杂度就能够模拟具有复杂凹凸细节的表面。与简单的纹理映射不同,当绘制对象的表面时,法线映射技术通过扰动表面的法向量来改变它的形状,使得着色处理后的颜色能反映出表面几何特性的变化。

还记得在实例化绘制教程中,我们尝试了在纹理中存储实例数据吗?法线贴图(Normal Map)就是存储着法向量数据的纹理!除了顶点法向量外,我们还将在光照计算中使用法线贴图中的法向量。

我们的砖块纹理对应的法线贴图(也就是法线纹理)长这样:

./cube-normal.png

纹理的 r、g、b 分量对应于法向量的 x、y 和 z 坐标分量。所有的 z 值都应该是正的,这就是为什么法线贴图有一个蓝色的色调。

我们来修改 model.rs 中的材质 Material 结构体,新增一个法线纹理 normal_texture 字段:

rust
pub struct Material {
+    pub name: String,
+    pub diffuse_texture: texture::Texture,
+    pub normal_texture: texture::Texture, // 更新!
+    pub bind_group: wgpu::BindGroup,
+}

还得更新纹理绑定组布局 texture_bind_group_layout 以包括法线贴图:

rust
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+    entries: &[
+        // ...
+        // 法线贴图
+        wgpu::BindGroupLayoutEntry {
+            binding: 2,
+            visibility: wgpu::ShaderStages::FRAGMENT,
+            ty: wgpu::BindingType::Texture {
+                multisampled: false,
+                sample_type: wgpu::TextureSampleType::Float { filterable: true },
+                view_dimension: wgpu::TextureViewDimension::D2,
+            },
+            count: None,
+        },
+        wgpu::BindGroupLayoutEntry {
+            binding: 3,
+            visibility: wgpu::ShaderStages::FRAGMENT,
+            ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
+            count: None,
+        },
+    ],
+    label: Some("texture_bind_group_layout"),
+});

resources.rsload_model() 函数中创建材质的循环里,添加以下代码来实际加载法线贴图:

rust
// resources.rs
+let mut materials = Vec::new();
+for m in obj_materials? {
+    let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
+    // 新增!
+    let normal_texture = load_texture(&m.normal_texture, device, queue).await?;
+
+    materials.push(model::Material::new(
+        device,
+        &m.name,
+        diffuse_texture,
+        normal_texture, // 新增!
+        layout,
+    ));
+}

上面使用的 Material::new() 函数的具体代码如下:

rust
impl Material {
+    pub fn new(
+        device: &wgpu::Device,
+        name: &str,
+        diffuse_texture: texture::Texture,
+        normal_texture: texture::Texture, // 新增!
+        layout: &wgpu::BindGroupLayout,
+    ) -> Self {
+        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            layout,
+            entries: &[
+                wgpu::BindGroupEntry {
+                    binding: 0,
+                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 1,
+                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
+                },
+                // 新增!
+                wgpu::BindGroupEntry {
+                    binding: 2,
+                    resource: wgpu::BindingResource::TextureView(&normal_texture.view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 3,
+                    resource: wgpu::BindingResource::Sampler(&normal_texture.sampler),
+                },
+            ],
+            label: Some(name),
+        });
+
+        Self {
+            name: String::from(name),
+            diffuse_texture,
+            normal_texture, // 新增!
+            bind_group,
+        }
+    }
+}

现在我们可以在片元着色器中使用纹理了:

rust
// 片元着色器
+
+@group(0) @binding(0)
+var t_diffuse: texture_2d<f32>;
+@group(0)@binding(1)
+var s_diffuse: sampler;
+@group(0)@binding(2)
+var t_normal: texture_2d<f32>;
+@group(0) @binding(3)
+var s_normal: sampler;
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    let object_color: vec4f = textureSample(t_diffuse, s_diffuse, in.tex_coords);
+    let object_normal: vec4f = textureSample(t_normal, s_normal, in.tex_coords);
+    // 环境光强度
+    let ambient_strength = 0.1;
+    let ambient_color = light.color * ambient_strength;
+
+    // Create the lighting vectors
+    let tangent_normal = object_normal.xyz * 2.0 - 1.0;
+    let light_dir = normalize(light.position - in.world_position);
+    let view_dir = normalize(camera.view_pos.xyz - in.world_position);
+    let half_dir = normalize(view_dir + light_dir);
+
+    let diffuse_strength = max(dot(tangent_normal, light_dir), 0.0);
+    let diffuse_color = light.color * diffuse_strength;
+
+    let specular_strength = pow(max(dot(tangent_normal, half_dir), 0.0), 32.0);
+    let specular_color = specular_strength * light.color;
+
+    let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
+
+    return vec4f(result, object_color.a);
+}

如果现在运行代码,你会发现渲染效果看起来不太对劲。让我们将效果与上一个教程比较一下:

场景中应该被点亮的部分是黑暗的,反之亦然。

从切空间到世界空间

光照教程的法线矩阵 部分有提到:我们是在世界空间中进行光照计算的。也就是说,整个场景的方向是相对于世界坐标系而言的。 从法线纹理中提取的法向量都处在正 Z 方向上,也就是说我们的光照计算认为模型的所有表面都朝向大致相同的方向。这被称为切空间(Tangent Space,也叫做切向量空间)。

光照教程 中我们用顶点法向量来表示表面的方向。现在,可以用它来将法线贴图中的法向量从切空间变换到世界空间。实现此变换需要用到一点点线性代数。

我们将创建一个矩阵,代表相对于顶点法向量的坐标空间(Coordinate Space)。然后使用它来变换法线贴图数据,使其处于世界空间:

rust
let coordinate_system = mat3x3f(
+    vec3(1, 0, 0), // x axis (右)
+    vec3(0, 1, 0), // y axis (上)
+    vec3(0, 0, 1)  // z axis (前)
+);

切向量与副切向量

我们已经有了需要的 3 个向量中的一个,即法向量。另外两个是切向量(Tangent Vector)与副切向量(Bitangent Vector, 也被叫作副法向量(Binormal))。切向量是与法向量垂直且表面平行的向量(也就是不与表面相交)。副切向量是同时垂直于由法向量与切向量的向量,所以可以由法向量与切向量的叉积计算得出。切向量、副切向量和法向量一起分别代表坐标空间 x、y 和 z 轴。

一些模型格式会在顶点数据中包括切向量副切向量,但 OBJ 没有。我们得手动计算,可以从现有的顶点数据中推导出切向量与副切向量。请看下图:

可以使用三角形的边和法线来计算切向量与副切向量。首先,我们需要更新在 model.rs 中的顶点 ModelVertex 结构体:

rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+pub struct ModelVertex {
+    position: [f32; 3],
+    tex_coords: [f32; 2],
+    normal: [f32; 3],
+    // 新增!
+    tangent: [f32; 3],
+    bitangent: [f32; 3],
+}

同时也需要更新顶点缓冲区布局 VertexBufferLayout:

rust
impl Vertex for ModelVertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                // ...
+
+                // Tangent and bitangent
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
+                    shader_location: 3,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 11]>() as wgpu::BufferAddress,
+                    shader_location: 4,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+            ],
+        }
+    }
+}

现在可以计算新的切向量副切向量了, 用以下代码来更新 resource.rsload_model() 函数的网格生成:

rust
let meshes = models
+    .into_iter()
+    .map(|m| {
+        let mut vertices = (0..m.mesh.positions.len() / 3)
+            .map(|i| model::ModelVertex {
+                position: [
+                    m.mesh.positions[i * 3],
+                    m.mesh.positions[i * 3 + 1],
+                    m.mesh.positions[i * 3 + 2],
+                ],
+                tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
+                normal: [
+                    m.mesh.normals[i * 3],
+                    m.mesh.normals[i * 3 + 1],
+                    m.mesh.normals[i * 3 + 2],
+                ],
+                // 随后会计算实际值来替换
+                tangent: [0.0; 3],
+                bitangent: [0.0; 3],
+            })
+            .collect::<Vec<_>>();
+
+        let indices = &m.mesh.indices;
+        let mut triangles_included = vec![0; vertices.len()];
+
+        // 遍历三角形的三个顶点来计算切向量与副切向量.
+        for c in indices.chunks(3) {
+            let v0 = vertices[c[0] as usize];
+            let v1 = vertices[c[1] as usize];
+            let v2 = vertices[c[2] as usize];
+
+            let pos0: glam::Vec3 = v0.position.into();
+            let pos1: glam::Vec3 = v1.position.into();
+            let pos2: glam::Vec3 = v2.position.into();
+
+            let uv0: glam::Vec2 = v0.tex_coords.into();
+            let uv1: glam::Vec2 = v1.tex_coords.into();
+            let uv2: glam::Vec2 = v2.tex_coords.into();
+
+            // 计算三角形的边
+            let delta_pos1 = pos1 - pos0;
+            let delta_pos2 = pos2 - pos0;
+
+            // 计算切向量/副切向量需要用到的两个方向向量
+            let delta_uv1 = uv1 - uv0;
+            let delta_uv2 = uv2 - uv0;
+
+            // 求解以下方程组
+            //     delta_pos1 = delta_uv1.x * T + delta_u.y * B
+            //     delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
+            // 幸运的是,在我发现这个方程的地方提供了如下求解方案!
+            let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
+            let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
+            // 我们翻转副切向量以启用具有 wgpu 纹理坐标系的右手标架的法线贴图
+            let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * -r;
+
+            // 我们为三角形中的每个顶点使用相同的切向量/副切向量
+            vertices[c[0] as usize].tangent =
+                (tangent + glam::Vec3::from_array(vertices[c[0] as usize].tangent)).into();
+            vertices[c[1] as usize].tangent =
+                (tangent + glam::Vec3::from_array(vertices[c[1] as usize].tangent)).into();
+            vertices[c[2] as usize].tangent =
+                (tangent + glam::Vec3::from_array(vertices[c[2] as usize].tangent)).into();
+            vertices[c[0] as usize].bitangent =
+                (bitangent + glam::Vec3::from_array(vertices[c[0] as usize].bitangent)).into();
+            vertices[c[1] as usize].bitangent =
+                (bitangent + glam::Vec3::from_array(vertices[c[1] as usize].bitangent)).into();
+            vertices[c[2] as usize].bitangent =
+                (bitangent + glam::Vec3::from_array(vertices[c[2] as usize].bitangent)).into();
+
+            // 用于计算顶点上切向量/副切向量的平均值
+            triangles_included[c[0] as usize] += 1;
+            triangles_included[c[1] as usize] += 1;
+            triangles_included[c[2] as usize] += 1;
+        }
+
+        // 计算切向量/副切向量的平均值
+        for (i, n) in triangles_included.into_iter().enumerate() {
+            let denom = 1.0 / n as f32;
+            let mut v = &mut vertices[i];
+            v.tangent = (glam::Vec3::from_array(v.tangent) * denom).into();
+            v.bitangent = (glam::Vec3::from_array(v.bitangent) * denom).into();
+        }
+
+        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+            label: Some(&format!("{:?} Vertex Buffer", file_name)),
+            contents: bytemuck::cast_slice(&vertices),
+            usage: wgpu::BufferUsages::VERTEX,
+        });
+        let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+            label: Some(&format!("{:?} Index Buffer", file_name)),
+            contents: bytemuck::cast_slice(&m.mesh.indices),
+            usage: wgpu::BufferUsages::INDEX,
+        });
+
+        model::Mesh {
+            name: file_name.to_string(),
+            vertex_buffer,
+            index_buffer,
+            num_elements: m.mesh.indices.len() as u32,
+            material: m.mesh.material_id.unwrap_or(0),
+        }
+    })
+    .collect::<Vec<_>>();

从世界空间到切空间

由于法线贴图默认是在切空间中,该计算中使用的所有其他变量也得变换为切空间。我们需要在顶点着色器中构建切向量矩阵,首先,修改 VertexInput 来包括之前计算的切向量与副切向量:

rust
struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) tex_coords: vec2f;
+    @location(2) normal: vec3f;
+    @location(3) tangent: vec3f;
+    @location(4) bitangent: vec3f;
+};

接下来构建切向量矩阵 tangent_matrix,然后将顶点,光源和视图坐标变换到切空间:

rust
struct VertexOutput {
+    @builtin(position) clip_position: vec4f;
+    @location(0) tex_coords: vec2f;
+    // 更新!
+    @location(1) tangent_position: vec3f;
+    @location(2) tangent_light_position: vec3f;
+    @location(3) tangent_view_position: vec3f;
+};
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    // ...
+    let normal_matrix = mat3x3f(
+        instance.normal_matrix_0,
+        instance.normal_matrix_1,
+        instance.normal_matrix_2,
+    );
+
+    // 构建切向量矩阵
+    let world_normal = normalize(normal_matrix * model.normal);
+    let world_tangent = normalize(normal_matrix * model.tangent);
+    let world_bitangent = normalize(normal_matrix * model.bitangent);
+    let tangent_matrix = transpose(mat3x3f(
+        world_tangent,
+        world_bitangent,
+        world_normal,
+    ));
+
+    let world_position = model_matrix * vec4f(model.position, 1.0);
+
+    var out: VertexOutput;
+    out.clip_position = camera.view_proj * world_position;
+    out.tex_coords = model.tex_coords;
+    out.tangent_position = tangent_matrix * world_position.xyz;
+    out.tangent_view_position = tangent_matrix * camera.view_pos.xyz;
+    out.tangent_light_position = tangent_matrix * light.position;
+    return out;
+}

最后,更新片元着色器以使用这些转换后的光照值:

rust
@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    // Sample textures..
+
+    // 光照计算需要的向量
+    let tangent_normal = object_normal.xyz * 2.0 - 1.0;
+    let light_dir = normalize(in.tangent_light_position - in.tangent_position);
+    let view_dir = normalize(in.tangent_view_position - in.tangent_position);
+
+    // 执行光照计算...
+}

完成上边的计算,我们会得到如下渲染效果:

sRGB 与法线纹理

光线的强度是对其能量的物理度量,而亮度 (brightness) 度量的是人眼所感知到的光线强度。 由于人眼中的光感受器对不同波长的光线能量的响应不同,即使红光和绿光的物理强度相同,在我们看来它们也并不具有相同的亮度,事实上,人眼是按对数关系来感知光线强度的。根据人类视觉系统所具有的这种特性,如果希望亮度看起来按等间隔的步长递增,那么赋给像素的光强值应该按指数的形式递增。显示设备可以根据所能产生的最小和最大光强值通过计算得到亮度变化的步长。

sRGB 色彩空间是一种于计算机显示设备和打印机等设备的标准颜色系统,包括 WebGPU 在内的大部分图形绘制系统都支持 sRGB。它通过对色值的 𝛄 (gamma) 编码,实现了图像在有限的色值范围(红、绿、蓝每个颜色通道的取值都在 [0, 255] 范围内)内隐藏人眼对色彩的感知差异。

GPU 硬件对 sRGB 色彩空间提供了特殊支持,可以将颜色值从线性值转换到 𝛄 编码,并通过 𝛄 校正(Gamma Correction)解码回线性值。 我们一直在使用 Rgba8UnormSrgb 格式来制作所有的纹理。Srgb 位就是指示 wgpu:

  • 当着色器代码对 sRGB 格式的纹理进行采样时,GPU 硬件要将其从 sRGB 采样值解码为线性值再返回给着色器;
  • 当着色器代码写入线性颜色值到 sRGB 格式的纹理时,GPU 硬件要对其进行 𝛄 编码后再写入;

如果纹理数据不是基于 sRGB 色彩空间制作的,但指定了 RgbaUnormSrgb 格式,会由于改变了 GPU 对纹理的采样方式而导致渲染结果与预期不符。 这可以通过在创建纹理时使用 Rgba8Unorm 来避免。让我们给 Texture 结构体添加一个 is_normal_map 参数。

rust
pub fn from_image(
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    img: &image::DynamicImage,
+    label: Option<&str>,
+    is_normal_map: bool, // 新增!
+) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
+    // ...
+    let texture = device.create_texture(&wgpu::TextureDescriptor {
+        label,
+        size,
+        mip_level_count: 1,
+        sample_count: 1,
+        dimension: wgpu::TextureDimension::D2,
+        // 更新!
+        format: if is_normal_map {
+            wgpu::TextureFormat::Rgba8Unorm
+        } else {
+            wgpu::TextureFormat::Rgba8UnormSrgb
+        },
+        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
+        view_formats: &[],
+    });
+
+    // ...
+
+    Ok((Self { texture, view, sampler }, cmd_buffer))
+}

并将这一修改同步到其他有关的函数:

rust
pub fn from_bytes(
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    bytes: &[u8],
+    label: &str,
+    is_normal_map: bool, // 新增!
+) -> Result<Self> {
+    let img = image::load_from_memory(bytes)?;
+    Self::from_image(device, queue, &img, Some(label), is_normal_map) // 更新!
+}

同时也还要更新 resource.rs

rust
pub async fn load_texture(
+    file_name: &str,
+    is_normal_map: bool,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+) -> anyhow::Result<texture::Texture> {
+    let data = load_binary(file_name).await?;
+    texture::Texture::from_bytes(device, queue, &data, file_name, is_normal_map)
+}
+
+pub async fn load_model(
+    file_name: &str,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    layout: &wgpu::BindGroupLayout,
+) -> anyhow::Result<model::Model> {
+    // ...
+
+    let mut materials = Vec::new();
+    for m in obj_materials? {
+        let diffuse_texture = load_texture(&m.diffuse_texture, false, device, queue).await?; // 更新!
+        let normal_texture = load_texture(&m.normal_texture, true, device, queue).await?; // 更新!
+
+        materials.push(model::Material::new(
+            device,
+            &m.name,
+            diffuse_texture,
+            normal_texture,
+            layout,
+        ));
+    }
+}

现在的渲染效果如下:

试试其他材质

现在改用其他材质来试试效果,在 DrawModel trait 中添加了一个 draw_model_instanced_with_material() 接口并在渲染通道对象上实现此接口:

rust
pub trait DrawModel<'a> {
+    // ...
+    fn draw_model_instanced_with_material(
+        &mut self,
+        model: &'a Model,
+        material: &'a Material,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+}
+
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    // ...
+    fn draw_model_instanced_with_material(
+        &mut self,
+        model: &'b Model,
+        material: &'b Material,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
+        }
+    }
+}

我找到了一个鹅卵石纹理及匹配的法线贴图,并为它创建一个叫 debug_material 的材质实例:

rust
// lib.rs
+impl State {
+    async fn new(window: &Window) -> Result<Self> {
+        // ...
+        let debug_material = {
+            let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
+            let normal_bytes = include_bytes!("../res/cobble-normal.png");
+
+            let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "res/alt-diffuse.png", false).unwrap();
+            let normal_texture = texture::Texture::from_bytes(&device, &queue, normal_bytes, "res/alt-normal.png", true).unwrap();
+
+            model::Material::new(&device, "alt-material", diffuse_texture, normal_texture, &texture_bind_group_layout)
+        };
+        Self {
+            // ...
+            #[allow(dead_code)]
+            debug_material,
+        }
+    }
+}

然后调用刚实现的 draw_model_instanced_with_material() 函数来使用 debug_material 渲染:

rust
render_pass.set_pipeline(&self.render_pipeline);
+render_pass.draw_model_instanced_with_material(
+    &self.obj_model,
+    &self.debug_material,
+    0..self.instances.len() as u32,
+    &self.camera_bind_group,
+    &self.light_bind_group,
+);

得到的渲染效果如下:

上面使用的纹理可以在 Github 源码库中找到。

',67);function c(b,u,m,D,o,C){const a=s("WasmExample"),n=s("AutoGithubLink");return l(),k("div",null,[A,i(a,{example:"tutorial11_normals"}),i(n)])}const v=h(F,[["render",c]]);export{_ as __pageData,v as default}; diff --git a/assets/intermediate_tutorial11-normals_index.md.JKRpqUo4.lean.js b/assets/intermediate_tutorial11-normals_index.md.JKRpqUo4.lean.js new file mode 100644 index 000000000..549664433 --- /dev/null +++ b/assets/intermediate_tutorial11-normals_index.md.JKRpqUo4.lean.js @@ -0,0 +1 @@ +import{_ as h,D as s,o as l,c as k,I as i,R as p}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/cube-normal.ojipvyDD.png",e="/learn-wgpu-zh/assets/normal_mapping_wrong.1lc7KaJ-.png",r="/learn-wgpu-zh/assets/ambient_diffuse_specular_lighting.5nUogza3.png",E="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAAGQCAYAAACAvzbMAAABg2lDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV9TS0UqDmaQ4pChOlkQFXHUKhShQqgVWnUwufQLmrQkKS6OgmvBwY/FqoOLs64OroIg+AHi5Oik6CIl/i8ptIjx4Lgf7+497t4BQrPCdKtnHNAN20wnE1I2tyqFXxFCECKigMKs2pwsp+A7vu4R4OtdnGf5n/tz9Gt5iwEBiXiW1UybeIN4etOucd4nFllJ0YjPicdMuiDxI9dVj984F10WeKZoZtLzxCKxVOxitYtZydSJp4hjmm5QvpD1WOO8xVmv1Fn7nvyFkbyxssx1msNIYhFLkCFBRR1lVGAjTqtBioU07Sd8/FHXL5NLJVcZjBwLqEKH4vrB/+B3t1ZhcsJLiiSA0IvjfIwA4V2g1XCc72PHaZ0AwWfgyuj4q01g5pP0RkeLHQED28DFdUdT94DLHWDoqaaYiisFaQqFAvB+Rt+UAwZvgb41r7f2Pk4fgAx1lboBDg6B0SJlr/u8u7e7t3/PtPv7AfEscnMV1+LuAAAABmJLR0QAAACSAP8q1jhsAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH5AUDFCAS5kaN7AAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAApfSURBVHja7d1PbhNnHMfh14YjINTsuEXUayCx8BFKRGRlWXVZdYkiV0mP4EUkroFyi+yool4huCsH1409M55/7++d51khSAMYaT79vjOJZ2m12SQAaGjuJQBAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAAMjXay9BbN8uZ88/PlttvCDAYGZptXHVCR6OfUICCAiN4yEkwFDcAxEaAAERBQABQXAAAUFEAAEhbAhEBBAQ8RARQEAY/sIvIoCAiIeIAALC8Bd6EQEERDxEBBAQhr+wiwggIOIhIoCAACAgBFkCVgggIAAICAACQg1jvgGUN58CBAQAAbFCrA8gX94TPWN9PxklHICAiItQAINzhAWAgAAgIAAICAACAgACAoCAA
CAgAAgIAAICAAICgIAAICAACAgAAgIAAgKAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAkJXXXgKA8S0eb59/vH77UUAAqB+O/Z/LPSSOsAAyisf+r1d9jIAAEC4kAgKQ4fqIEBIBAZhAfAQEgGzWiIAACImAAAiJgAAUra+v8RgyJAICUOgiERCAAi/uQ1zg+/49fCsTgID/99/kz9LXcZmAAASNw9gEBKDwQPS1QgQEEAcEBBAIBARAHDriJjogDlgggEAQe30ICCAOBVvOn9LaAgEEgqbx6JuAgDgw8kX++vurXj7v+ZtPAgIIhBWQ3+8nIJTv179//PiPn8SBkBfrpn+evteHgDCdcOz/XICQCMR0ArGcP7U6xhojHgLCtOKx/+sjRkQcrIc+/i5DhUNAoMc1IhACUXo8BIRpro+Wa0QcxKGt8zef0v0/f/7v71X3GGvscAgIHIiIQIhD3/bjEWl1CAgciMhi+cXrIBCjvj7HVkhO8UgppVlabTb+2eL5djmr/bFnq+n8E2/Xw/r6fbvPIyTiMLLdkOQWDgERkLBxqNI2HiIiELnKKR4pOcIiWByGtL5+P5mIiINwCAiTD0RX62P/80UPiUCIh4BgPVgj4iAcAoJA1FkROV20x4qIQIhHrtxED6qzm+g7X3TX9uLY9sJ/7Pipzufr+viqzZ9FHCg9HhbIhBfESxfbU8/7u/hcVRf/qDe0BYISwyEgE9blxbrUC3+dv5M4MOV4pJTS3D8duQet6cdZFkQKR9R4CIiLdauPi3jhB6ujO46woKH7qzsvApMOh4CQjcXyS62Vsr2Xcuj4aO2lRDwG5Qhrghfrrj6uy89Vx3L+dDAe558/WB+IhwVCHWerTa2vBbn67SarP/ehACyv7o5GYOwLt3AgHAJCqj4yarIYqj5Xkwvv9mN3QyIciEe+fCV65BA83qbPv1+0Wh9Nvnq86tHVMS/8XR9hCQfCISBFx2PXbkhOObaK/nUNXQVEOBCP+hxhFaIqGr7wTTgQDgHh2cPFj9Xx7uZm0vG4r7gJLxyIh4CQUvr5wyw9HIjJsZAgHAhHl9wDCRiPKvsRmdLxVdUKEQ7EwwKBo4HYD4lwIB4WiPVR0+4KcQMdhKMPvpUJgHicxBEWgHBYIKX7elfvtNHxFYiHBQIgHBYIw6wQXwcC4mGBUBmJul+JDoiHgGBtgHBkwxFWMPvfhRcQDwsEQDgsEPLgEV4QDwsEQDgsEADxsEAYmcd4QTwEhJPDsf9zQgLCMRRHWIEsHm9fjEdVXEA8sEAAhCMb3lAqkCZvKHX918oLhnhggQAIR37cAwHEAwEpXd0nrDyJxVTCIR4CAmB1CAh92X4X3qp1YX0gHgzFTfSAvKEUwoGA0ElI9vkuvIgHAgIIB9lyDwQQDywQQDiwQADxQEBoa/sIL4gHOXGEBQgHFgge4UU8sEAA4UBA2Gryfh5f77xNC+KBgAhHg3C89N881PxvfDsThIMheUfCDOPRlncjRDwQEAFpHZHlL5fCgnggIOJhoSAc5MNjvBO0u0pAPDiVm+iAcGCB5CbnR3GtEMQDCwQQDiwQKwTEAwuEzHkSC+HAArFCQDwQEKwPxIM4HGENuEJy/cJCEA4skAARsT4QDywQwhEPhAMLxAoRD8QDCwQQDiwQMl0h1gfigYAgHogH2XCENeIK8VgvwoEFwskRsT4QDywQsiIeCAcWiBUiHogHFgggHFggZLpCrA/EAwEREfFAPMiaIywQDrBAprxCrA/EAwsE8UA4sEDof4WIB+KBBUJKKaXF42168DIgHFggnOLdzU2nH4d4gICIyH9+fTl/8kKJB4zGEVaAiDxcXFgdCAcCQvdrBPEAAQGEgzDcAwHxAAskusXjrRcB4cACAcQDC4QMeYRXOMACAcQDAQHEg+lwhAXCARZIZJ7AEg+wQADhwAIhT57AEg+wQEA4wAIBxAMBAcQDKjnCAuEACyQqj/CKB1ggIBxggZAnj
/CKB1ggIBxggYB4gIAA4gG1OMIC4QALJCKP8IoHWCAgHGCBkCeP8IoHWCAgHGCBgHiABQLCAVggIB4gIMXyCK94QGSOsBAOwAIpmUd4xQMsEBAOsEBAPMACAeEALBAQDxCQYnmEVzwgOkdYCAdggZTKI7ziARYICAdYICAeYIGAcAAWCOIBCEixPMIrHlACR1gIB2CBlMgjvOIBFggIB1ggIB6ABYJwABYI4gFYIMXyCK9wgAUC4gECQp5Kf4RXPCA2R1gIB2CBIB6ABYJwABYI4iEeYIHQytQe4RUOsEBAPAABiSLyI7ziAdPgCAvhACwQxAOwQBAOwAJBPAALhJOV9AivcAAWCOIBCEhJcn2EVzyAXY6wWjh0LLV++9HqAASE+uGo++viAQiIeBRLOAABCej6+6vnH49xP0Q8gDpmabXZeBnirI8+gyIcQBOewgq4TnYXingAY3GEFTgkXawR4QAskAF4PBdAQIpZIeIBCIgVMtjqEA9AQLA6gNF4jPdEkR7pFQ7AAslIlKMs8QAsEEuk0foQDsACQTwAASnR0EdZ4gHkwhFWR/o+yhIOwAJBPAALhP5XyLF4CAcgICJidQDh+G68GbI6gAjcA+lY26eyxAOIwhFWT5oeZTmyAiwQOlkiu+EQD0BAaLQ+hAMQECvE6gCK4x7IAI7dD9ldH6IBCAiVISnt/dUBAQGAWtwDAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQAAQEAAQFAQAAQEAAEBAABAQABAUBAABAQAAQEAAEBAAEBQEAAEBAABAQABAQAAQFAQAAQEAAEBAAEBAABAUBAABAQAAQEAAQEAAEBQEAAEBAABAQABAQAAQFAQAAQEAAEBAAEBAABAUBAABAQAAQEAAQEAAEBQEAAEBAABAQABAQAAQFAQAAQEAAExEsAgIAAICAACAgAAgIAAgKAgAAgIAAICAACAgACAoCAACAgAAgIAAICAAICgIAAICAACAgAU/EvfiluCemVcXgAAAAASUVORK5CYII=",d="/learn-wgpu-zh/assets/normal_mapping_correct.EKHmPX5_.png",g="/learn-wgpu-zh/assets/no_srgb.TzdPNppd.png",y="/learn-wgpu-zh/assets/debug_material.knbjh1ZS.png",_=JSON.parse('{"title":"法线映射","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial11-normals/index.md","filePath":"intermediate/tutorial11-normals/index.md","lastUpdated":1703303099000}'),F={name:"intermediate/tutorial11-normals/index.md"},A=p("",67);function c(b,u,m,D,o,C){const a=s("WasmExample"),n=s("AutoGithubLink");return l(),k("div",null,[A,i(a,{example:"tutorial11_normals"}),i(n)])}const v=h(F,[["render",c]]);export{_ as __pageData,v as default}; diff --git a/assets/intermediate_tutorial12-camera_index.md.A0Ys8PDv.js b/assets/intermediate_tutorial12-camera_index.md.A0Ys8PDv.js new file mode 100644 index 000000000..ea7413a52 --- /dev/null +++ b/assets/intermediate_tutorial12-camera_index.md.A0Ys8PDv.js 
@@ -0,0 +1,313 @@ +import{_ as l,D as s,o as h,c as p,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/left_right_hand._ZtWXizh.gif",e="/learn-wgpu-zh/assets/screenshot.hdnOjorV.png",m=JSON.parse('{"title":"更好的摄像机","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial12-camera/index.md","filePath":"intermediate/tutorial12-camera/index.md","lastUpdated":1703303099000}'),r={name:"intermediate/tutorial12-camera/index.md"},E=k(`

更好的摄像机

这个问题已经被推迟了一段时间。实现一个虚拟摄像机与正确使用 wgpu 关系不大,但它一直困扰着我,所以现在来实现它吧。

lib.rs 已经堆砌很多代码了,所以我们创建一个 camera.rs 文件来放置摄像机代码。先导入一些要用到的文件:

rust
use winit::event::*;
+use winit::dpi::PhysicalPosition;
+use instant::Duration;
+use std::f32::consts::FRAC_PI_2;
+
+const SAFE_FRAC_PI_2: f32 = FRAC_PI_2 - 0.0001;

在 WASM 中使用 std::time::instant 会导致程序恐慌,所以我们使用 instant 包来替代,在 Cargo.toml 引入此依赖:

toml
instant = "0.1"

虚拟摄像机

接下来,需要创建一个新的 Camera 结构体。我们将使用一个 FPS 风格的摄像机,所以要存储位置(position)、 yaw(偏航,水平旋转)以及 pitch(俯仰,垂直旋转), 定义并实现一个 calc_matrix 函数用于创建视图矩阵:

rust
#[derive(Debug)]
+pub struct Camera {
+    pub position: glam::Vec3,
+    yaw: f32,
+    pitch: f32,
+}
+
+impl Camera {
+    pub fn new<V: Into<glam::Vec3>>(position: V, yaw: f32, pitch: f32) -> Self {
+        Self {
+            position: position.into(),
+            yaw,
+            pitch,
+        }
+    }
+
+    pub fn calc_matrix(&self) -> glam::Mat4 {
+        let (sin_pitch, cos_pitch) = self.pitch.sin_cos();
+        let (sin_yaw, cos_yaw) = self.yaw.sin_cos();
+
+        glam::Mat4::look_to_rh(
+            self.position,
+            glam::Vec3::new(cos_pitch * cos_yaw, sin_pitch, cos_pitch * sin_yaw).normalize(),
+            glam::Vec3::Y,
+        )
+    }
+}

投影

只有在窗口调整大小时,投影(Projection)才真正需要改变,所以我们将投影与摄像机分开,创建一个 Projection 结构体:

rust
pub struct Projection {
+    aspect: f32,
+    fovy: f32,
+    znear: f32,
+    zfar: f32,
+}
+
+impl Projection {
+    pub fn new(width: u32, height: u32, fovy: f32, znear: f32, zfar: f32) -> Self {
+        Self {
+            aspect: width as f32 / height as f32,
+            fovy: fovy.to_radians(),
+            znear,
+            zfar,
+        }
+    }
+
+    pub fn resize(&mut self, width: u32, height: u32) {
+        self.aspect = width as f32 / height as f32;
+    }
+
+    pub fn calc_matrix(&self) -> glam::Mat4 {
+        glam::Mat4::perspective_rh(self.fovy, self.aspect, self.znear, self.zfar)
+    }
+}

有一点需要注意:从 perspective_rh 函数返回的是右手坐标系(right-handed coordinate system)的投影矩阵。也就是说,Z 轴是指向屏幕外的,想让 Z 轴指向屏幕内(也就是左手坐标系的投影矩阵)需要使用 perspective_lh

可以这样分辨右手坐标系和左手坐标系的区别:在身体的正前方把你的拇指指向右边代表 X 轴,食指指向上方代表 Y 轴,伸出中指代表 Z 轴。此时在你的右手上,中指应该指是向你自己。而在左手上,应该是指向远方。

./left_right_hand.gif

摄像机控制器

现在,我们需要一个新的摄像机控制器,在 camera.rs 中添加以下代码:

rust
#[derive(Debug)]
+pub struct CameraController {
+    amount_left: f32,
+    amount_right: f32,
+    amount_forward: f32,
+    amount_backward: f32,
+    amount_up: f32,
+    amount_down: f32,
+    rotate_horizontal: f32,
+    rotate_vertical: f32,
+    scroll: f32,
+    speed: f32,
+    sensitivity: f32,
+}
+
+impl CameraController {
+    pub fn new(speed: f32, sensitivity: f32) -> Self {
+        Self {
+            amount_left: 0.0,
+            amount_right: 0.0,
+            amount_forward: 0.0,
+            amount_backward: 0.0,
+            amount_up: 0.0,
+            amount_down: 0.0,
+            rotate_horizontal: 0.0,
+            rotate_vertical: 0.0,
+            scroll: 0.0,
+            speed,
+            sensitivity,
+        }
+    }
+
+    pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool{
+        let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
+        match key {
+            VirtualKeyCode::W | VirtualKeyCode::Up => {
+                self.amount_forward = amount;
+                true
+            }
+            VirtualKeyCode::S | VirtualKeyCode::Down => {
+                self.amount_backward = amount;
+                true
+            }
+            VirtualKeyCode::A | VirtualKeyCode::Left => {
+                self.amount_left = amount;
+                true
+            }
+            VirtualKeyCode::D | VirtualKeyCode::Right => {
+                self.amount_right = amount;
+                true
+            }
+            VirtualKeyCode::Space => {
+                self.amount_up = amount;
+                true
+            }
+            VirtualKeyCode::LShift => {
+                self.amount_down = amount;
+                true
+            }
+            _ => false,
+        }
+    }
+
+    pub fn process_mouse(&mut self, mouse_dx: f64, mouse_dy: f64) {
+        self.rotate_horizontal = mouse_dx as f32;
+        self.rotate_vertical = mouse_dy as f32;
+    }
+
+    pub fn process_scroll(&mut self, delta: &MouseScrollDelta) {
+        self.scroll = -match delta {
+            // 假定一行为 100 个像素,你可以随意修改这个值
+            MouseScrollDelta::LineDelta(_, scroll) => scroll * 100.0,
+            MouseScrollDelta::PixelDelta(PhysicalPosition {
+                y: scroll,
+                ..
+            }) => *scroll as f32,
+        };
+    }
+
+    pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) {
+        let dt = dt.as_secs_f32();
+
+        // 前后左右移动
+        let (yaw_sin, yaw_cos) = camera.yaw.sin_cos();
+        let forward = glam::Vec3::new(yaw_cos, 0.0, yaw_sin).normalize();
+        let right = glam::Vec3::new(-yaw_sin, 0.0, yaw_cos).normalize();
+        camera.position += forward * (self.amount_forward - self.amount_backward) * self.speed * dt;
+        camera.position += right * (self.amount_right - self.amount_left) * self.speed * dt;
+
+        // 变焦(缩放)
+        // 注意:这不是一个真实的变焦。
+        // 通过摄像机的位置变化来模拟变焦,使你更容易靠近想聚焦的物体。
+        let (pitch_sin, pitch_cos) = camera.pitch.sin_cos();
+        let scrollward = glam::Vec3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
+        camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
+        self.scroll = 0.0;
+
+        // 由于我们没有使用滚动,所以直接修改 y 坐标来上下移动。
+        camera.position.y += (self.amount_up - self.amount_down) * self.speed * dt;
+
+        // 旋转
+        camera.yaw += self.rotate_horizontal * self.sensitivity * dt;
+        camera.pitch += -self.rotate_vertical * self.sensitivity * dt;
+
+        // 重置旋转值为 0。没有鼠标移动发生时,摄像机就停止旋转。
+        self.rotate_horizontal = 0.0;
+        self.rotate_vertical = 0.0;
+
+        // 保持摄像机的角度不要太高/太低。
+        if camera.pitch < -SAFE_FRAC_PI_2 {
+            camera.pitch = -SAFE_FRAC_PI_2;
+        } else if camera.pitch > SAFE_FRAC_PI_2 {
+            camera.pitch = SAFE_FRAC_PI_2;
+        }
+    }
+}

清理 lib.rs

首先,我们从 lib.rs 中删除 CameraCameraController,然后导入 camera.rs

rust
mod model;
+mod texture;
+mod camera; // 新增!

接着更新 update_view_proj 以使用新的 CameraProjection

rust

+impl CameraUniform {
+    // ...
+
+    // 更新!
+    fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
+        self.view_position = camera.position.extend(1.0).into();
+        self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into();
+    }
+}

我们还要修改 State 来使用新的 CameraCameraProjectionProjection,再添加一个mouse_pressed 字段来存储鼠标是否被按下:

rust
struct State {
+    // ...
+    camera: camera::Camera, // 更新!
+    projection: camera::Projection, // 新增!
+    camera_controller: camera::CameraController, // 更新!
+    // ...
+    // 新增!
+    mouse_pressed: bool,
+}

别忘了需要导入 winit::dpi::PhysicalPosition

然后更新 new() 函数:

rust
impl State {
+    async fn new(window: &Window) -> Self {
+        // ...
+
+        // 更新!
+        let camera = camera::Camera::new((0.0, 5.0, 10.0), -90.0, -20.0);
+        let projection = camera::Projection::new(config.width, config.height, 45.0, 0.1, 100.0);
+        let camera_controller = camera::CameraController::new(4.0, 0.4);
+
+        // ...
+
+        camera_uniform.update_view_proj(&camera, &projection); // 更新!
+
+        // ...
+
+        Self {
+            // ...
+            camera,
+            projection, // 新增!
+            camera_controller,
+            // ...
+            mouse_pressed: false, // 新增!
+        }
+    }
+}

接着在 resize 函数中更新投影矩阵 projection

rust
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+    // 更新!
+    self.projection.resize(new_size.width, new_size.height);
+    // ...
+}

事件输入函数 input() 也需要被更新。 到目前为止,我们一直在使用 WindowEvent 来控制摄像机,这很有效,但它并不是最好的解决方案。winit 文档告诉我们,操作系统通常会对 CursorMoved 事件的数据进行转换,以实现光标加速等效果。

现在为了解决这个问题,可以修改 input() 函数来处理 DeviceEvent 而不是 WindowEvent,但是在 macOS 和 WASM 上,键盘和按键事件不会被当作 DeviceEvent 发送出来。 做为替代方案,我们删除 input() 中的 CursorMoved 检查,并在 run() 函数中手动调用 camera_controller.process_mouse()

rust
// 更新!
+fn input(&mut self, event: &WindowEvent) -> bool {
+    match event {
+        WindowEvent::KeyboardInput {
+            input:
+                KeyboardInput {
+                    virtual_keycode: Some(key),
+                    state,
+                    ..
+                },
+            ..
+        } => self.camera_controller.process_keyboard(*key, *state),
+        WindowEvent::MouseWheel { delta, .. } => {
+            self.camera_controller.process_scroll(delta);
+            true
+        }
+        WindowEvent::MouseInput {
+            button: MouseButton::Left,
+            state,
+            ..
+        } => {
+            self.mouse_pressed = *state == ElementState::Pressed;
+            true
+        }
+        _ => false,
+    }
+}

下面是对事件循环代理(event_loop)的 run() 函数的修改:

rust
fn main() {
+    // ...
+    event_loop.run(move |event, _, control_flow| {
+        *control_flow = ControlFlow::Poll;
+        match event {
+            // ...
+            // 新增!
+            Event::DeviceEvent {
+                event: DeviceEvent::MouseMotion{ delta, },
+                .. // 我们现在没有用到 device_id
+            } => if state.mouse_pressed {
+                state.camera_controller.process_mouse(delta.0, delta.1)
+            }
+            // 更新!
+            Event::WindowEvent {
+                ref event,
+                window_id,
+            } if window_id == state.app.view.id() && !state.input(event) => {
+                match event {
+                    #[cfg(not(target_arch="wasm32"))]
+                    WindowEvent::CloseRequested
+                    | WindowEvent::KeyboardInput {
+                        input:
+                            KeyboardInput {
+                                state: ElementState::Pressed,
+                                virtual_keycode: Some(VirtualKeyCode::Escape),
+                                ..
+                            },
+                        ..
+                    } => *control_flow = ControlFlow::Exit,
+                    WindowEvent::Resized(physical_size) => {
+                        state.resize(*physical_size);
+                    }
+                    WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
+                        state.resize(**new_inner_size);
+                    }
+                    _ => {}
+                }
+            }
+            // ...
+        }
+    });
+}

update 函数需要多解释一下:CameraController 上的 update_camera 函数有一个参数 dt,它是之间的时间差(delta time,也可以说是时间间隔),用来辅助实现摄像机的平滑移动,使其不被帧速率所锁定。所以将它作为一个参数传入 update

rust
fn update(&mut self, dt: instant::Duration) {
+    // 更新!
+    self.camera_controller.update_camera(&mut self.camera, dt);
+    self.camera_uniform.update_view_proj(&self.camera, &self.projection);
+
+    // ..
+}

既然如此,我们也用 dt 来平滑光源的旋转:

rust
self.light_uniform.position =
+    (glam::Quat::from_axis_angle(glam::Vec3::Y, (60.0 * dt.as_secs_f32()).to_radians())
+    * old_position).into(); // 更新!

让我们在 main 函数中来实现 dt 的具体计算:

rust
fn main() {
+    // ...
+    let mut state = State::new(&window).await;
+    let mut last_render_time = instant::Instant::now();  // 新增!
+    event_loop.run(move |event, _, control_flow| {
+        *control_flow = ControlFlow::Poll;
+        match event {
+            // ...
+            // 更新!
+            Event::RedrawRequested(window_id) if window_id == state.app.view.id() => {
+                let now = instant::Instant::now();
+                let dt = now - last_render_time;
+                last_render_time = now;
+                state.update(dt);
+                // ...
+            }
+            _ => {}
+        }
+    });
+}

现在,我们应该可以自由控制摄像机了:

./screenshot.png

',42);function d(g,y,F,c,b,o){const a=s("WasmExample"),n=s("AutoGithubLink");return h(),p("div",null,[E,i(a,{example:"tutorial12_camera"}),i(n)])}const A=l(r,[["render",d]]);export{m as __pageData,A as default}; diff --git a/assets/intermediate_tutorial12-camera_index.md.A0Ys8PDv.lean.js b/assets/intermediate_tutorial12-camera_index.md.A0Ys8PDv.lean.js new file mode 100644 index 000000000..ec7e755c4 --- /dev/null +++ b/assets/intermediate_tutorial12-camera_index.md.A0Ys8PDv.lean.js @@ -0,0 +1 @@ +import{_ as l,D as s,o as h,c as p,I as i,R as k}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/left_right_hand._ZtWXizh.gif",e="/learn-wgpu-zh/assets/screenshot.hdnOjorV.png",m=JSON.parse('{"title":"更好的摄像机","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial12-camera/index.md","filePath":"intermediate/tutorial12-camera/index.md","lastUpdated":1703303099000}'),r={name:"intermediate/tutorial12-camera/index.md"},E=k("",42);function d(g,y,F,c,b,o){const a=s("WasmExample"),n=s("AutoGithubLink");return h(),p("div",null,[E,i(a,{example:"tutorial12_camera"}),i(n)])}const A=l(r,[["render",d]]);export{m as __pageData,A as default}; diff --git a/assets/intermediate_tutorial12-camera_index.md.bb97ed34.js b/assets/intermediate_tutorial12-camera_index.md.bb97ed34.js deleted file mode 100644 index 49c18cfa3..000000000 --- a/assets/intermediate_tutorial12-camera_index.md.bb97ed34.js +++ /dev/null @@ -1,312 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/left_right_hand.5c9b4bc7.gif",t="/learn-wgpu-zh/assets/screenshot.342a32a0.png",B=JSON.parse('{"title":"更好的摄像机","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial12-camera/index.md","filePath":"intermediate/tutorial12-camera/index.md","lastUpdated":1701933923000}'),D={name:"intermediate/tutorial12-camera/index.md"},F=r(`

更好的摄像机

这个问题已经被推迟了一段时间。实现一个虚拟摄像机与正确使用 wgpu 关系不大,但它一直困扰着我,所以现在来实现它吧。

lib.rs 已经堆砌很多代码了,所以我们创建一个 camera.rs 文件来放置摄像机代码。先导入一些要用到的文件:

rust
use winit::event::*;
-use winit::dpi::PhysicalPosition;
-use instant::Duration;
-use std::f32::consts::FRAC_PI_2;
-
-const SAFE_FRAC_PI_2: f32 = FRAC_PI_2 - 0.0001;

在 WASM 中使用 std::time::instant 会导致程序恐慌,所以我们使用 instant 包来替代,在 Cargo.toml 引入此依赖:

toml
instant = "0.1"

虚拟摄像机

接下来,需要创建一个新的 Camera 结构体。我们将使用一个 FPS 风格的摄像机,所以要存储位置(position)、 yaw(偏航,水平旋转)以及 pitch(俯仰,垂直旋转), 定义并实现一个 calc_matrix 函数用于创建视图矩阵:

rust
#[derive(Debug)]
-pub struct Camera {
-    pub position: glam::Vec3,
-    yaw: f32,
-    pitch: f32,
-}
-
-impl Camera {
-    pub fn new<V: Into<glam::Vec3>>(position: V, yaw: f32, pitch: f32) -> Self {
-        Self {
-            position: position.into(),
-            yaw,
-            pitch,
-        }
-    }
-
-    pub fn calc_matrix(&self) -> glam::Mat4 {
-        let (sin_pitch, cos_pitch) = self.pitch.sin_cos();
-        let (sin_yaw, cos_yaw) = self.yaw.sin_cos();
-
-        glam::Mat4::look_to_rh(
-            self.position,
-            glam::Vec3::new(cos_pitch * cos_yaw, sin_pitch, cos_pitch * sin_yaw).normalize(),
-            glam::Vec3::Y,
-        )
-    }
-}

投影

只有在窗口调整大小时,投影(Projection)才真正需要改变,所以我们将投影与摄像机分开,创建一个 Projection 结构体:

rust
pub struct Projection {
-    aspect: f32,
-    fovy: f32,
-    znear: f32,
-    zfar: f32,
-}
-
-impl Projection {
-    pub fn new(width: u32, height: u32, fovy: f32, znear: f32, zfar: f32) -> Self {
-        Self {
-            aspect: width as f32 / height as f32,
-            fovy: fovy.to_radians(),
-            znear,
-            zfar,
-        }
-    }
-
-    pub fn resize(&mut self, width: u32, height: u32) {
-        self.aspect = width as f32 / height as f32;
-    }
-
-    pub fn calc_matrix(&self) -> glam::Mat4 {
-        glam::Mat4::perspective_rh(self.fovy, self.aspect, self.znear, self.zfar)
-    }
-}

有一点需要注意:从 perspective_rh 函数返回的是右手坐标系(right-handed coordinate system)的投影矩阵。也就是说,Z 轴是指向屏幕外的,想让 Z 轴指向屏幕内(也就是左手坐标系的投影矩阵)需要使用 perspective_lh

可以这样分辨右手坐标系和左手坐标系的区别:在身体的正前方把你的拇指指向右边代表 X 轴,食指指向上方代表 Y 轴,伸出中指代表 Z 轴。此时在你的右手上,中指应该指是向你自己。而在左手上,应该是指向远方。

./left_right_hand.gif

摄像机控制器

现在,我们需要一个新的摄像机控制器,在 camera.rs 中添加以下代码:

rust
#[derive(Debug)]
-pub struct CameraController {
-    amount_left: f32,
-    amount_right: f32,
-    amount_forward: f32,
-    amount_backward: f32,
-    amount_up: f32,
-    amount_down: f32,
-    rotate_horizontal: f32,
-    rotate_vertical: f32,
-    scroll: f32,
-    speed: f32,
-    sensitivity: f32,
-}
-
-impl CameraController {
-    pub fn new(speed: f32, sensitivity: f32) -> Self {
-        Self {
-            amount_left: 0.0,
-            amount_right: 0.0,
-            amount_forward: 0.0,
-            amount_backward: 0.0,
-            amount_up: 0.0,
-            amount_down: 0.0,
-            rotate_horizontal: 0.0,
-            rotate_vertical: 0.0,
-            scroll: 0.0,
-            speed,
-            sensitivity,
-        }
-    }
-
-    pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool{
-        let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
-        match key {
-            VirtualKeyCode::W | VirtualKeyCode::Up => {
-                self.amount_forward = amount;
-                true
-            }
-            VirtualKeyCode::S | VirtualKeyCode::Down => {
-                self.amount_backward = amount;
-                true
-            }
-            VirtualKeyCode::A | VirtualKeyCode::Left => {
-                self.amount_left = amount;
-                true
-            }
-            VirtualKeyCode::D | VirtualKeyCode::Right => {
-                self.amount_right = amount;
-                true
-            }
-            VirtualKeyCode::Space => {
-                self.amount_up = amount;
-                true
-            }
-            VirtualKeyCode::LShift => {
-                self.amount_down = amount;
-                true
-            }
-            _ => false,
-        }
-    }
-
-    pub fn process_mouse(&mut self, mouse_dx: f64, mouse_dy: f64) {
-        self.rotate_horizontal = mouse_dx as f32;
-        self.rotate_vertical = mouse_dy as f32;
-    }
-
-    pub fn process_scroll(&mut self, delta: &MouseScrollDelta) {
-        self.scroll = -match delta {
-            // 假定一行为 100 个像素,你可以随意修改这个值
-            MouseScrollDelta::LineDelta(_, scroll) => scroll * 100.0,
-            MouseScrollDelta::PixelDelta(PhysicalPosition {
-                y: scroll,
-                ..
-            }) => *scroll as f32,
-        };
-    }
-
-    pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) {
-        let dt = dt.as_secs_f32();
-
-        // 前后左右移动
-        let (yaw_sin, yaw_cos) = camera.yaw.sin_cos();
-        let forward = glam::Vec3::new(yaw_cos, 0.0, yaw_sin).normalize();
-        let right = glam::Vec3::new(-yaw_sin, 0.0, yaw_cos).normalize();
-        camera.position += forward * (self.amount_forward - self.amount_backward) * self.speed * dt;
-        camera.position += right * (self.amount_right - self.amount_left) * self.speed * dt;
-
-        // 变焦(缩放)
-        // 注意:这不是一个真实的变焦。
-        // 通过摄像机的位置变化来模拟变焦,使你更容易靠近想聚焦的物体。
-        let (pitch_sin, pitch_cos) = camera.pitch.sin_cos();
-        let scrollward = glam::Vec3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
-        camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
-        self.scroll = 0.0;
-
-        // 由于我们没有使用滚动,所以直接修改 y 坐标来上下移动。
-        camera.position.y += (self.amount_up - self.amount_down) * self.speed * dt;
-
-        // 旋转
-        camera.yaw += self.rotate_horizontal * self.sensitivity * dt;
-        camera.pitch += -self.rotate_vertical * self.sensitivity * dt;
-
-        // 重置旋转值为 0。没有鼠标移动发生时,摄像机就停止旋转。
-        self.rotate_horizontal = 0.0;
-        self.rotate_vertical = 0.0;
-
-        // 保持摄像机的角度不要太高/太低。
-        if camera.pitch < -SAFE_FRAC_PI_2 {
-            camera.pitch = -SAFE_FRAC_PI_2;
-        } else if camera.pitch > SAFE_FRAC_PI_2 {
-            camera.pitch = SAFE_FRAC_PI_2;
-        }
-    }
-}

清理 lib.rs

首先,我们从 lib.rs 中删除 CameraCameraController,然后导入 camera.rs

rust
mod model;
-mod texture;
-mod camera; // 新增!

接着更新 update_view_proj 以使用新的 CameraProjection

rust
impl CameraUniform {
-    // ...
-
-    // 更新!
-    fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
-        self.view_position = camera.position.extend(1.0).into();
-        self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into();
-    }
-}

我们还要修改 State 来使用新的 CameraCameraProjectionProjection,再添加一个mouse_pressed 字段来存储鼠标是否被按下:

rust
struct State {
-    // ...
-    camera: camera::Camera, // 更新!
-    projection: camera::Projection, // 新增!
-    camera_controller: camera::CameraController, // 更新!
-    // ...
-    // 新增!
-    mouse_pressed: bool,
-}

别忘了需要导入 winit::dpi::PhysicalPosition

然后更新 new() 函数:

rust
impl State {
-    async fn new(window: &Window) -> Self {
-        // ...
-
-        // 更新!
-        let camera = camera::Camera::new((0.0, 5.0, 10.0), -90.0, -20.0);
-        let projection = camera::Projection::new(config.width, config.height, 45.0, 0.1, 100.0);
-        let camera_controller = camera::CameraController::new(4.0, 0.4);
-
-        // ...
-
-        camera_uniform.update_view_proj(&camera, &projection); // 更新!
-
-        // ...
-
-        Self {
-            // ...
-            camera,
-            projection, // 新增!
-            camera_controller,
-            // ...
-            mouse_pressed: false, // 新增!
-        }
-    }
-}

接着在 resize 函数中更新投影矩阵 projection

rust
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-    // 更新!
-    self.projection.resize(new_size.width, new_size.height);
-    // ...
-}

事件输入函数 input() 也需要被更新。 到目前为止,我们一直在使用 WindowEvent 来控制摄像机,这很有效,但它并不是最好的解决方案。winit 文档告诉我们,操作系统通常会对 CursorMoved 事件的数据进行转换,以实现光标加速等效果。

现在为了解决这个问题,可以修改 input() 函数来处理 DeviceEvent 而不是 WindowEvent,但是在 macOS 和 WASM 上,键盘和按键事件不会被当作 DeviceEvent 发送出来。 做为替代方案,我们删除 input() 中的 CursorMoved 检查,并在 run() 函数中手动调用 camera_controller.process_mouse()

rust
// 更新!
-fn input(&mut self, event: &WindowEvent) -> bool {
-    match event {
-        WindowEvent::KeyboardInput {
-            input:
-                KeyboardInput {
-                    virtual_keycode: Some(key),
-                    state,
-                    ..
-                },
-            ..
-        } => self.camera_controller.process_keyboard(*key, *state),
-        WindowEvent::MouseWheel { delta, .. } => {
-            self.camera_controller.process_scroll(delta);
-            true
-        }
-        WindowEvent::MouseInput {
-            button: MouseButton::Left,
-            state,
-            ..
-        } => {
-            self.mouse_pressed = *state == ElementState::Pressed;
-            true
-        }
-        _ => false,
-    }
-}

下面是对事件循环代理(event_loop)的 run() 函数的修改:

rust
fn main() {
-    // ...
-    event_loop.run(move |event, _, control_flow| {
-        *control_flow = ControlFlow::Poll;
-        match event {
-            // ...
-            // 新增!
-            Event::DeviceEvent {
-                event: DeviceEvent::MouseMotion{ delta, },
-                .. // 我们现在没有用到 device_id
-            } => if state.mouse_pressed {
-                state.camera_controller.process_mouse(delta.0, delta.1)
-            }
-            // 更新!
-            Event::WindowEvent {
-                ref event,
-                window_id,
-            } if window_id == state.app.view.id() && !state.input(event) => {
-                match event {
-                    #[cfg(not(target_arch="wasm32"))]
-                    WindowEvent::CloseRequested
-                    | WindowEvent::KeyboardInput {
-                        input:
-                            KeyboardInput {
-                                state: ElementState::Pressed,
-                                virtual_keycode: Some(VirtualKeyCode::Escape),
-                                ..
-                            },
-                        ..
-                    } => *control_flow = ControlFlow::Exit,
-                    WindowEvent::Resized(physical_size) => {
-                        state.resize(*physical_size);
-                    }
-                    WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
-                        state.resize(**new_inner_size);
-                    }
-                    _ => {}
-                }
-            }
-            // ...
-        }
-    });
-}

update 函数需要多解释一下:CameraController 上的 update_camera 函数有一个参数 dt,它是之间的时间差(delta time,也可以说是时间间隔),用来辅助实现摄像机的平滑移动,使其不被帧速率所锁定。所以将它作为一个参数传入 update

rust
fn update(&mut self, dt: instant::Duration) {
-    // 更新!
-    self.camera_controller.update_camera(&mut self.camera, dt);
-    self.camera_uniform.update_view_proj(&self.camera, &self.projection);
-
-    // ..
-}

既然如此,我们也用 dt 来平滑光源的旋转:

rust
self.light_uniform.position =
-    (glam::Quat::from_axis_angle(glam::Vec3::Y, (60.0 * dt.as_secs_f32()).to_radians())
-    * old_position).into(); // 更新!

让我们在 main 函数中来实现 dt 的具体计算:

rust
fn main() {
-    // ...
-    let mut state = State::new(&window).await;
-    let mut last_render_time = instant::Instant::now();  // 新增!
-    event_loop.run(move |event, _, control_flow| {
-        *control_flow = ControlFlow::Poll;
-        match event {
-            // ...
-            // 更新!
-            Event::RedrawRequested(window_id) if window_id == state.app.view.id() => {
-                let now = instant::Instant::now();
-                let dt = now - last_render_time;
-                last_render_time = now;
-                state.update(dt);
-                // ...
-            }
-            _ => {}
-        }
-    });
-}

现在,我们应该可以自由控制摄像机了:

./screenshot.png

',42);function y(C,A,i,b,m,u){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[F,n(a,{example:"tutorial12_camera"}),n(l)])}const f=p(D,[["render",y]]);export{B as __pageData,f as default}; diff --git a/assets/intermediate_tutorial12-camera_index.md.bb97ed34.lean.js b/assets/intermediate_tutorial12-camera_index.md.bb97ed34.lean.js deleted file mode 100644 index 7fc76424e..000000000 --- a/assets/intermediate_tutorial12-camera_index.md.bb97ed34.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as p,E as s,o,c as e,J as n,S as r}from"./chunks/framework.adbf3c9e.js";const c="/learn-wgpu-zh/assets/left_right_hand.5c9b4bc7.gif",t="/learn-wgpu-zh/assets/screenshot.342a32a0.png",B=JSON.parse('{"title":"更好的摄像机","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial12-camera/index.md","filePath":"intermediate/tutorial12-camera/index.md","lastUpdated":1701933923000}'),D={name:"intermediate/tutorial12-camera/index.md"},F=r("",42);function y(C,A,i,b,m,u){const a=s("WasmExample"),l=s("AutoGithubLink");return o(),e("div",null,[F,n(a,{example:"tutorial12_camera"}),n(l)])}const f=p(D,[["render",y]]);export{B as __pageData,f as default}; diff --git a/assets/intermediate_tutorial13-terrain_index.md.cf92cfdc.js b/assets/intermediate_tutorial13-terrain_index.md.cf92cfdc.js deleted file mode 100644 index 4528a4a81..000000000 --- a/assets/intermediate_tutorial13-terrain_index.md.cf92cfdc.js +++ /dev/null @@ -1,103 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const 
p="/learn-wgpu-zh/assets/figure_no-fbm.f932d989.png",o="/learn-wgpu-zh/assets/figure_fbm.0a4b2e8c.png",e="/learn-wgpu-zh/assets/figure_spiky.a86510ba.png",r="/learn-wgpu-zh/assets/figure_work-groups.8aea47c7.jpg",m=JSON.parse('{"title":"程序地形","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial13-terrain/index.md","filePath":"intermediate/tutorial13-terrain/index.md","lastUpdated":1701933923000}'),c={name:"intermediate/tutorial13-terrain/index.md"},t=l(`

程序地形

到目前为止,我们一直在一个空旷的场景里渲染模型。如果只是想测试着色代码,这是非常好的,但大多数应用程序会想让屏幕上填充更多有趣的元素。 你可以用各种方法来处理此问题,比如,在 Blender 中创建一堆模型,然后把它们加载到场景中。如果你有一些像样的艺术技巧和一些耐心,这是很有效的方法。我在这两个方面都很欠缺,所以让我们通过代码来制作一些看起来不错的东西。

正如本文的名字所示,我们将创建一个地形(Terrain)。现在,创建地形网格的经典方法是使用预先生成的噪声纹理(Noise Texture),并对其进行采样,以获得网格中每个点的高度值。这是一个相当有效的方法,但我选择了直接使用计算着色器来生成噪声。让我们开始吧!

计算着色器

计算着色器(Compute Shader)允许你利用 GPU 的并行计算能力完成任意任务。虽然它也可以用于渲染任务,但通常用于与绘制三角形和像素没有直接关系的任务,比如,物理模拟、图像滤镜、创建程序纹理、运行神经网络等等。我稍后会详细介绍它们的工作原理,但现在只需用它们来为我们的地形创建顶点和索引缓冲区。

噪声函数

让我们从计算着色器的代码开始,创建一个名为 terrain.wgsl 的新文件,在文件内先实现一个噪声函数(Noise Function),然后再创建着色器的入口函数。具体代码如下:

rust
// ============================
-// 地形生成
-// ============================
-
-// https://gist.github.com/munrocket/236ed5ba7e409b8bdf1ff6eca5dcdc39
-//  MIT License. © Ian McEwan, Stefan Gustavson, Munrocket
-// - Less condensed glsl implementation with comments can be found at https://weber.itn.liu.se/~stegu/jgt2012/article.pdf
-
-fn permute3(x: vec3f) -> vec3f { return (((x * 34.) + 1.) * x) % vec3f(289.); }
-
-fn snoise2(v: vec2f) -> f32 {
-  let C = vec4f(0.211324865405187, 0.366025403784439, -0.577350269189626, 0.024390243902439);
-  var i: vec2f = floor(v + dot(v, C.yy));
-  let x0 = v - i + dot(i, C.xx);
-  // I flipped the condition here from > to < as it fixed some artifacting I was observing
-  var i1: vec2f = select(vec2f(1., 0.), vec2f(0., 1.), (x0.x < x0.y));
-  var x12: vec4f = x0.xyxy + C.xxzz - vec4f(i1, 0., 0.);
-  i = i % vec2f(289.);
-  let p = permute3(permute3(i.y + vec3f(0., i1.y, 1.)) + i.x + vec3f(0., i1.x, 1.));
-  var m: vec3f = max(0.5 -
-      vec3f(dot(x0, x0), dot(x12.xy, x12.xy), dot(x12.zw, x12.zw)), vec3f(0.));
-  m = m * m;
-  m = m * m;
-  let x = 2. * fract(p * C.www) - 1.;
-  let h = abs(x) - 0.5;
-  let ox = floor(x + 0.5);
-  let a0 = x - ox;
-  m = m * (1.79284291400159 - 0.85373472095314 * (a0 * a0 + h * h));
-  let g = vec3f(a0.x * x0.x + h.x * x0.y, a0.yz * x12.xz + h.yz * x12.yw);
-  return 130. * dot(m, g);
-}

部分读者可能已经认出这是 Simplex 噪声(特别是 OpenSimplex 噪声)的一个实现。我承认没有真正理解 OpenSimplex 噪声背后的数学原理。它的基本原理类似于 Perlin 噪声,但不是一个正方形网格,而是六边形网格,这消除了在正方形网格上产生噪声的一些伪影。我也不是这方面的专家,所以总结一下:permute3() 接收一个 vec3 并返回一个伪随机的 vec3snoise2() 接收一个 vec2 并返回一个 [-1, 1] 之间的浮点数。如果你想了解更多关于噪声函数的信息,请查看这篇文章来自 The Book of Shaders。代码是用 GLSL 编写的,但概念是一样的。

从下面的渲染结果可以看出,直接使用 snoise 的输出来生成地形的高度值,地表往往过于平滑。虽然这可能就是你想要的,但它看起来不像是自然界的地形。

smooth terrain

为了使地形更加粗糙,我们将使用一种叫做分形布朗运动的技术。这种技术的工作原理是对噪声函数进行多次采样,每次将强度减半,同时将噪声的频率提高一倍。 这意味着地形的整体形状保持平滑,同时拥有更清晰的细节,得到的效果将是下面这样:

more organic terrain

这个函数的代码其实很简单:

rust
fn fbm(p: vec2f) -> f32 {
-    let NUM_OCTAVES: u32 = 5u;
-    var x = p * 0.01;
-    var v = 0.0;
-    var a = 0.5;
-    let shift = vec2f(100.0);
-    let cs = vec2f(cos(0.5), sin(0.5));
-    let rot = mat2x2<f32>(cs.x, cs.y, -cs.y, cs.x);
-
-    for (var i=0u; i<NUM_OCTAVES; i=i+1u) {
-        v = v + a * snoise2(x);
-        x = rot * x * 2.0 + shift;
-        a = a * 0.5;
-    }
-
-    return v;
-}

让我们稍微回顾一下:

  • NUM_OCTAVES 常数设定噪声级别。更高的级别将给地形网格增加更多的细节,但级别越高,得到的回报将递减,我发现 5 是一个好数字。
  • p 乘以 0.01 用来“放大”噪声函数。这是因为我们的网格将是 1x1 的四边形,而 simplex 噪声函数在每步进一次时类似于白噪声。我们来看到直接使用 p 是什么样子的:spiky terrain
  • a 变量是在给定的噪声级别下的噪声振幅。
  • shiftrot 用于减少生成的噪声中的失真。其中一个失真现象是,在 0,0 处,无论你如何缩放 psnoise 的输出都是一样的。

生成网格

为了生成地形网格,需要向着色器传递一些信息:

rust
struct ChunkData {
-    chunk_size: vec2u,
-    chunk_corner: vec2<i32>,
-    min_max_height: vec2f,
-}
-
-struct Vertex {
-    @location(0) position: vec3f,
-    @location(1) normal: vec3f,
-}
-
-struct VertexBuffer {
-    data: array<Vertex>, // stride: 32
-}
-
-struct IndexBuffer {
-    data: array<u32>,
-}
-
-@group(0) @binding(0) var<uniform> chunk_data: ChunkData;
-@group(0)@binding(1) var<storage, read_write> vertices: VertexBuffer;
-@group(0)@binding(2) var<storage, read_write> indices: IndexBuffer;

我们传递给色器的 uniform 缓冲区,其中包括四边形网格的大小 chunk_size,噪声算法的起始点 chunk_corner ,以及地形的 min_max_height

顶点和索引缓冲区作为 storage 缓冲区传入,并启用 read_write 访问模式来支持数据的读取与写入。我们将在 Rust 中创建这些缓冲区,并在执行计算着色器时将其绑定。

着色器的下一个部分是在网格上生成一个点,以及该点的一个顶点:

rust
fn terrain_point(p: vec2f) -> vec3f {
-    return vec3f(
-        p.x,
-        mix(chunk_data.min_max_height.x,chunk_data.min_max_height.y, fbm(p)),
-        p.y,
-    );
-}
-
-fn terrain_vertex(p: vec2f) -> Vertex {
-    let v = terrain_point(p);
-
-    let tpx = terrain_point(p + vec2f(0.1, 0.0)) - v;
-    let tpz = terrain_point(p + vec2f(0.0, 0.1)) - v;
-    let tnx = terrain_point(p + vec2f(-0.1, 0.0)) - v;
-    let tnz = terrain_point(p + vec2f(0.0, -0.1)) - v;
-
-    let pn = normalize(cross(tpz, tpx));
-    let nn = normalize(cross(tnz, tnx));
-
-    let n = (pn + nn) * 0.5;
-
-    return Vertex(v, n);
-}

terrain_point 函数接收地形上的一个 XZ 点,并返回一个 vec3,其中 y 值在最小和最大高度之间。

terrain_vertex 使用 terrain_point 来获得它的位置,同时通过对附近的 4 个点进行采样,并使用叉积来计算顶点法线。

你应该注意到了 Vertex 结构体不包括纹理坐标字段。我们可以通过使用顶点的 XZ 坐标,并让纹理采样器在 X 和 Y 轴上镜像纹理来轻松地创建纹理坐标,但以这种方式进行纹理采样时,高度图往往会有拉伸现象。

我们将在未来的教程中介绍一种叫做三平面映射的方法来给地形贴图。但现在我们只使用一个程序纹理,它将在渲染地形的片元着色器中被创建。

现在我们可以在地形表面获得一个实际的顶点数据,并用来填充顶点和索引缓冲区了。我们将创建一个 gen_terrain() 函数作为计算着色器的入口:

rust
@compute @workgroup_size(64)
-fn gen_terrain(
-    @builtin(global_invocation_id) gid: vec3<u32>
-) {
-    // snipped...
-}

@stage(compute) 注释指定了 gen_terrain 是一个计算着色器入口。

workgroup_size() 指定 GPU 可以为每个工作组(workgroup)分配的一组调用,这一组调用会同时执行着色器入口函数,并共享对工作组地址空间中着色器变量的访问。 我们在编写计算着色器的时候指定工作组的大小,它有 3 个维度的参数,因为工作组是一个 3D 网格,但如果不指定它们,则默认为 1。 换句话说,workgroup_size(64) 相当于 workgroup_size(64, 1, 1)

global_invocation_id 是一个 3D 索引。这可能看起来很奇怪,但你可以把工作组看作是工作组的 3D 网格。这些工作组有一个内部的工作者网格。global_invocation_id 就是相对于所有其他工作组的当前工作者的 id。

从视觉上看,工作组的网格看起来会是这样的:

work group grid

把计算着色器想象成一个在一堆嵌套的 for 循环中运行的函数,但每个循环都是并行执行的,这可能会有帮助。它看起来会像这样:

for wgx in num_workgroups.x:
-    for wgy in num_workgroups.y:
-        for wgz in num_workgroups.z:
-            var local_invocation_id = (wgx, wgy, wgz)
-            for x in workgroup_size.x:
-                for y in workgroup_size.x:
-                    for z in workgroup_size.x:
-                        var global_invocation_id = local_invocation_id * workgroup_size + (x, y, z);
-                        gen_terrain(global_invocation_id)

如果想了解更多关于工作组的信息请查看 WGSL 文档

TODO:

  • Note changes to create_render_pipeline
  • Mention swizzle feature for cgmath
  • Compare workgroups and workgroups sizes to nested for loops
    • Maybe make a diagram in blender?
  • Change to camera movement speed
`,37),D=[t];function F(y,C,A,i,b,u){return n(),a("div",null,D)}const g=s(c,[["render",F]]);export{m as __pageData,g as default}; diff --git a/assets/intermediate_tutorial13-terrain_index.md.cf92cfdc.lean.js b/assets/intermediate_tutorial13-terrain_index.md.cf92cfdc.lean.js deleted file mode 100644 index 11c89be1e..000000000 --- a/assets/intermediate_tutorial13-terrain_index.md.cf92cfdc.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as s,o as n,c as a,S as l}from"./chunks/framework.adbf3c9e.js";const p="/learn-wgpu-zh/assets/figure_no-fbm.f932d989.png",o="/learn-wgpu-zh/assets/figure_fbm.0a4b2e8c.png",e="/learn-wgpu-zh/assets/figure_spiky.a86510ba.png",r="/learn-wgpu-zh/assets/figure_work-groups.8aea47c7.jpg",m=JSON.parse('{"title":"程序地形","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial13-terrain/index.md","filePath":"intermediate/tutorial13-terrain/index.md","lastUpdated":1701933923000}'),c={name:"intermediate/tutorial13-terrain/index.md"},t=l("",37),D=[t];function F(y,C,A,i,b,u){return n(),a("div",null,D)}const g=s(c,[["render",F]]);export{m as __pageData,g as default}; diff --git a/assets/intermediate_tutorial13-terrain_index.md.hp3fmBWu.js b/assets/intermediate_tutorial13-terrain_index.md.hp3fmBWu.js new file mode 100644 index 000000000..c96863581 --- /dev/null +++ b/assets/intermediate_tutorial13-terrain_index.md.hp3fmBWu.js @@ -0,0 +1,103 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const h="/learn-wgpu-zh/assets/figure_no-fbm.WygYK_Pb.png",k="/learn-wgpu-zh/assets/figure_fbm.vQ-eUzh_.png",p="/learn-wgpu-zh/assets/figure_spiky.4lru4RHQ.png",l="/learn-wgpu-zh/assets/figure_work-groups.-0RxSLqe.jpg",u=JSON.parse('{"title":"程序地形","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial13-terrain/index.md","filePath":"intermediate/tutorial13-terrain/index.md","lastUpdated":1703303099000}'),t={name:"intermediate/tutorial13-terrain/index.md"},e=n(`

程序地形

到目前为止,我们一直在一个空旷的场景里渲染模型。如果只是想测试着色代码,这是非常好的,但大多数应用程序会想让屏幕上填充更多有趣的元素。 你可以用各种方法来处理此问题,比如,在 Blender 中创建一堆模型,然后把它们加载到场景中。如果你有一些像样的艺术技巧和一些耐心,这是很有效的方法。我在这两个方面都很欠缺,所以让我们通过代码来制作一些看起来不错的东西。

正如本文的名字所示,我们将创建一个地形(Terrain)。现在,创建地形网格的经典方法是使用预先生成的噪声纹理(Noise Texture),并对其进行采样,以获得网格中每个点的高度值。这是一个相当有效的方法,但我选择了直接使用计算着色器来生成噪声。让我们开始吧!

计算着色器

计算着色器(Compute Shader)允许你利用 GPU 的并行计算能力完成任意任务。虽然它也可以用于渲染任务,但通常用于与绘制三角形和像素没有直接关系的任务,比如,物理模拟、图像滤镜、创建程序纹理、运行神经网络等等。我稍后会详细介绍它们的工作原理,但现在只需用它们来为我们的地形创建顶点和索引缓冲区。

噪声函数

让我们从计算着色器的代码开始,创建一个名为 terrain.wgsl 的新文件,在文件内先实现一个噪声函数(Noise Function),然后再创建着色器的入口函数。具体代码如下:

rust
// ============================
+// 地形生成
+// ============================
+
+// https://gist.github.com/munrocket/236ed5ba7e409b8bdf1ff6eca5dcdc39
+//  MIT License. © Ian McEwan, Stefan Gustavson, Munrocket
+// - Less condensed glsl implementation with comments can be found at https://weber.itn.liu.se/~stegu/jgt2012/article.pdf
+
+fn permute3(x: vec3f) -> vec3f { return (((x * 34.) + 1.) * x) % vec3f(289.); }
+
+fn snoise2(v: vec2f) -> f32 {
+  let C = vec4f(0.211324865405187, 0.366025403784439, -0.577350269189626, 0.024390243902439);
+  var i: vec2f = floor(v + dot(v, C.yy));
+  let x0 = v - i + dot(i, C.xx);
+  // I flipped the condition here from > to < as it fixed some artifacting I was observing
+  var i1: vec2f = select(vec2f(1., 0.), vec2f(0., 1.), (x0.x < x0.y));
+  var x12: vec4f = x0.xyxy + C.xxzz - vec4f(i1, 0., 0.);
+  i = i % vec2f(289.);
+  let p = permute3(permute3(i.y + vec3f(0., i1.y, 1.)) + i.x + vec3f(0., i1.x, 1.));
+  var m: vec3f = max(0.5 -
+      vec3f(dot(x0, x0), dot(x12.xy, x12.xy), dot(x12.zw, x12.zw)), vec3f(0.));
+  m = m * m;
+  m = m * m;
+  let x = 2. * fract(p * C.www) - 1.;
+  let h = abs(x) - 0.5;
+  let ox = floor(x + 0.5);
+  let a0 = x - ox;
+  m = m * (1.79284291400159 - 0.85373472095314 * (a0 * a0 + h * h));
+  let g = vec3f(a0.x * x0.x + h.x * x0.y, a0.yz * x12.xz + h.yz * x12.yw);
+  return 130. * dot(m, g);
+}

部分读者可能已经认出这是 Simplex 噪声(特别是 OpenSimplex 噪声)的一个实现。我承认没有真正理解 OpenSimplex 噪声背后的数学原理。它的基本原理类似于 Perlin 噪声,但不是一个正方形网格,而是六边形网格,这消除了在正方形网格上产生噪声的一些伪影。我也不是这方面的专家,所以总结一下:permute3() 接收一个 vec3 并返回一个伪随机的 vec3snoise2() 接收一个 vec2 并返回一个 [-1, 1] 之间的浮点数。如果你想了解更多关于噪声函数的信息,请查看这篇文章来自 The Book of Shaders。代码是用 GLSL 编写的,但概念是一样的。

从下面的渲染结果可以看出,直接使用 snoise 的输出来生成地形的高度值,地表往往过于平滑。虽然这可能就是你想要的,但它看起来不像是自然界的地形。

smooth terrain

为了使地形更加粗糙,我们将使用一种叫做分形布朗运动的技术。这种技术的工作原理是对噪声函数进行多次采样,每次将强度减半,同时将噪声的频率提高一倍。 这意味着地形的整体形状保持平滑,同时拥有更清晰的细节,得到的效果将是下面这样:

more organic terrain

这个函数的代码其实很简单:

rust
fn fbm(p: vec2f) -> f32 {
+    let NUM_OCTAVES: u32 = 5u;
+    var x = p * 0.01;
+    var v = 0.0;
+    var a = 0.5;
+    let shift = vec2f(100.0);
+    let cs = vec2f(cos(0.5), sin(0.5));
+    let rot = mat2x2<f32>(cs.x, cs.y, -cs.y, cs.x);
+
+    for (var i=0u; i<NUM_OCTAVES; i=i+1u) {
+        v = v + a * snoise2(x);
+        x = rot * x * 2.0 + shift;
+        a = a * 0.5;
+    }
+
+    return v;
+}

让我们稍微回顾一下:

  • NUM_OCTAVES 常数设定噪声级别。更高的级别将给地形网格增加更多的细节,但级别越高,得到的回报将递减,我发现 5 是一个好数字。
  • p 乘以 0.01 用来“放大”噪声函数。这是因为我们的网格将是 1x1 的四边形,而 simplex 噪声函数在每步进一次时类似于白噪声。我们来看到直接使用 p 是什么样子的:spiky terrain
  • a 变量是在给定的噪声级别下的噪声振幅。
  • shiftrot 用于减少生成的噪声中的失真。其中一个失真现象是,在 0,0 处,无论你如何缩放 psnoise 的输出都是一样的。

生成网格

为了生成地形网格,需要向着色器传递一些信息:

rust
struct ChunkData {
+    chunk_size: vec2u,
+    chunk_corner: vec2<i32>,
+    min_max_height: vec2f,
+}
+
+struct Vertex {
+    @location(0) position: vec3f,
+    @location(1) normal: vec3f,
+}
+
+struct VertexBuffer {
+    data: array<Vertex>, // stride: 32
+}
+
+struct IndexBuffer {
+    data: array<u32>,
+}
+
+@group(0) @binding(0) var<uniform> chunk_data: ChunkData;
+@group(0)@binding(1) var<storage, read_write> vertices: VertexBuffer;
+@group(0)@binding(2) var<storage, read_write> indices: IndexBuffer;

我们传递给色器的 uniform 缓冲区,其中包括四边形网格的大小 chunk_size,噪声算法的起始点 chunk_corner ,以及地形的 min_max_height

顶点和索引缓冲区作为 storage 缓冲区传入,并启用 read_write 访问模式来支持数据的读取与写入。我们将在 Rust 中创建这些缓冲区,并在执行计算着色器时将其绑定。

着色器的下一个部分是在网格上生成一个点,以及该点的一个顶点:

rust
fn terrain_point(p: vec2f) -> vec3f {
+    return vec3f(
+        p.x,
+        mix(chunk_data.min_max_height.x,chunk_data.min_max_height.y, fbm(p)),
+        p.y,
+    );
+}
+
+fn terrain_vertex(p: vec2f) -> Vertex {
+    let v = terrain_point(p);
+
+    let tpx = terrain_point(p + vec2f(0.1, 0.0)) - v;
+    let tpz = terrain_point(p + vec2f(0.0, 0.1)) - v;
+    let tnx = terrain_point(p + vec2f(-0.1, 0.0)) - v;
+    let tnz = terrain_point(p + vec2f(0.0, -0.1)) - v;
+
+    let pn = normalize(cross(tpz, tpx));
+    let nn = normalize(cross(tnz, tnx));
+
+    let n = (pn + nn) * 0.5;
+
+    return Vertex(v, n);
+}

terrain_point 函数接收地形上的一个 XZ 点,并返回一个 vec3,其中 y 值在最小和最大高度之间。

terrain_vertex 使用 terrain_point 来获得它的位置,同时通过对附近的 4 个点进行采样,并使用叉积来计算顶点法线。

你应该注意到了 Vertex 结构体不包括纹理坐标字段。我们可以通过使用顶点的 XZ 坐标,并让纹理采样器在 X 和 Y 轴上镜像纹理来轻松地创建纹理坐标,但以这种方式进行纹理采样时,高度图往往会有拉伸现象。

我们将在未来的教程中介绍一种叫做三平面映射的方法来给地形贴图。但现在我们只使用一个程序纹理,它将在渲染地形的片元着色器中被创建。

现在我们可以在地形表面获得一个实际的顶点数据,并用来填充顶点和索引缓冲区了。我们将创建一个 gen_terrain() 函数作为计算着色器的入口:

rust
@compute @workgroup_size(64)
+fn gen_terrain(
+    @builtin(global_invocation_id) gid: vec3<u32>
+) {
+    // snipped...
+}

@stage(compute) 注释指定了 gen_terrain 是一个计算着色器入口。

workgroup_size() 指定 GPU 可以为每个工作组(workgroup)分配的一组调用,这一组调用会同时执行着色器入口函数,并共享对工作组地址空间中着色器变量的访问。 我们在编写计算着色器的时候指定工作组的大小,它有 3 个维度的参数,因为工作组是一个 3D 网格,但如果不指定它们,则默认为 1。 换句话说,workgroup_size(64) 相当于 workgroup_size(64, 1, 1)

global_invocation_id 是一个 3D 索引。这可能看起来很奇怪,但你可以把工作组看作是工作组的 3D 网格。这些工作组有一个内部的工作者网格。global_invocation_id 就是相对于所有其他工作组的当前工作者的 id。

从视觉上看,工作组的网格看起来会是这样的:

work group grid

把计算着色器想象成一个在一堆嵌套的 for 循环中运行的函数,但每个循环都是并行执行的,这可能会有帮助。它看起来会像这样:

for wgx in num_workgroups.x:
+    for wgy in num_workgroups.y:
+        for wgz in num_workgroups.z:
+            var local_invocation_id = (wgx, wgy, wgz)
+            for x in workgroup_size.x:
+                for y in workgroup_size.x:
+                    for z in workgroup_size.x:
+                        var global_invocation_id = local_invocation_id * workgroup_size + (x, y, z);
+                        gen_terrain(global_invocation_id)

如果想了解更多关于工作组的信息请查看 WGSL 文档

TODO:

  • Note changes to create_render_pipeline
  • Mention swizzle feature for cgmath
  • Compare workgroups and workgroups sizes to nested for loops
    • Maybe make a diagram in blender?
  • Change to camera movement speed
`,37),r=[e];function E(d,g,y,F,c,o){return i(),a("div",null,r)}const D=s(t,[["render",E]]);export{u as __pageData,D as default}; diff --git a/assets/intermediate_tutorial13-terrain_index.md.hp3fmBWu.lean.js b/assets/intermediate_tutorial13-terrain_index.md.hp3fmBWu.lean.js new file mode 100644 index 000000000..a0c2b8c53 --- /dev/null +++ b/assets/intermediate_tutorial13-terrain_index.md.hp3fmBWu.lean.js @@ -0,0 +1 @@ +import{_ as s,o as i,c as a,R as n}from"./chunks/framework.bMtwhlie.js";const h="/learn-wgpu-zh/assets/figure_no-fbm.WygYK_Pb.png",k="/learn-wgpu-zh/assets/figure_fbm.vQ-eUzh_.png",p="/learn-wgpu-zh/assets/figure_spiky.4lru4RHQ.png",l="/learn-wgpu-zh/assets/figure_work-groups.-0RxSLqe.jpg",u=JSON.parse('{"title":"程序地形","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/tutorial13-terrain/index.md","filePath":"intermediate/tutorial13-terrain/index.md","lastUpdated":1703303099000}'),t={name:"intermediate/tutorial13-terrain/index.md"},e=n("",37),r=[e];function E(d,g,y,F,c,o){return i(),a("div",null,r)}const D=s(t,[["render",E]]);export{u as __pageData,D as default}; diff --git a/assets/intermediate_vertex-animation_index.md.45cd4c66.js b/assets/intermediate_vertex-animation_index.md.19WOMCj-.js similarity index 81% rename from assets/intermediate_vertex-animation_index.md.45cd4c66.js rename to assets/intermediate_vertex-animation_index.md.19WOMCj-.js index 9cab2d289..15eadc87c 100644 --- a/assets/intermediate_vertex-animation_index.md.45cd4c66.js +++ b/assets/intermediate_vertex-animation_index.md.19WOMCj-.js @@ -1 +1 @@ -import{_ as r,E as a,o as s,c as d,J as t,k as e,a as n}from"./chunks/framework.adbf3c9e.js";const v=JSON.parse('{"title":"顶点动画","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/vertex-animation/index.md","filePath":"intermediate/vertex-animation/index.md","lastUpdated":1701933923000}'),c={name:"intermediate/vertex-animation/index.md"},m=e("h1",{id:"顶点动画",tabindex:"-1"},[n("顶点动画 
"),e("a",{class:"header-anchor",href:"#顶点动画","aria-label":'Permalink to "顶点动画"'},"​")],-1),l=e("h2",{id:"示例-顶点网格模拟翻页动画",tabindex:"-1"},[n("示例:顶点网格模拟翻页动画 "),e("a",{class:"header-anchor",href:"#示例-顶点网格模拟翻页动画","aria-label":'Permalink to "示例:顶点网格模拟翻页动画"'},"​")],-1);function _(p,x,h,u,f,b){const o=a("WebGPUExample"),i=a("AutoGithubLink");return s(),d("div",null,[m,l,t(o,{example:"vertex_animation",autoLoad:"{true}"}),t(i)])}const P=r(c,[["render",_]]);export{v as __pageData,P as default}; +import{_ as r,D as a,o as s,c as d,I as t,k as e,a as n}from"./chunks/framework.bMtwhlie.js";const v=JSON.parse('{"title":"顶点动画","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/vertex-animation/index.md","filePath":"intermediate/vertex-animation/index.md","lastUpdated":1703303099000}'),c={name:"intermediate/vertex-animation/index.md"},m=e("h1",{id:"顶点动画",tabindex:"-1"},[n("顶点动画 "),e("a",{class:"header-anchor",href:"#顶点动画","aria-label":'Permalink to "顶点动画"'},"​")],-1),l=e("h2",{id:"示例-顶点网格模拟翻页动画",tabindex:"-1"},[n("示例:顶点网格模拟翻页动画 "),e("a",{class:"header-anchor",href:"#示例-顶点网格模拟翻页动画","aria-label":'Permalink to "示例:顶点网格模拟翻页动画"'},"​")],-1);function _(p,x,h,u,f,b){const o=a("WebGPUExample"),i=a("AutoGithubLink");return s(),d("div",null,[m,l,t(o,{example:"vertex_animation",autoLoad:"{true}"}),t(i)])}const P=r(c,[["render",_]]);export{v as __pageData,P as default}; diff --git a/assets/intermediate_vertex-animation_index.md.45cd4c66.lean.js b/assets/intermediate_vertex-animation_index.md.19WOMCj-.lean.js similarity index 81% rename from assets/intermediate_vertex-animation_index.md.45cd4c66.lean.js rename to assets/intermediate_vertex-animation_index.md.19WOMCj-.lean.js index 9cab2d289..15eadc87c 100644 --- a/assets/intermediate_vertex-animation_index.md.45cd4c66.lean.js +++ b/assets/intermediate_vertex-animation_index.md.19WOMCj-.lean.js @@ -1 +1 @@ -import{_ as r,E as a,o as s,c as d,J as t,k as e,a as n}from"./chunks/framework.adbf3c9e.js";const 
v=JSON.parse('{"title":"顶点动画","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/vertex-animation/index.md","filePath":"intermediate/vertex-animation/index.md","lastUpdated":1701933923000}'),c={name:"intermediate/vertex-animation/index.md"},m=e("h1",{id:"顶点动画",tabindex:"-1"},[n("顶点动画 "),e("a",{class:"header-anchor",href:"#顶点动画","aria-label":'Permalink to "顶点动画"'},"​")],-1),l=e("h2",{id:"示例-顶点网格模拟翻页动画",tabindex:"-1"},[n("示例:顶点网格模拟翻页动画 "),e("a",{class:"header-anchor",href:"#示例-顶点网格模拟翻页动画","aria-label":'Permalink to "示例:顶点网格模拟翻页动画"'},"​")],-1);function _(p,x,h,u,f,b){const o=a("WebGPUExample"),i=a("AutoGithubLink");return s(),d("div",null,[m,l,t(o,{example:"vertex_animation",autoLoad:"{true}"}),t(i)])}const P=r(c,[["render",_]]);export{v as __pageData,P as default}; +import{_ as r,D as a,o as s,c as d,I as t,k as e,a as n}from"./chunks/framework.bMtwhlie.js";const v=JSON.parse('{"title":"顶点动画","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/vertex-animation/index.md","filePath":"intermediate/vertex-animation/index.md","lastUpdated":1703303099000}'),c={name:"intermediate/vertex-animation/index.md"},m=e("h1",{id:"顶点动画",tabindex:"-1"},[n("顶点动画 "),e("a",{class:"header-anchor",href:"#顶点动画","aria-label":'Permalink to "顶点动画"'},"​")],-1),l=e("h2",{id:"示例-顶点网格模拟翻页动画",tabindex:"-1"},[n("示例:顶点网格模拟翻页动画 "),e("a",{class:"header-anchor",href:"#示例-顶点网格模拟翻页动画","aria-label":'Permalink to "示例:顶点网格模拟翻页动画"'},"​")],-1);function _(p,x,h,u,f,b){const o=a("WebGPUExample"),i=a("AutoGithubLink");return s(),d("div",null,[m,l,t(o,{example:"vertex_animation",autoLoad:"{true}"}),t(i)])}const P=r(c,[["render",_]]);export{v as __pageData,P as default}; diff --git a/assets/intermediate_vertex-animation_universal-animation-formula.md.e078e665.js b/assets/intermediate_vertex-animation_universal-animation-formula.md.e078e665.js deleted file mode 100644 index 9270ee471..000000000 --- 
a/assets/intermediate_vertex-animation_universal-animation-formula.md.e078e665.js +++ /dev/null @@ -1,53 +0,0 @@ -import{_ as o,E as s,o as e,c as r,J as n,S as a}from"./chunks/framework.adbf3c9e.js";const d=JSON.parse('{"title":"万能动画公式","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/vertex-animation/universal-animation-formula.md","filePath":"intermediate/vertex-animation/universal-animation-formula.md","lastUpdated":1701933923000}'),t={name:"intermediate/vertex-animation/universal-animation-formula.md"},c=a(`

万能动画公式

要实现不同形态/形状之间的动态变换,核心算法很简单,就是通过构造同等数量的顶点/控制点来实现。

在进行动态变换时,通常不同形态或形状之间的顶点数量会不相等。为了使两边的顶点能够一一对应起来,我们可以通过随机或插值的方式来补充顶点。这种方式不会破坏顶点数较少一边的造型,相当于某些点有了分身。通过对对应顶点的插值计算,就能够实现形态的变换。

此万能动画公式的优点在于它足够简单且通用。无论是对于简单的形状变换还是复杂的动态效果,都可以通过构造同等数量的顶点来实现。而且,运用不同的插值算法,还能灵活地控制形态变换的程度和速度。

示例:Hilbert 曲线

此示例能正常运行在桌面端及 Firefox Nightly.

但 Chrome 118 上存在管线验证的 bug,导致会报如下警告而无法正常运行:

log
Attribute offset (12) with format VertexFormat::Float32x3 (size: 12) doesn't fit in the vertex buffer stride (12).
- - While validating attributes[1].
- - While validating buffers[0].
`,6),D=a(`

代码实现

Hilbert 曲线是一种连续、自避免且自相似的空间填充曲线。

每升一个维度,曲线的顶点数就多 4 倍,基于这个规律,我们用上面的万能动画公式来完成升维/降维变换动画:

rust
pub struct HilbertCurveApp {
-    // 当前曲线与目标曲线的顶点缓冲区
-    vertex_buffers: Vec<wgpu::Buffer>,
-    // 当前曲线的顶点总数
-    curve_vertex_count: usize,
-    // 当前动画帧的索引,用于设置缓冲区的动态偏移
-    animate_index: u32,
-    // 每一个动画阶段的总帧数
-    draw_count: u32,
-    // 目标曲线维度
-    curve_dimention: u32,
-    // 是否为升维动画
-    is_animation_up: bool,
-}

创建两个 ping-pong 顶点缓冲区,它们的大小一样:

rust
let mut vertex_buffers: Vec<wgpu::Buffer> = Vec::with_capacity(2);
-for _ in 0..2 {
-    let buf = app.device.create_buffer(&wgpu::BufferDescriptor {
-        size,
-        usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
-        label: None,
-        mapped_at_creation: false,
-    });
-    vertex_buffers.push(buf);
-}

render() 函数中基于动画迭代情况填充/更新顶点缓冲区:

rust
let mut target = HilbertCurve::new(self.curve_dimention);
-let start = if self.is_animation_up {
-    let mut start = HilbertCurve::new(self.curve_dimention - 1);
-    // 把顶点数翻 4 倍来对应目标维度曲线
-    start.four_times_vertices();
-    start
-} else {
-    target.four_times_vertices();
-    HilbertCurve::new(self.curve_dimention + 1)
-};
-// 更新顶点数
-self.curve_vertex_count = target.vertices.len();
-// 填充顶点 buffer
-for (buf, curve) in self.vertex_buffers.iter().zip(vec![start, target].iter()) {
-    self.app
-        .queue
-        .write_buffer(buf, 0, bytemuck::cast_slice(&curve.vertices));
-}

着色器中完成顶点位置的插值计算:

wgsl
struct HilbertUniform {
-    // 接近目标的比例
-    near_target_ratio: f32,
-};
-@group(0) @binding(0) var<uniform> mvp_mat: MVPMatUniform;
-@group(1) @binding(0) var<uniform> hilbert: HilbertUniform;
-
-@vertex
-fn vs_main(@location(0) pos: vec3f, @location(1) target_pos: vec3f) -> @builtin(position) vec4f {
-   let new_pos = pos + (target_pos - pos) * hilbert.near_target_ratio;
-   return mvp_mat.mvp * vec4<f32>(new_pos, 1.0);
-}

查看完整源码

`,11);function F(y,i,C,A,u,b){const l=s("WebGPUExample"),p=s("AutoGithubLink");return e(),r("div",null,[c,n(l,{example:"hilbert_curve",autoLoad:"{true}"}),D,n(p,{customCodePath:"intermediate/hilbert-curve"})])}const f=o(t,[["render",F]]);export{d as __pageData,f as default}; diff --git a/assets/intermediate_vertex-animation_universal-animation-formula.md.e078e665.lean.js b/assets/intermediate_vertex-animation_universal-animation-formula.md.e078e665.lean.js deleted file mode 100644 index 5c92c7f29..000000000 --- a/assets/intermediate_vertex-animation_universal-animation-formula.md.e078e665.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as o,E as s,o as e,c as r,J as n,S as a}from"./chunks/framework.adbf3c9e.js";const d=JSON.parse('{"title":"万能动画公式","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/vertex-animation/universal-animation-formula.md","filePath":"intermediate/vertex-animation/universal-animation-formula.md","lastUpdated":1701933923000}'),t={name:"intermediate/vertex-animation/universal-animation-formula.md"},c=a("",6),D=a("",11);function F(y,i,C,A,u,b){const l=s("WebGPUExample"),p=s("AutoGithubLink");return e(),r("div",null,[c,n(l,{example:"hilbert_curve",autoLoad:"{true}"}),D,n(p,{customCodePath:"intermediate/hilbert-curve"})])}const f=o(t,[["render",F]]);export{d as __pageData,f as default}; diff --git a/assets/intermediate_vertex-animation_universal-animation-formula.md.nYbvkNKu.js b/assets/intermediate_vertex-animation_universal-animation-formula.md.nYbvkNKu.js new file mode 100644 index 000000000..597f85446 --- /dev/null +++ b/assets/intermediate_vertex-animation_universal-animation-formula.md.nYbvkNKu.js @@ -0,0 +1,53 @@ +import{_ as l,D as s,o as p,c as k,I as i,R as a}from"./chunks/framework.bMtwhlie.js";const 
o=JSON.parse('{"title":"万能动画公式","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/vertex-animation/universal-animation-formula.md","filePath":"intermediate/vertex-animation/universal-animation-formula.md","lastUpdated":1703303099000}'),t={name:"intermediate/vertex-animation/universal-animation-formula.md"},e=a(`

万能动画公式

要实现不同形态/形状之间的动态变换,核心算法很简单,就是通过构造同等数量的顶点/控制点来实现。

在进行动态变换时,通常不同形态或形状之间的顶点数量会不相等。为了使两边的顶点能够一一对应起来,我们可以通过随机或插值的方式来补充顶点。这种方式不会破坏顶点数较少一边的造型,相当于某些点有了分身。通过对对应顶点的插值计算,就能够实现形态的变换。

此万能动画公式的优点在于它足够简单且通用。无论是对于简单的形状变换还是复杂的动态效果,都可以通过构造同等数量的顶点来实现。而且,运用不同的插值算法,还能灵活地控制形态变换的程度和速度。

示例:Hilbert 曲线

此示例能正常运行在桌面端及 Firefox Nightly.

但 Chrome 118 上存在管线验证的 bug,导致会报如下警告而无法正常运行:

log
Attribute offset (12) with format VertexFormat::Float32x3 (size: 12) doesn't fit in the vertex buffer stride (12).
+ - While validating attributes[1].
+ - While validating buffers[0].
`,6),r=a(`

代码实现

Hilbert 曲线是一种连续、自避免且自相似的空间填充曲线。

每升一个维度,曲线的顶点数就多 4 倍,基于这个规律,我们用上面的万能动画公式来完成升维/降维变换动画:

rust
pub struct HilbertCurveApp {
+    // 当前曲线与目标曲线的顶点缓冲区
+    vertex_buffers: Vec<wgpu::Buffer>,
+    // 当前曲线的顶点总数
+    curve_vertex_count: usize,
+    // 当前动画帧的索引,用于设置缓冲区的动态偏移
+    animate_index: u32,
+    // 每一个动画阶段的总帧数
+    draw_count: u32,
+    // 目标曲线维度
+    curve_dimention: u32,
+    // 是否为升维动画
+    is_animation_up: bool,
+}

创建两个 ping-pong 顶点缓冲区,它们的大小一样:

rust
let mut vertex_buffers: Vec<wgpu::Buffer> = Vec::with_capacity(2);
+for _ in 0..2 {
+    let buf = app.device.create_buffer(&wgpu::BufferDescriptor {
+        size,
+        usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
+        label: None,
+        mapped_at_creation: false,
+    });
+    vertex_buffers.push(buf);
+}

render() 函数中基于动画迭代情况填充/更新顶点缓冲区:

rust
let mut target = HilbertCurve::new(self.curve_dimention);
+let start = if self.is_animation_up {
+    let mut start = HilbertCurve::new(self.curve_dimention - 1);
+    // 把顶点数翻 4 倍来对应目标维度曲线
+    start.four_times_vertices();
+    start
+} else {
+    target.four_times_vertices();
+    HilbertCurve::new(self.curve_dimention + 1)
+};
+// 更新顶点数
+self.curve_vertex_count = target.vertices.len();
+// 填充顶点 buffer
+for (buf, curve) in self.vertex_buffers.iter().zip(vec![start, target].iter()) {
+    self.app
+        .queue
+        .write_buffer(buf, 0, bytemuck::cast_slice(&curve.vertices));
+}

着色器中完成顶点位置的插值计算:

wgsl
struct HilbertUniform {
+    // 接近目标的比例
+    near_target_ratio: f32,
+};
+@group(0) @binding(0) var<uniform> mvp_mat: MVPMatUniform;
+@group(1) @binding(0) var<uniform> hilbert: HilbertUniform;
+
+@vertex
+fn vs_main(@location(0) pos: vec3f, @location(1) target_pos: vec3f) -> @builtin(position) vec4f {
+   let new_pos = pos + (target_pos - pos) * hilbert.near_target_ratio;
+   return mvp_mat.mvp * vec4<f32>(new_pos, 1.0);
+}

查看完整源码

`,11);function E(d,g,y,F,c,u){const n=s("WebGPUExample"),h=s("AutoGithubLink");return p(),k("div",null,[e,i(n,{example:"hilbert_curve",autoLoad:"{true}"}),r,i(h,{customCodePath:"intermediate/hilbert-curve"})])}const m=l(t,[["render",E]]);export{o as __pageData,m as default}; diff --git a/assets/intermediate_vertex-animation_universal-animation-formula.md.nYbvkNKu.lean.js b/assets/intermediate_vertex-animation_universal-animation-formula.md.nYbvkNKu.lean.js new file mode 100644 index 000000000..e5ed413a2 --- /dev/null +++ b/assets/intermediate_vertex-animation_universal-animation-formula.md.nYbvkNKu.lean.js @@ -0,0 +1 @@ +import{_ as l,D as s,o as p,c as k,I as i,R as a}from"./chunks/framework.bMtwhlie.js";const o=JSON.parse('{"title":"万能动画公式","description":"","frontmatter":{},"headers":[],"relativePath":"intermediate/vertex-animation/universal-animation-formula.md","filePath":"intermediate/vertex-animation/universal-animation-formula.md","lastUpdated":1703303099000}'),t={name:"intermediate/vertex-animation/universal-animation-formula.md"},e=a("",6),r=a("",11);function E(d,g,y,F,c,u){const n=s("WebGPUExample"),h=s("AutoGithubLink");return p(),k("div",null,[e,i(n,{example:"hilbert_curve",autoLoad:"{true}"}),r,i(h,{customCodePath:"intermediate/hilbert-curve"})])}const m=l(t,[["render",E]]);export{o as __pageData,m as default}; diff --git a/assets/left_right_hand.5c9b4bc7.gif b/assets/left_right_hand._ZtWXizh.gif similarity index 100% rename from assets/left_right_hand.5c9b4bc7.gif rename to assets/left_right_hand._ZtWXizh.gif diff --git a/assets/lib.a4a41bda.png b/assets/lib.uClQAVCC.png similarity index 100% rename from assets/lib.a4a41bda.png rename to assets/lib.uClQAVCC.png diff --git a/assets/light-in-scene.630f9dca.png b/assets/light-in-scene.J0TJXHw7.png similarity index 100% rename from assets/light-in-scene.630f9dca.png rename to assets/light-in-scene.J0TJXHw7.png diff --git a/assets/links.b1c5f8bb.png b/assets/links.ykRZQt6t.png similarity index 100% rename 
from assets/links.b1c5f8bb.png rename to assets/links.ykRZQt6t.png diff --git a/assets/local_invocation_id.2af66c96.png b/assets/local_invocation_id.JaKnNG8K.png similarity index 100% rename from assets/local_invocation_id.2af66c96.png rename to assets/local_invocation_id.JaKnNG8K.png diff --git a/assets/name.8d9431f3.png b/assets/name.E7OVs5to.png similarity index 100% rename from assets/name.8d9431f3.png rename to assets/name.E7OVs5to.png diff --git a/assets/no-clear.304e22f0.png b/assets/no-clear.CuVpOO_N.png similarity index 100% rename from assets/no-clear.304e22f0.png rename to assets/no-clear.CuVpOO_N.png diff --git a/assets/no_srgb.c60c78b6.png b/assets/no_srgb.TzdPNppd.png similarity index 100% rename from assets/no_srgb.c60c78b6.png rename to assets/no_srgb.TzdPNppd.png diff --git a/assets/normal-scale-issue.0959834f.png b/assets/normal-scale-issue.o_duVY_I.png similarity index 100% rename from assets/normal-scale-issue.0959834f.png rename to assets/normal-scale-issue.o_duVY_I.png diff --git a/assets/normal_diagram.35def195.png b/assets/normal_diagram._dkbBVWT.png similarity index 100% rename from assets/normal_diagram.35def195.png rename to assets/normal_diagram._dkbBVWT.png diff --git a/assets/normal_mapping_correct.c594f67a.png b/assets/normal_mapping_correct.EKHmPX5_.png similarity index 100% rename from assets/normal_mapping_correct.c594f67a.png rename to assets/normal_mapping_correct.EKHmPX5_.png diff --git a/assets/normal_mapping_wrong.a2c5f143.png b/assets/normal_mapping_wrong.1lc7KaJ-.png similarity index 100% rename from assets/normal_mapping_wrong.a2c5f143.png rename to assets/normal_mapping_wrong.1lc7KaJ-.png diff --git a/assets/normal_not_rotated.7327fe66.png b/assets/normal_not_rotated.X_bk5vl-.png similarity index 100% rename from assets/normal_not_rotated.7327fe66.png rename to assets/normal_not_rotated.X_bk5vl-.png diff --git a/assets/on_android.fd40f73c.png b/assets/on_android.ZlmWizZN.png similarity index 100% rename from 
assets/on_android.fd40f73c.png rename to assets/on_android.ZlmWizZN.png diff --git a/assets/on_ios.6db96077.png b/assets/on_ios.tACZQwGC.png similarity index 100% rename from assets/on_ios.6db96077.png rename to assets/on_ios.tACZQwGC.png diff --git a/assets/option.5b8443fb.png b/assets/option.tgsWiNAn.png similarity index 100% rename from assets/option.5b8443fb.png rename to assets/option.tgsWiNAn.png diff --git a/assets/output.d9531c62.gif b/assets/output.U2CgAQ0R.gif similarity index 100% rename from assets/output.d9531c62.gif rename to assets/output.U2CgAQ0R.gif diff --git a/assets/pentagon.23871f8a.png b/assets/pentagon.GLgl43gd.png similarity index 100% rename from assets/pentagon.23871f8a.png rename to assets/pentagon.GLgl43gd.png diff --git a/assets/pong.9a7ca7b9.png b/assets/pong.ct65uIIn.png similarity index 100% rename from assets/pong.9a7ca7b9.png rename to assets/pong.ct65uIIn.png diff --git a/assets/project.2c0f8089.png b/assets/project.OJieJOtf.png similarity index 100% rename from assets/project.2c0f8089.png rename to assets/project.OJieJOtf.png diff --git a/assets/realtime-left.fceb4675.jpg b/assets/realtime-left.nZSwmtCo.jpg similarity index 100% rename from assets/realtime-left.fceb4675.jpg rename to assets/realtime-left.nZSwmtCo.jpg diff --git a/assets/realtime.9dd2da17.jpg b/assets/realtime.i8oUMXdC.jpg similarity index 100% rename from assets/realtime.9dd2da17.jpg rename to assets/realtime.i8oUMXdC.jpg diff --git a/assets/render_doc_output.7cc352a9.png b/assets/render_doc_output.aiG7KCiC.png similarity index 100% rename from assets/render_doc_output.7cc352a9.png rename to assets/render_doc_output.aiG7KCiC.png diff --git a/assets/resource-left.1d783628.jpg b/assets/resource-left.a2vJQEkx.jpg similarity index 100% rename from assets/resource-left.1d783628.jpg rename to assets/resource-left.a2vJQEkx.jpg diff --git a/assets/resource-right.c404daaa.jpg b/assets/resource-right.Kw0F1aVF.jpg similarity index 100% rename from 
assets/resource-right.c404daaa.jpg rename to assets/resource-right.Kw0F1aVF.jpg diff --git a/assets/result.e293ca98.png b/assets/result.Ny0pk3iA.png similarity index 100% rename from assets/result.e293ca98.png rename to assets/result.Ny0pk3iA.png diff --git a/assets/results.d52e3856.png b/assets/results.XwEj8aho.png similarity index 100% rename from assets/results.d52e3856.png rename to assets/results.XwEj8aho.png diff --git a/assets/rightside-up.018bc290.png b/assets/rightside-up.yUas2Hsc.png similarity index 100% rename from assets/rightside-up.018bc290.png rename to assets/rightside-up.yUas2Hsc.png diff --git a/assets/run.84b2b947.png b/assets/run.anexA9XQ.png similarity index 100% rename from assets/run.84b2b947.png rename to assets/run.anexA9XQ.png diff --git a/assets/screenshot.342a32a0.png b/assets/screenshot.hdnOjorV.png similarity index 100% rename from assets/screenshot.342a32a0.png rename to assets/screenshot.hdnOjorV.png diff --git a/assets/search.e005d804.png b/assets/search.XvqND0E-.png similarity index 100% rename from assets/search.e005d804.png rename to assets/search.XvqND0E-.png diff --git a/assets/showcase_alignment.md.7bc0b30c.js b/assets/showcase_alignment.md.7bc0b30c.js deleted file mode 100644 index 34c37298b..000000000 --- a/assets/showcase_alignment.md.7bc0b30c.js +++ /dev/null @@ -1,21 +0,0 @@ -import{_ as s,o as e,c as n,S as a}from"./chunks/framework.adbf3c9e.js";const y=JSON.parse('{"title":"Memory Layout in WGSL","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/alignment.md","filePath":"showcase/alignment.md","lastUpdated":1701933923000}'),t={name:"showcase/alignment.md"},l=a(`

Memory Layout in WGSL

This page is currently being reworked. I want to understand the topics a bit better, but as 0.12 is out I want to release what I have for now.

Alignment of vertex and index buffers

Vertex buffers require defining a VertexBufferLayout, so the memory alignment is whatever you tell WebGPU it should be. This can be really convenient for keeping down memory usage on the GPU.

The Index Buffer uses the alignment of whatever primitive type you specify via the IndexFormat you pass into RenderEncoder::set_index_buffer().

Alignment of Uniform and Storage buffers

GPUs are designed to process thousands of pixels in parallel. In order to achieve this, some sacrifices had to be made. Graphics hardware likes to have all the bytes you intend on processing aligned by powers of 2. The exact specifics of why this is are beyond my level of knowledge, but it's important to know so that you can troubleshoot why your shaders aren't working.

Let's take a look at the following table:


TypeAlignment in BytesSize in Bytes
scalar (i32, u32, f32)44
vec2<T>88
vec3<T>1612
vec4<T>1616

You can see for vec3 the alignment is the next power of 2 from the size, 16. This can catch beginners (and even veterans) off guard as it's not the most intuitive. This becomes especially important when we start laying out structs. Take the light struct from the lighting tutorial:

You can see the full table of the alignments in section 4.3.7.1 of the WGSL spec

rust
struct Light {
-    position: vec3f,
-    color: vec3f,
-}

So what's the alignment of this struct? Your first guess would be that it's the sum of the alignments of the individual fields. That might make sense if we were in Rust-land, but in shader-land, it's a little more involved. The alignment for a given struct is given by the following equation:

// S is the struct in question
-// M is a member of the struct
-AlignOf(S) = max(AlignOfMember(S, M1), ... , AlignOfMember(S, Mn))

Basically, the alignment of the struct is the maximum of the alignments of the members of the struct. This means that:

AlignOf(Light)
-    = max(AlignOfMember(Light, position), AlignOfMember(Light, color))
-    = max(16, 16)
-    = 16

This is why the LightUniform has those padding fields. WGPU won't accept it if the data is not aligned correctly.

How to deal with alignment issues

In general, 16 is the max alignment you'll see. In that case, you might think that we should be able to do something like the following:

rust
#[repr(C, align(16))]
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct LightUniform {
-    position: [f32; 3],
-    color: [f32; 3],
-}

But this won't compile. The bytemuck crate doesn't work with structs with implicit padding bytes. Rust can't guarantee that the memory between the fields has been initialized properly. This gave me an error when I tried it:

error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
-   --> code/intermediate/tutorial10-lighting/src/main.rs:246:8
-    |
-246 | struct LightUniform {
-    |        ^^^^^^^^^^^^
-    |
-    = note: source type: \`LightUniform\` (256 bits)
-    = note: target type: \`_::{closure#0}::TypeWithoutPadding\` (192 bits)

Additional resources

If you're looking for more information check out the write-up by @teoxoy.

`,25),o=[l];function r(p,i,c,d,h,u){return e(),n("div",null,o)}const b=s(t,[["render",r]]);export{y as __pageData,b as default}; diff --git a/assets/showcase_alignment.md.7bc0b30c.lean.js b/assets/showcase_alignment.md.7bc0b30c.lean.js deleted file mode 100644 index 1e622b1ec..000000000 --- a/assets/showcase_alignment.md.7bc0b30c.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as s,o as e,c as n,S as a}from"./chunks/framework.adbf3c9e.js";const y=JSON.parse('{"title":"Memory Layout in WGSL","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/alignment.md","filePath":"showcase/alignment.md","lastUpdated":1701933923000}'),t={name:"showcase/alignment.md"},l=a("",25),o=[l];function r(p,i,c,d,h,u){return e(),n("div",null,o)}const b=s(t,[["render",r]]);export{y as __pageData,b as default}; diff --git a/assets/showcase_alignment.md.WbW77ARR.js b/assets/showcase_alignment.md.WbW77ARR.js new file mode 100644 index 000000000..fa69de221 --- /dev/null +++ b/assets/showcase_alignment.md.WbW77ARR.js @@ -0,0 +1,21 @@ +import{_ as s,o as e,c as a,R as i}from"./chunks/framework.bMtwhlie.js";const k=JSON.parse('{"title":"Memory Layout in WGSL","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/alignment.md","filePath":"showcase/alignment.md","lastUpdated":1703303099000}'),n={name:"showcase/alignment.md"},t=i(`

Memory Layout in WGSL

This page is currently being reworked. I want to understand the topics a bit better, but as 0.12 is out I want to release what I have for now.

Alignment of vertex and index buffers

Vertex buffers require defining a VertexBufferLayout, so the memory alignment is whatever you tell WebGPU it should be. This can be really convenient for keeping down memory usage on the GPU.

The Index Buffer uses the alignment of whatever primitive type you specify via the IndexFormat you pass into RenderEncoder::set_index_buffer().

Alignment of Uniform and Storage buffers

GPUs are designed to process thousands of pixels in parallel. In order to achieve this, some sacrifices had to be made. Graphics hardware likes to have all the bytes you intend on processing aligned by powers of 2. The exact specifics of why this is are beyond my level of knowledge, but it's important to know so that you can troubleshoot why your shaders aren't working.

Let's take a look at the following table:


TypeAlignment in BytesSize in Bytes
scalar (i32, u32, f32)44
vec2<T>88
vec3<T>1612
vec4<T>1616

You can see for vec3 the alignment is the next power of 2 from the size, 16. This can catch beginners (and even veterans) off guard as it's not the most intuitive. This becomes especially important when we start laying out structs. Take the light struct from the lighting tutorial:

You can see the full table of the alignments in section 4.3.7.1 of the WGSL spec

rust
struct Light {
+    position: vec3f,
+    color: vec3f,
+}

So what's the alignment of this struct? Your first guess would be that it's the sum of the alignments of the individual fields. That might make sense if we were in Rust-land, but in shader-land, it's a little more involved. The alignment for a given struct is given by the following equation:

// S is the struct in question
+// M is a member of the struct
+AlignOf(S) = max(AlignOfMember(S, M1), ... , AlignOfMember(S, Mn))

Basically, the alignment of the struct is the maximum of the alignments of the members of the struct. This means that:

AlignOf(Light)
+    = max(AlignOfMember(Light, position), AlignOfMember(Light, color))
+    = max(16, 16)
+    = 16

This is why the LightUniform has those padding fields. WGPU won't accept it if the data is not aligned correctly.

How to deal with alignment issues

In general, 16 is the max alignment you'll see. In that case, you might think that we should be able to do something like the following:

rust
#[repr(C, align(16))]
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct LightUniform {
+    position: [f32; 3],
+    color: [f32; 3],
+}

But this won't compile. The bytemuck crate doesn't work with structs with implicit padding bytes. Rust can't guarantee that the memory between the fields has been initialized properly. This gave me an error when I tried it:

error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
+   --> code/intermediate/tutorial10-lighting/src/main.rs:246:8
+    |
+246 | struct LightUniform {
+    |        ^^^^^^^^^^^^
+    |
+    = note: source type: \`LightUniform\` (256 bits)
+    = note: target type: \`_::{closure#0}::TypeWithoutPadding\` (192 bits)

Additional resources

If you're looking for more information check out the write-up by @teoxoy.

`,25),l=[t];function r(p,h,o,d,c,u){return e(),a("div",null,l)}const m=s(n,[["render",r]]);export{k as __pageData,m as default}; diff --git a/assets/showcase_alignment.md.WbW77ARR.lean.js b/assets/showcase_alignment.md.WbW77ARR.lean.js new file mode 100644 index 000000000..cd9737d79 --- /dev/null +++ b/assets/showcase_alignment.md.WbW77ARR.lean.js @@ -0,0 +1 @@ +import{_ as s,o as e,c as a,R as i}from"./chunks/framework.bMtwhlie.js";const k=JSON.parse('{"title":"Memory Layout in WGSL","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/alignment.md","filePath":"showcase/alignment.md","lastUpdated":1703303099000}'),n={name:"showcase/alignment.md"},t=i("",25),l=[t];function r(p,h,o,d,c,u){return e(),a("div",null,l)}const m=s(n,[["render",r]]);export{k as __pageData,m as default}; diff --git a/assets/showcase_compute_index.md.f0a051d7.js b/assets/showcase_compute_index.md.f0a051d7.js deleted file mode 100644 index a5fffa17f..000000000 --- a/assets/showcase_compute_index.md.f0a051d7.js +++ /dev/null @@ -1,95 +0,0 @@ -import{_ as n,E as a,o as e,c as l,J as p,S as o}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/corruption.634c88d7.png",r="/learn-wgpu-zh/assets/black_triangles.7cbbe4d6.png",c="/learn-wgpu-zh/assets/render_doc_output.7cc352a9.png",i="/learn-wgpu-zh/assets/results.d52e3856.png",g=JSON.parse('{"title":"Compute Example: Tangents and Bitangents","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/compute/index.md","filePath":"showcase/compute/index.md","lastUpdated":1701933923000}'),C={name:"showcase/compute/index.md"},y=o(`

Compute Example: Tangents and Bitangents

This proved more difficult than I anticipated. The first problem I encountered was some vertex data corruption due to the shader reading my vertex data incorrectly. I was using the ModelVertex struct I used in the normal mapping tutorial.

rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-pub struct ModelVertex {
-    position: [f32; 3],
-    tex_coords: [f32; 2],
-    normal: [f32; 3],
-    tangent: [f32; 3],
-    bitangent: [f32; 3],
-}

This structure works perfectly fine when used as a vertex buffer. Using it as a storage buffer proved less convenient. My previous code used a GLSL struct similar to my ModelVertex.

shader
struct ModelVertex {
-    vec3 position;
-    vec2 tex_coords;
-    vec3 normal;
-    vec3 tangent;
-    vec3 bitangent;
-};

At first glance, this seems just fine, but OpenGL experts would likely see a problem with the structure. Our fields aren't aligned properly to support the std430 alignment that storage buffers require. I won't get into detail but you can check out the alignment showcase if you want to know more. To summarize, the vec2 for the tex_coords was messing up the byte alignment, corrupting the vertex data resulting in the following:

./corruption.png

I could have fixed this by adding a padding field after tex_coords on the Rust side, but that would require modifying the VertexBufferLayout. I ended up solving this problem by using the components of the vectors directly which resulted in a struct like this:

shader
struct ModelVertex {
-    float x; float y; float z;
-    float uv; float uw;
-    float nx; float ny; float nz;
-    float tx; float ty; float tz;
-    float bx; float by; float bz;
-};

Since std430 will use the alignment of the largest element of the struct, using all floats means the struct will be aligned to 4 bytes. This is alignment matches what ModelVertex uses in Rust. This was kind of a pain to work with, but it fixed the corruption issue.

The second problem required me to rethink how I was computing the tangent and bitangent. The previous algorithm I was using only computed the tangent and bitangent for each triangle and set all the vertices in that triangle to use the same tangent and bitangent. While this is fine in a single-threaded context, the code breaks down when trying to compute the triangles in parallel. The reason is that multiple triangles can share the same vertices. This means that when we go to save the resulting tangents, we inevitably end up trying to write to the same vertex from multiple different threads which is a big no no. You can see the issue with this method below:

./black_triangles.png

Those black triangles were the result of multiple GPU threads trying to modify the same vertices. Looking at the data in Render Doc I could see that the tangents and bitangents were garbage numbers such as NaN.

./render_doc_output.png

While on the CPU we could introduce a synchronization primitive such as a Mutex to fix this issue, AFAIK there isn't really such a thing on the GPU. Instead, I decided to swap my code to work with each vertex individually. There are some hurdles with that, but those will be easier to explain in code. Let's start with the main function.

shader
void main() {
-    uint vertexIndex = gl_GlobalInvocationID.x;
-    ModelVertex result = calcTangentBitangent(vertexIndex);
-    dstVertices[vertexIndex] = result;
-}

We use the gl_GlobalInvocationID.x to get the index of the vertex we want to compute the tangents for. I opted to put the actual calculation into its own method. Let's take a look at that.

shader
ModelVertex calcTangentBitangent(uint vertexIndex) {
-    ModelVertex v = srcVertices[vertexIndex];
-
-    vec3 tangent = vec3(0);
-    vec3 bitangent = vec3(0);
-    uint trianglesIncluded = 0;
-
-    // Find the triangles that use v
-    //  * Loop over every triangle (i + 3)
-    for (uint i = 0; i < numIndices; i += 3) {
-        uint index0 = indices[i];
-        uint index1 = indices[i+1];
-        uint index2 = indices[i+2];
-
-        // Only perform the calculation if one of the indices
-        // matches our vertexIndex
-        if (index0 == vertexIndex || index1 == vertexIndex || index2 == vertexIndex) {
-            ModelVertex v0 = srcVertices[index0];
-            ModelVertex v1 = srcVertices[index1];
-            ModelVertex v2 = srcVertices[index2];
-
-            vec3 pos0 = getPos(v0);
-            vec3 pos1 = getPos(v1);
-            vec3 pos2 = getPos(v2);
-
-            vec2 uv0 = getUV(v0);
-            vec2 uv1 = getUV(v1);
-            vec2 uv2 = getUV(v2);
-
-            vec3 delta_pos1 = pos1 - pos0;
-            vec3 delta_pos2 = pos2 - pos0;
-
-            vec2 delta_uv1 = uv1 - uv0;
-            vec2 delta_uv2 = uv2 - uv0;
-
-            float r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
-            tangent += (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
-            bitangent += (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r; 
-            trianglesIncluded += 1;
-        }
-        
-    }
-
-    // Average the tangent and bitangents
-    if (trianglesIncluded > 0) {
-        tangent /= trianglesIncluded;
-        bitangent /= trianglesIncluded;
-        tangent = normalize(tangent);
-        bitangent = normalize(bitangent);
-    }
-
-    // Save the results
-    v.tx = tangent.x;
-    v.ty = tangent.y;
-    v.tz = tangent.z;
-    v.bx = bitangent.x;
-    v.by = bitangent.y;
-    v.bz = bitangent.z;
-
-    return v;
-}

Possible Improvements

Looping over every triangle for every vertex is likely raising some red flags for some of you. In a single-threaded context, this algorithm would end up being O(N*M). As we are utilizing the high number of threads available to our GPU, this is less of an issue, but it still means our GPU is burning more cycles than it needs to.

One way I came up with to possibly improve performance is to store the index of each triangle in a hash map like structure with the vertex index as keys. Here's some pseudo code:

rust
for t in 0..indices.len() / 3 {
-    triangle_map[indices[t * 3]].push(t);
-    triangle_map.push((indices[t * 3 + 1], t);
-    triangle_map.push((indices[t * 3 + 2], t);
-}

We'd then need to flatten this structure to pass it to the GPU. We'd also need a second array to index the first.

rust
for (i, (_v, t_list)) in triangle_map.iter().enumerate() {
-    triangle_map_indices.push(TriangleMapIndex { 
-        start: i,
-        len: t_list.len(),
-    });
-    flat_triangle_map.extend(t_list);
-}

I ultimately decided against this method as it was more complicated, and I haven't had time to benchmark it to see if it's faster than the simple method.

Results

The tangents and bitangents are now getting calculated correctly and on the GPU!

./results.png

',28);function A(u,D,d,b,F,m){const s=a("AutoGithubLink");return e(),l("div",null,[y,p(s)])}const v=n(C,[["render",A]]);export{g as __pageData,v as default}; diff --git a/assets/showcase_compute_index.md.f0a051d7.lean.js b/assets/showcase_compute_index.md.f0a051d7.lean.js deleted file mode 100644 index 38b72abde..000000000 --- a/assets/showcase_compute_index.md.f0a051d7.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as n,E as a,o as e,c as l,J as p,S as o}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/corruption.634c88d7.png",r="/learn-wgpu-zh/assets/black_triangles.7cbbe4d6.png",c="/learn-wgpu-zh/assets/render_doc_output.7cc352a9.png",i="/learn-wgpu-zh/assets/results.d52e3856.png",g=JSON.parse('{"title":"Compute Example: Tangents and Bitangents","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/compute/index.md","filePath":"showcase/compute/index.md","lastUpdated":1701933923000}'),C={name:"showcase/compute/index.md"},y=o("",28);function A(u,D,d,b,F,m){const s=a("AutoGithubLink");return e(),l("div",null,[y,p(s)])}const v=n(C,[["render",A]]);export{g as __pageData,v as default}; diff --git a/assets/showcase_compute_index.md.toRpHUeP.js b/assets/showcase_compute_index.md.toRpHUeP.js new file mode 100644 index 000000000..f5721858d --- /dev/null +++ b/assets/showcase_compute_index.md.toRpHUeP.js @@ -0,0 +1,95 @@ +import{_ as i,D as a,o as n,c as e,I as l,R as t}from"./chunks/framework.bMtwhlie.js";const p="/learn-wgpu-zh/assets/corruption.i47wUppK.png",h="/learn-wgpu-zh/assets/black_triangles.n_V-Q8yK.png",k="/learn-wgpu-zh/assets/render_doc_output.aiG7KCiC.png",r="/learn-wgpu-zh/assets/results.XwEj8aho.png",C=JSON.parse('{"title":"Compute Example: Tangents and Bitangents","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/compute/index.md","filePath":"showcase/compute/index.md","lastUpdated":1703303099000}'),E={name:"showcase/compute/index.md"},d=t(`

Compute Example: Tangents and Bitangents

This proved more difficult than I anticipated. The first problem I encountered was some vertex data corruption due to the shader reading my vertex data incorrectly. I was using the ModelVertex struct I used in the normal mapping tutorial.

rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+pub struct ModelVertex {
+    position: [f32; 3],
+    tex_coords: [f32; 2],
+    normal: [f32; 3],
+    tangent: [f32; 3],
+    bitangent: [f32; 3],
+}

This structure works perfectly fine when used as a vertex buffer. Using it as a storage buffer proved less convenient. My previous code used a GLSL struct similar to my ModelVertex.

shader
struct ModelVertex {
+    vec3 position;
+    vec2 tex_coords;
+    vec3 normal;
+    vec3 tangent;
+    vec3 bitangent;
+};

At first glance, this seems just fine, but OpenGL experts would likely see a problem with the structure. Our fields aren't aligned properly to support the std430 alignment that storage buffers require. I won't get into detail but you can check out the alignment showcase if you want to know more. To summarize, the vec2 for the tex_coords was messing up the byte alignment, corrupting the vertex data resulting in the following:

./corruption.png

I could have fixed this by adding a padding field after tex_coords on the Rust side, but that would require modifying the VertexBufferLayout. I ended up solving this problem by using the components of the vectors directly which resulted in a struct like this:

shader
struct ModelVertex {
+    float x; float y; float z;
+    float uv; float uw;
+    float nx; float ny; float nz;
+    float tx; float ty; float tz;
+    float bx; float by; float bz;
+};

Since std430 will use the alignment of the largest element of the struct, using all floats means the struct will be aligned to 4 bytes. This is alignment matches what ModelVertex uses in Rust. This was kind of a pain to work with, but it fixed the corruption issue.

The second problem required me to rethink how I was computing the tangent and bitangent. The previous algorithm I was using only computed the tangent and bitangent for each triangle and set all the vertices in that triangle to use the same tangent and bitangent. While this is fine in a single-threaded context, the code breaks down when trying to compute the triangles in parallel. The reason is that multiple triangles can share the same vertices. This means that when we go to save the resulting tangents, we inevitably end up trying to write to the same vertex from multiple different threads which is a big no no. You can see the issue with this method below:

./black_triangles.png

Those black triangles were the result of multiple GPU threads trying to modify the same vertices. Looking at the data in Render Doc I could see that the tangents and bitangents were garbage numbers such as NaN.

./render_doc_output.png

While on the CPU we could introduce a synchronization primitive such as a Mutex to fix this issue, AFAIK there isn't really such a thing on the GPU. Instead, I decided to swap my code to work with each vertex individually. There are some hurdles with that, but those will be easier to explain in code. Let's start with the main function.

shader
void main() {
+    uint vertexIndex = gl_GlobalInvocationID.x;
+    ModelVertex result = calcTangentBitangent(vertexIndex);
+    dstVertices[vertexIndex] = result;
+}

We use the gl_GlobalInvocationID.x to get the index of the vertex we want to compute the tangents for. I opted to put the actual calculation into its own method. Let's take a look at that.

shader
ModelVertex calcTangentBitangent(uint vertexIndex) {
+    ModelVertex v = srcVertices[vertexIndex];
+
+    vec3 tangent = vec3(0);
+    vec3 bitangent = vec3(0);
+    uint trianglesIncluded = 0;
+
+    // Find the triangles that use v
+    //  * Loop over every triangle (i + 3)
+    for (uint i = 0; i < numIndices; i += 3) {
+        uint index0 = indices[i];
+        uint index1 = indices[i+1];
+        uint index2 = indices[i+2];
+
+        // Only perform the calculation if one of the indices
+        // matches our vertexIndex
+        if (index0 == vertexIndex || index1 == vertexIndex || index2 == vertexIndex) {
+            ModelVertex v0 = srcVertices[index0];
+            ModelVertex v1 = srcVertices[index1];
+            ModelVertex v2 = srcVertices[index2];
+
+            vec3 pos0 = getPos(v0);
+            vec3 pos1 = getPos(v1);
+            vec3 pos2 = getPos(v2);
+
+            vec2 uv0 = getUV(v0);
+            vec2 uv1 = getUV(v1);
+            vec2 uv2 = getUV(v2);
+
+            vec3 delta_pos1 = pos1 - pos0;
+            vec3 delta_pos2 = pos2 - pos0;
+
+            vec2 delta_uv1 = uv1 - uv0;
+            vec2 delta_uv2 = uv2 - uv0;
+
+            float r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
+            tangent += (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
+            bitangent += (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r; 
+            trianglesIncluded += 1;
+        }
+        
+    }
+
+    // Average the tangent and bitangents
+    if (trianglesIncluded > 0) {
+        tangent /= trianglesIncluded;
+        bitangent /= trianglesIncluded;
+        tangent = normalize(tangent);
+        bitangent = normalize(bitangent);
+    }
+
+    // Save the results
+    v.tx = tangent.x;
+    v.ty = tangent.y;
+    v.tz = tangent.z;
+    v.bx = bitangent.x;
+    v.by = bitangent.y;
+    v.bz = bitangent.z;
+
+    return v;
+}

Possible Improvements

Looping over every triangle for every vertex is likely raising some red flags for some of you. In a single-threaded context, this algorithm would end up being O(N*M). As we are utilizing the high number of threads available to our GPU, this is less of an issue, but it still means our GPU is burning more cycles than it needs to.

One way I came up with to possibly improve performance is to store the index of each triangle in a hash map like structure with the vertex index as keys. Here's some pseudo code:

rust
for t in 0..indices.len() / 3 {
+    triangle_map[indices[t * 3]].push(t);
+    triangle_map.push((indices[t * 3 + 1], t);
+    triangle_map.push((indices[t * 3 + 2], t);
+}

We'd then need to flatten this structure to pass it to the GPU. We'd also need a second array to index the first.

rust
for (i, (_v, t_list)) in triangle_map.iter().enumerate() {
+    triangle_map_indices.push(TriangleMapIndex { 
+        start: i,
+        len: t_list.len(),
+    });
+    flat_triangle_map.extend(t_list);
+}

I ultimately decided against this method as it was more complicated, and I haven't had time to benchmark it to see if it's faster than the simple method.

Results

The tangents and bitangents are now getting calculated correctly and on the GPU!

./results.png

',28);function g(c,o,y,u,b,m){const s=a("AutoGithubLink");return n(),e("div",null,[d,l(s)])}const v=i(E,[["render",g]]);export{C as __pageData,v as default}; diff --git a/assets/showcase_compute_index.md.toRpHUeP.lean.js b/assets/showcase_compute_index.md.toRpHUeP.lean.js new file mode 100644 index 000000000..abbdee191 --- /dev/null +++ b/assets/showcase_compute_index.md.toRpHUeP.lean.js @@ -0,0 +1 @@ +import{_ as i,D as a,o as n,c as e,I as l,R as t}from"./chunks/framework.bMtwhlie.js";const p="/learn-wgpu-zh/assets/corruption.i47wUppK.png",h="/learn-wgpu-zh/assets/black_triangles.n_V-Q8yK.png",k="/learn-wgpu-zh/assets/render_doc_output.aiG7KCiC.png",r="/learn-wgpu-zh/assets/results.XwEj8aho.png",C=JSON.parse('{"title":"Compute Example: Tangents and Bitangents","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/compute/index.md","filePath":"showcase/compute/index.md","lastUpdated":1703303099000}'),E={name:"showcase/compute/index.md"},d=t("",28);function g(c,o,y,u,b,m){const s=a("AutoGithubLink");return n(),e("div",null,[d,l(s)])}const v=i(E,[["render",g]]);export{C as __pageData,v as default}; diff --git a/assets/showcase_gifs_index.md.7LiwhyX_.js b/assets/showcase_gifs_index.md.7LiwhyX_.js new file mode 100644 index 000000000..671f9a0a6 --- /dev/null +++ b/assets/showcase_gifs_index.md.7LiwhyX_.js @@ -0,0 +1,124 @@ +import{_ as i,D as a,o as n,c as p,I as h,R as k}from"./chunks/framework.bMtwhlie.js";const l="/learn-wgpu-zh/assets/output.U2CgAQ0R.gif",u=JSON.parse('{"title":"生成 GIF 动图","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/gifs/index.md","filePath":"showcase/gifs/index.md","lastUpdated":1703303099000}'),t={name:"showcase/gifs/index.md"},e=k(`

生成 GIF 动图

假如你想要展示一个自己实现的,漂亮的 WebGPU 模拟动画,当然可以录制一个视频,但如果只是想在微博或朋友圈以九宫格来展示呢?

这,就是 GIF 的用武之地。

另外,GIF 的发音是 GHIF,而不是 JIF,因为 JIF 不仅是花生酱,它也是一种不同的图像格式

如何制作 GIF?

我们使用 gif 包创建一个函数来对渲染的图像进行编码:

rust
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
+    use gif::{Frame, Encoder, Repeat, SetParameter};
+
+    let mut image = std::fs::File::create(path)?;
+    let mut encoder = Encoder::new(&mut image, size, size, &[])?;
+    encoder.set(Repeat::Infinite)?;
+
+    for mut frame in frames {
+        encoder.write_frame(&Frame::from_rgba_speed(size, size, &mut frame, speed))?;
+    }
+
+    Ok(())
+}

上面的函数所需要的参数是 GIF 的帧数,它应该运行多快,以及 GIF 的大小。

如何生成帧数据?

如果看过离屏渲染案例,你就知道我们可以直接渲染到一个纹理。我们将创建一个用于渲染的纹理和一个用于复制纹理的纹素数据的缓冲区

rust
// 创建一个用于渲染的纹理
+let texture_size = 256u32;
+let rt_desc = wgpu::TextureDescriptor {
+    size: wgpu::Extent3d {
+        width: texture_size,
+        height: texture_size,
+        depth_or_array_layers: 1,
+    },
+    mip_level_count: 1,
+    sample_count: 1,
+    dimension: wgpu::TextureDimension::D2,
+    format: wgpu::TextureFormat::Rgba8UnormSrgb,
+    usage: wgpu::TextureUsages::COPY_SRC
+        | wgpu::TextureUsages::RENDER_ATTACHMENT,
+    label: None,
+    view_formats: &[],
+};
+let render_target = framework::Texture::from_descriptor(&device, rt_desc);
+
+// wgpu 需要使用 wgpu::COPY_BYTES_PER_ROW_ALIGNMENT 对齐纹理 -> 缓冲区的复制
+// 因此,我们需要同时保存 padded_bytes_per_row 和 unpadded_bytes_per_row
+let pixel_size = mem::size_of::<[u8;4]>() as u32;
+let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
+let unpadded_bytes_per_row = pixel_size * texture_size;
+let padding = (align - unpadded_bytes_per_row % align) % align;
+let padded_bytes_per_row = unpadded_bytes_per_row + padding;
+
+// 创建一个用于复制纹素数据的缓冲区
+let buffer_size = (padded_bytes_per_row * texture_size) as wgpu::BufferAddress;
+let buffer_desc = wgpu::BufferDescriptor {
+    size: buffer_size,
+    usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
+    label: Some("Output Buffer"),
+    mapped_at_creation: false,
+};
+let output_buffer = device.create_buffer(&buffer_desc);

现在,我们可以渲染一帧了,然后把这个帧缓冲区数据(也就是我们上面创建的纹理的纹素数据)复制到一个 Vec<u8> 数组。

rust
let mut frames = Vec::new();
+
+for c in &colors {
+    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+        label: None,
+    });
+
+    let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+        label: Some("GIF Pass"),
+        color_attachments: &[
+            wgpu::RenderPassColorAttachment {
+                view: &render_target.view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(
+                        wgpu::Color {
+                            r: c[0],
+                            g: c[1],
+                            b: c[2],
+                            a: 1.0,
+                        }
+                    ),
+                    store: wgpu::StoreOp::Store
+                },
+            }
+        ],
+        ..Default::default()
+    });
+
+    rpass.set_pipeline(&render_pipeline);
+    rpass.draw(0..3, 0..1);
+
+    drop(rpass);
+
+    encoder.copy_texture_to_buffer(
+        wgpu::ImageCopyTexture {
+            texture: &render_target.texture,
+            mip_level: 0,
+            origin: wgpu::Origin3d::ZERO,
+        },
+        wgpu::ImageCopyBuffer {
+            buffer: &output_buffer,
+            layout: wgpu::ImageDataLayout {
+                offset: 0,
+                bytes_per_row: padded_bytes_per_row,
+                rows_per_image: texture_size,
+            }
+        },
+        render_target.desc.size
+    );
+
+    queue.submit(std::iter::once(encoder.finish()));
+
+    // 创建一个缓冲区数据异步映射
+    let buffer_slice = output_buffer.slice(..);
+    let request = buffer_slice.map_async(wgpu::MapMode::Read);
+    // 等待 GPU 完成上面的任务
+    device.poll(wgpu::Maintain::Wait);
+    let result = request.await;
+
+    match result {
+        Ok(()) => {
+            let padded_data = buffer_slice.get_mapped_range();
+            let data = padded_data
+                .chunks(padded_bytes_per_row as _)
+                .map(|chunk| { &chunk[..unpadded_bytes_per_row as _]})
+                .flatten()
+                .map(|x| { *x })
+                .collect::<Vec<_>>();
+            drop(padded_data);
+            output_buffer.unmap();
+            frames.push(data);
+        }
+        _ => { eprintln!("Something went wrong") }
+    }
+
+}

完成后,就可以将我们的帧数据传递给 save_gif() 函数了:

rust
save_gif("output.gif", &mut frames, 1, texture_size as u16).unwrap();

我们还可以使用纹理数组来做优化,并一次发送所有绘制命令。 但上面的简单程序就是生成 GIF 动图的全部要点了,运行示例代码将得到以下 GIF 图:

./output.gif

',17);function r(E,d,g,y,F,c){const s=a("AutoGithubLink");return n(),p("div",null,[e,h(s)])}const A=i(t,[["render",r]]);export{u as __pageData,A as default}; diff --git a/assets/showcase_gifs_index.md.7LiwhyX_.lean.js b/assets/showcase_gifs_index.md.7LiwhyX_.lean.js new file mode 100644 index 000000000..7dba9369e --- /dev/null +++ b/assets/showcase_gifs_index.md.7LiwhyX_.lean.js @@ -0,0 +1 @@ +import{_ as i,D as a,o as n,c as p,I as h,R as k}from"./chunks/framework.bMtwhlie.js";const l="/learn-wgpu-zh/assets/output.U2CgAQ0R.gif",u=JSON.parse('{"title":"生成 GIF 动图","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/gifs/index.md","filePath":"showcase/gifs/index.md","lastUpdated":1703303099000}'),t={name:"showcase/gifs/index.md"},e=k("",17);function r(E,d,g,y,F,c){const s=a("AutoGithubLink");return n(),p("div",null,[e,h(s)])}const A=i(t,[["render",r]]);export{u as __pageData,A as default}; diff --git a/assets/showcase_gifs_index.md.af0d950b.js b/assets/showcase_gifs_index.md.af0d950b.js deleted file mode 100644 index 1c6d68ffd..000000000 --- a/assets/showcase_gifs_index.md.af0d950b.js +++ /dev/null @@ -1,124 +0,0 @@ -import{_ as n,E as a,o as p,c as l,J as o,S as e}from"./chunks/framework.adbf3c9e.js";const r="/learn-wgpu-zh/assets/output.d9531c62.gif",m=JSON.parse('{"title":"生成 GIF 动图","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/gifs/index.md","filePath":"showcase/gifs/index.md","lastUpdated":1701933923000}'),c={name:"showcase/gifs/index.md"},t=e(`

生成 GIF 动图

假如你想要展示一个自己实现的,漂亮的 WebGPU 模拟动画,当然可以录制一个视频,但如果只是想在微博或朋友圈以九宫格来展示呢?

这,就是 GIF 的用武之地。

另外,GIF 的发音是 GHIF,而不是 JIF,因为 JIF 不仅是花生酱,它也是一种不同的图像格式

如何制作 GIF?

我们使用 gif 包创建一个函数来对渲染的图像进行编码:

rust
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
-    use gif::{Frame, Encoder, Repeat, SetParameter};
-
-    let mut image = std::fs::File::create(path)?;
-    let mut encoder = Encoder::new(&mut image, size, size, &[])?;
-    encoder.set(Repeat::Infinite)?;
-
-    for mut frame in frames {
-        encoder.write_frame(&Frame::from_rgba_speed(size, size, &mut frame, speed))?;
-    }
-
-    Ok(())
-}

上面的函数所需要的参数是 GIF 的帧数,它应该运行多快,以及 GIF 的大小。

如何生成帧数据?

如果看过离屏渲染案例,你就知道我们可以直接渲染到一个纹理。我们将创建一个用于渲染的纹理和一个用于复制纹理的纹素数据的缓冲区

rust
// 创建一个用于渲染的纹理
-let texture_size = 256u32;
-let rt_desc = wgpu::TextureDescriptor {
-    size: wgpu::Extent3d {
-        width: texture_size,
-        height: texture_size,
-        depth_or_array_layers: 1,
-    },
-    mip_level_count: 1,
-    sample_count: 1,
-    dimension: wgpu::TextureDimension::D2,
-    format: wgpu::TextureFormat::Rgba8UnormSrgb,
-    usage: wgpu::TextureUsages::COPY_SRC
-        | wgpu::TextureUsages::RENDER_ATTACHMENT,
-    label: None,
-    view_formats: &[],
-};
-let render_target = framework::Texture::from_descriptor(&device, rt_desc);
-
-// wgpu 需要使用 wgpu::COPY_BYTES_PER_ROW_ALIGNMENT 对齐纹理 -> 缓冲区的复制
-// 因此,我们需要同时保存 padded_bytes_per_row 和 unpadded_bytes_per_row
-let pixel_size = mem::size_of::<[u8;4]>() as u32;
-let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
-let unpadded_bytes_per_row = pixel_size * texture_size;
-let padding = (align - unpadded_bytes_per_row % align) % align;
-let padded_bytes_per_row = unpadded_bytes_per_row + padding;
-
-// 创建一个用于复制纹素数据的缓冲区
-let buffer_size = (padded_bytes_per_row * texture_size) as wgpu::BufferAddress;
-let buffer_desc = wgpu::BufferDescriptor {
-    size: buffer_size,
-    usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
-    label: Some("Output Buffer"),
-    mapped_at_creation: false,
-};
-let output_buffer = device.create_buffer(&buffer_desc);

现在,我们可以渲染一帧了,然后把这个帧缓冲区数据(也就是我们上面创建的纹理的纹素数据)复制到一个 Vec<u8> 数组。

rust
let mut frames = Vec::new();
-
-for c in &colors {
-    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-        label: None,
-    });
-
-    let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-        label: Some("GIF Pass"),
-        color_attachments: &[
-            wgpu::RenderPassColorAttachment {
-                view: &render_target.view,
-                resolve_target: None,
-                ops: wgpu::Operations {
-                    load: wgpu::LoadOp::Clear(
-                        wgpu::Color {
-                            r: c[0],
-                            g: c[1],
-                            b: c[2],
-                            a: 1.0,
-                        }
-                    ),
-                    store: wgpu::StoreOp::Store
-                },
-            }
-        ],
-        ..Default::default()
-    });
-
-    rpass.set_pipeline(&render_pipeline);
-    rpass.draw(0..3, 0..1);
-
-    drop(rpass);
-
-    encoder.copy_texture_to_buffer(
-        wgpu::ImageCopyTexture {
-            texture: &render_target.texture,
-            mip_level: 0,
-            origin: wgpu::Origin3d::ZERO,
-        },
-        wgpu::ImageCopyBuffer {
-            buffer: &output_buffer,
-            layout: wgpu::ImageDataLayout {
-                offset: 0,
-                bytes_per_row: padded_bytes_per_row,
-                rows_per_image: texture_size,
-            }
-        },
-        render_target.desc.size
-    );
-
-    queue.submit(std::iter::once(encoder.finish()));
-
-    // 创建一个缓冲区数据异步映射
-    let buffer_slice = output_buffer.slice(..);
-    let request = buffer_slice.map_async(wgpu::MapMode::Read);
-    // 等待 GPU 完成上面的任务
-    device.poll(wgpu::Maintain::Wait);
-    let result = request.await;
-
-    match result {
-        Ok(()) => {
-            let padded_data = buffer_slice.get_mapped_range();
-            let data = padded_data
-                .chunks(padded_bytes_per_row as _)
-                .map(|chunk| { &chunk[..unpadded_bytes_per_row as _]})
-                .flatten()
-                .map(|x| { *x })
-                .collect::<Vec<_>>();
-            drop(padded_data);
-            output_buffer.unmap();
-            frames.push(data);
-        }
-        _ => { eprintln!("Something went wrong") }
-    }
-
-}

完成后,就可以将我们的帧数据传递给 save_gif() 函数了:

rust
save_gif("output.gif", &mut frames, 1, texture_size as u16).unwrap();

我们还可以使用纹理数组来做优化,并一次发送所有绘制命令。 但上面的简单程序就是生成 GIF 动图的全部要点了,运行示例代码将得到以下 GIF 图:

./output.gif

',17);function F(D,y,C,A,i,b){const s=a("AutoGithubLink");return p(),l("div",null,[t,o(s)])}const B=n(c,[["render",F]]);export{m as __pageData,B as default}; diff --git a/assets/showcase_gifs_index.md.af0d950b.lean.js b/assets/showcase_gifs_index.md.af0d950b.lean.js deleted file mode 100644 index 853d5925f..000000000 --- a/assets/showcase_gifs_index.md.af0d950b.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as n,E as a,o as p,c as l,J as o,S as e}from"./chunks/framework.adbf3c9e.js";const r="/learn-wgpu-zh/assets/output.d9531c62.gif",m=JSON.parse('{"title":"生成 GIF 动图","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/gifs/index.md","filePath":"showcase/gifs/index.md","lastUpdated":1701933923000}'),c={name:"showcase/gifs/index.md"},t=e("",17);function F(D,y,C,A,i,b){const s=a("AutoGithubLink");return p(),l("div",null,[t,o(s)])}const B=n(c,[["render",F]]);export{m as __pageData,B as default}; diff --git a/assets/showcase_pong_index.md.1eac932a.js b/assets/showcase_pong_index.md.1eac932a.js deleted file mode 100644 index 402304f8f..000000000 --- a/assets/showcase_pong_index.md.1eac932a.js +++ /dev/null @@ -1,260 +0,0 @@ -import{_ as n,E as a,o as l,c as p,J as o,S as e}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/pong.9a7ca7b9.png",m=JSON.parse('{"title":"Pong","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/pong/index.md","filePath":"showcase/pong/index.md","lastUpdated":1701933923000}'),r={name:"showcase/pong/index.md"},c=e('

Pong

Practically the "Hello World!" of games. Pong has been remade thousands of times. I know Pong. You know Pong. We all know Pong. That being said, this time I wanted to put in a little more effort than most people do. This showcase has a basic menu system, sounds, and different game states.

The architecture is not the best as I prescribed to the "get things done" mentality. If I were to redo this project, I'd change a lot of things. Regardless, let's get into the postmortem.

The Architecture

I was messing around with separating state from the render code. It ended up similar to an Entity Component System model.

I had a State class with all of the objects in the scene. This included the ball and the paddles, as well as the text for the scores and even the menu. State also included a game_state field of type GameState.

rust
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-pub enum GameState {
-    MainMenu,
-    Serving,
-    Playing,
-    GameOver,
-    Quiting,
-}

The State class didn't have any methods on it as I was taking a more data-oriented approach. Instead, I created a System trait and created multiple structs that implemented it.

rust
pub trait System {
-    #[allow(unused_variables)]
-    fn start(&mut self, state: &mut state::State) {}
-    fn update_state(
-        &self,
-        input: &input::Input,
-        state: &mut state::State,
-        events: &mut Vec<state::Event>,
-    );
-}

The systems would be in charge of controlling updating the different objects' states (position, visibility, etc), as well as updating the game_state field. I created all the systems on startup and used a match on game_state to determine which ones should be allowed to run (the visiblity_system always runs as it is always needed).

rust
visiblity_system.update_state(&input, &mut state, &mut events);
-match state.game_state {
-    state::GameState::MainMenu => {
-        menu_system.update_state(&input, &mut state, &mut events);
-        if state.game_state == state::GameState::Serving {
-            serving_system.start(&mut state);
-        }
-    },
-    state::GameState::Serving => {
-        serving_system.update_state(&input, &mut state, &mut events);
-        play_system.update_state(&input, &mut state, &mut events);
-        if state.game_state == state::GameState::Playing {
-            play_system.start(&mut state);
-        }
-    },
-    state::GameState::Playing => {
-        ball_system.update_state(&input, &mut state, &mut events);
-        play_system.update_state(&input, &mut state, &mut events);
-        if state.game_state == state::GameState::Serving {
-            serving_system.start(&mut state);
-        } else if state.game_state == state::GameState::GameOver {
-            game_over_system.start(&mut state);
-        }
-    },
-    state::GameState::GameOver => {
-        game_over_system.update_state(&input, &mut state, &mut events);
-        if state.game_state == state::GameState::MainMenu {
-            menu_system.start(&mut state);
-        }
-    },
-    state::GameState::Quiting => {},
-}

It's definitely not the cleanest code, but it works.

I ended up having 6 systems in total.

  1. I added the VisibilitySystem near the end of development. Up to that point, all the systems had to set the visible field of the objects. That was a pain and cluttered the logic. Instead, I decided to create the VisiblitySystem to handle that.

  2. The MenuSystem handled controlling what text was focused, and what would happen when the user pressed the enter key. If the Play button was focused, pressing enter would change game_state to GameState::Serving which would start the game. The Quit button would shift to GameState::Quiting.

  3. The ServingSystem sets the ball's position to (0.0, 0.0), updates the score texts, and shifts into GameState::Playing after a timer.

  4. The PlaySystem controls the players. It allows them to move and keeps them from leaving the play space. This system runs on both GameState::Playing as well as GameState::Serving. I did this to allow the players to reposition themselves before the serve. The PlaySystem also will shift into GameState::GameOver when one of the players' scores is greater than 2.

  5. The BallSystem system controls the ball's movement as well as its bouncing of walls/players. It also updates the score and shifts to GameState::Serving when the ball goes off the side of the screen.

  6. The GameOver system updates the win_text and shifts to GameState::MainMenu after a delay.

I found the system approach quite nice to work with. My implementation wasn't the best, but I would like to work with it again. I might even implement my own ECS.

Input

The System trait, originally had a process_input method. This became a problem when I was implementing allowing players to move between serves. The players would get stuck when the game_state switched from Serving to Playing as the inputs were getting stuck. I only called process_input on systems that were currently in use. Changing that would be finicky, so I decided to move all the input code into its own struct.

rust
use winit::event::{VirtualKeyCode, ElementState};
-
-#[derive(Debug, Default)]
-pub struct Input {
-    pub p1_up_pressed: bool,
-    pub p1_down_pressed: bool,
-    pub p2_up_pressed: bool,
-    pub p2_down_pressed: bool,
-    pub enter_pressed: bool,
-}
-
-impl Input {
-    pub fn new() -> Self {
-        Default::default()
-    }
-
-    pub fn update(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
-        let pressed = state == ElementState::Pressed;
-        match key {
-            VirtualKeyCode::Up => {
-                self.p2_up_pressed = pressed;
-                true
-            }
-            VirtualKeyCode::Down => {
-                self.p2_down_pressed = pressed;
-                true
-            }
-            VirtualKeyCode::W => {
-                self.p1_up_pressed = pressed;
-                true
-            }
-            VirtualKeyCode::S => {
-                self.p1_down_pressed = pressed;
-                true
-            }
-            VirtualKeyCode::Return => {
-                self.enter_pressed = pressed;
-                true
-            }
-            _ => false
-        }
-    }
-
-    pub fn ui_up_pressed(&self) -> bool {
-        self.p1_up_pressed || self.p2_up_pressed
-    }
-
-    pub fn ui_down_pressed(&self) -> bool {
-        self.p1_down_pressed || self.p2_down_pressed
-    }
-}

This works really well. I simply pass this struct into the update_state method.

Render

I used wgpu_glyph for the text and white quads for the ball and paddles. There's not much to say here, it's Pong after all.

I did mess around with batching, however. It was totally overkill for this project, but it was a good learning experience. Here's the code if you're interested.

rust
pub struct QuadBufferBuilder {
-    vertex_data: Vec<Vertex>,
-    index_data: Vec<u32>,
-    current_quad: u32,
-}
-
-impl QuadBufferBuilder {
-    pub fn new() -> Self {
-        Self {
-            vertex_data: Vec::new(),
-            index_data: Vec::new(),
-            current_quad: 0,
-        }
-    }
-
-    pub fn push_ball(self, ball: &state::Ball) -> Self {
-        if ball.visible {
-            let min_x = ball.position.x - ball.radius;
-            let min_y = ball.position.y - ball.radius;
-            let max_x = ball.position.x + ball.radius;
-            let max_y = ball.position.y + ball.radius;
-
-            self.push_quad(min_x, min_y, max_x, max_y)
-        } else {
-            self
-        }
-    }
-
-    pub fn push_player(self, player: &state::Player) -> Self {
-        if player.visible {
-            self.push_quad(
-                player.position.x - player.size.x * 0.5,
-                player.position.y - player.size.y * 0.5,
-                player.position.x + player.size.x * 0.5,
-                player.position.y + player.size.y * 0.5,
-            )
-        } else {
-            self
-        }
-    }
-
-    pub fn push_quad(mut self, min_x: f32, min_y: f32, max_x: f32, max_y: f32) -> Self {
-        self.vertex_data.extend(&[
-            Vertex {
-                position: (min_x, min_y).into(),
-            },
-            Vertex {
-                position: (max_x, min_y).into(),
-            },
-            Vertex {
-                position: (max_x, max_y).into(),
-            },
-            Vertex {
-                position: (min_x, max_y).into(),
-            },
-        ]);
-        self.index_data.extend(&[
-            self.current_quad * 4 + 0,
-            self.current_quad * 4 + 1,
-            self.current_quad * 4 + 2,
-            self.current_quad * 4 + 0,
-            self.current_quad * 4 + 2,
-            self.current_quad * 4 + 3,
-        ]);
-        self.current_quad += 1;
-        self
-    }
-
-    pub fn build(self, device: &wgpu::Device) -> (StagingBuffer, StagingBuffer, u32) {
-        (
-            StagingBuffer::new(device, &self.vertex_data),
-            StagingBuffer::new(device, &self.index_data),
-            self.index_data.len() as u32,
-        )
-    }
-}

Sound

I used rodio for sound. I created a SoundPack class to store the sounds. Deciding how to get the sounds to play took some thinking. I chose to pass in a Vec<state::Event> into the update_state method. The system would then push an event to the Vec. The Event enum is listed below.

rust
#[derive(Debug, Copy, Clone)]
-pub enum Event {
-    ButtonPressed,
-    FocusChanged,
-    BallBounce(cgmath::Vector2<f32>),
-    Score(u32),
-}

I was going to have BallBounce play a positioned sound using a SpatialSink, but I was getting clipping issues, and I wanted to be done with the project. Aside from that, the events system worked nicely.

WASM Support

This example works on the web, but there are a few steps that I needed to take to make things work. The first one was that I needed to switch to using a lib.rs instead of just main.rs. I opted to use wasm-pack to create the web assembly. I could have kept the old format by using wasm-bindgen directly, but I ran into issues with using the wrong version of wasm-bindgen, so I elected to stick with wasm-pack.

In order for wasm-pack to work properly I first needed to add some dependencies:

toml
[dependencies]
-cfg-if = "1"
-env_logger = "0.10"
-winit = "0.28.7"
-anyhow = "1.0"
-bytemuck = { version = "1.14", features = [ "derive" ] }
-cgmath = "0.18"
-pollster = "0.3"
-wgpu = { version = "0.16", features = ["spirv"]}
-wgpu_glyph = "0.17"
-rand = "0.8"
-rodio = { version = "0.15", default-features = false, features = ["wav"] }
-log = "0.4"
-instant = "0.1"
-
-[target.'cfg(target_arch = "wasm32")'.dependencies]
-console_error_panic_hook = "0.1.6"
-console_log = "1.0"
-getrandom = { version = "0.2", features = ["js"] }
-rodio = { version = "0.15", default-features = false, features = ["wasm-bindgen", "wav"] }
-wasm-bindgen-futures = "0.4.20"
-wasm-bindgen = "0.2.87"
-web-sys = { version = "0.3.64", features = [
-    "Document",
-    "Window",
-    "Element",
-]}
-wgpu = { version = "0.16", features = ["spirv", "webgl"]}
-
-[build-dependencies]
-anyhow = "1.0"
-fs_extra = "1.3"
-glob = "0.3"
-rayon = "1.8"
-naga = { version = "0.11", features = ["glsl-in", "spv-out", "wgsl-out"]}

I'll highlight a few of these:

  • rand: If you want to use rand on the web, you need to include getrandom directly and enable its js feature.
  • rodio: I had to disable all of the features for the WASM build, and then enabled them separately. The mp3 feature specifically wasn't working for me. There might have been a workaround, but since I'm not using mp3 in this example I just elected to only use wav.
  • instant: This crate is basically just a wrapper around std::time::Instant. In a normal build, it's just a type alias. In web builds it uses the browser's time functions.
  • cfg-if: This is a convenient crate for making platform-specific code less horrible to write.
  • env_logger and console_log: env_logger doesn't work on web assembly so we need to use a different logger. console_log is the one used in the web assembly tutorials, so I went with that one.
  • wasm-bindgen: This crate is the glue that makes Rust code work on the web. If you are building using the wasm-bindgen command you need to make sure that the command version of wasm-bindgen matches the version in Cargo.toml exactly otherwise you'll have problems. If you use wasm-pack it will download the appropriate wasm-bindgen binary to use for your crate.
  • web-sys: This has functions and types that allow you to use different methods available in js such as "getElementById()".

Now that that's out of the way let's talk about some code. First, we need to create a function that will start our event loop.

rust
#[cfg(target_arch="wasm32")]
-use wasm_bindgen::prelude::*;
-
-#[cfg_attr(target_arch="wasm32", wasm_bindgen(start))]
-pub fn start() {
-    // Snipped...
-}

The wasm_bindgen(start) tell's wasm-bindgen that this function should be started as soon as the web assembly module is loaded by javascript. Most of the code inside this function is the same as what you'd find in other examples on this site, but there is some specific stuff we need to do on the web.

rust
cfg_if::cfg_if! {
-    if #[cfg(target_arch = "wasm32")] {
-        console_log::init_with_level(log::Level::Warn).expect("Could't initialize logger");
-        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
-    } else {
-        env_logger::init();
-    }
-}

This code should run before you try to do anything significant. It sets up the logger based on what architecture you're building for. Most architectures will use env_logger. The wasm32 architecture will use console_log. It's also important that we tell Rust to forward panics to javascript. If we didn't do this we would have no idea when our Rust code panics.

Next, we create a window. Much of it is like we've done before, but since we are supporting fullscreen we need to do some extra steps.

rust
let event_loop = EventLoop::new();
-let monitor = event_loop.primary_monitor().unwrap();
-let video_mode = monitor.video_modes().next();
-let size = video_mode.clone().map_or(PhysicalSize::new(800, 600), |vm| vm.size());
-let window = WindowBuilder::new()
-    .with_visible(false)
-    .with_title("Pong")
-    .with_fullscreen(video_mode.map(|vm| Fullscreen::Exclusive(vm)))
-    .build(&event_loop)
-    .unwrap();
-
-// WASM builds don't have access to monitor information, so
-// we should specify a fallback resolution
-if window.fullscreen().is_none() {
-    window.set_inner_size(PhysicalSize::new(512, 512));
-}

We then have to do some web-specific stuff if we are on that platform.

rust
#[cfg(target_arch = "wasm32")]
-{
-    use winit::platform::web::WindowExtWebSys;
-    web_sys::window()
-        .and_then(|win| win.document())
-        .and_then(|doc| {
-            let dst = doc.get_element_by_id("wasm-example")?;
-            let canvas = web_sys::Element::from(window.canvas());
-            dst.append_child(&canvas).ok()?;
-
-            // Request fullscreen, if denied, continue as normal
-            match canvas.request_fullscreen() {
-                Ok(_) => {},
-                Err(_) => ()
-            }
-
-            Some(())
-        })
-        .expect("Couldn't append canvas to document body.");
-}

Everything else works the same.

Summary

A fun project to work on. It was overly architected, and kinda hard to make changes, but a good experience nonetheless.

Try the code down below! (Controls currently require a keyboard.)

`,47);function D(F,y,C,A,i,u){const s=a("WasmExample");return l(),p("div",null,[c,o(s,{example:"pong"})])}const d=n(r,[["render",D]]);export{m as __pageData,d as default}; diff --git a/assets/showcase_pong_index.md.1eac932a.lean.js b/assets/showcase_pong_index.md.1eac932a.lean.js deleted file mode 100644 index f18ab8238..000000000 --- a/assets/showcase_pong_index.md.1eac932a.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as n,E as a,o as l,c as p,J as o,S as e}from"./chunks/framework.adbf3c9e.js";const t="/learn-wgpu-zh/assets/pong.9a7ca7b9.png",m=JSON.parse('{"title":"Pong","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/pong/index.md","filePath":"showcase/pong/index.md","lastUpdated":1701933923000}'),r={name:"showcase/pong/index.md"},c=e("",47);function D(F,y,C,A,i,u){const s=a("WasmExample");return l(),p("div",null,[c,o(s,{example:"pong"})])}const d=n(r,[["render",D]]);export{m as __pageData,d as default}; diff --git a/assets/showcase_pong_index.md.oLV3LpsH.js b/assets/showcase_pong_index.md.oLV3LpsH.js new file mode 100644 index 000000000..29edfa1d8 --- /dev/null +++ b/assets/showcase_pong_index.md.oLV3LpsH.js @@ -0,0 +1,260 @@ +import{_ as i,D as a,o as n,c as l,I as h,R as p}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/pong.ct65uIIn.png",u=JSON.parse('{"title":"Pong","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/pong/index.md","filePath":"showcase/pong/index.md","lastUpdated":1703303099000}'),k={name:"showcase/pong/index.md"},e=p('

Pong

Practically the "Hello World!" of games. Pong has been remade thousands of times. I know Pong. You know Pong. We all know Pong. That being said, this time I wanted to put in a little more effort than most people do. This showcase has a basic menu system, sounds, and different game states.

The architecture is not the best as I prescribed to the "get things done" mentality. If I were to redo this project, I'd change a lot of things. Regardless, let's get into the postmortem.

The Architecture

I was messing around with separating state from the render code. It ended up similar to an Entity Component System model.

I had a State class with all of the objects in the scene. This included the ball and the paddles, as well as the text for the scores and even the menu. State also included a game_state field of type GameState.

rust
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum GameState {
+    MainMenu,
+    Serving,
+    Playing,
+    GameOver,
+    Quiting,
+}

The State class didn't have any methods on it as I was taking a more data-oriented approach. Instead, I created a System trait and created multiple structs that implemented it.

rust
pub trait System {
+    #[allow(unused_variables)]
+    fn start(&mut self, state: &mut state::State) {}
+    fn update_state(
+        &self,
+        input: &input::Input,
+        state: &mut state::State,
+        events: &mut Vec<state::Event>,
+    );
+}

The systems would be in charge of controlling updating the different objects' states (position, visibility, etc), as well as updating the game_state field. I created all the systems on startup and used a match on game_state to determine which ones should be allowed to run (the visiblity_system always runs as it is always needed).

rust
visiblity_system.update_state(&input, &mut state, &mut events);
+match state.game_state {
+    state::GameState::MainMenu => {
+        menu_system.update_state(&input, &mut state, &mut events);
+        if state.game_state == state::GameState::Serving {
+            serving_system.start(&mut state);
+        }
+    },
+    state::GameState::Serving => {
+        serving_system.update_state(&input, &mut state, &mut events);
+        play_system.update_state(&input, &mut state, &mut events);
+        if state.game_state == state::GameState::Playing {
+            play_system.start(&mut state);
+        }
+    },
+    state::GameState::Playing => {
+        ball_system.update_state(&input, &mut state, &mut events);
+        play_system.update_state(&input, &mut state, &mut events);
+        if state.game_state == state::GameState::Serving {
+            serving_system.start(&mut state);
+        } else if state.game_state == state::GameState::GameOver {
+            game_over_system.start(&mut state);
+        }
+    },
+    state::GameState::GameOver => {
+        game_over_system.update_state(&input, &mut state, &mut events);
+        if state.game_state == state::GameState::MainMenu {
+            menu_system.start(&mut state);
+        }
+    },
+    state::GameState::Quiting => {},
+}

It's definitely not the cleanest code, but it works.

I ended up having 6 systems in total.

  1. I added the VisibilitySystem near the end of development. Up to that point, all the systems had to set the visible field of the objects. That was a pain and cluttered the logic. Instead, I decided to create the VisiblitySystem to handle that.

  2. The MenuSystem handled controlling what text was focused, and what would happen when the user pressed the enter key. If the Play button was focused, pressing enter would change game_state to GameState::Serving which would start the game. The Quit button would shift to GameState::Quiting.

  3. The ServingSystem sets the ball's position to (0.0, 0.0), updates the score texts, and shifts into GameState::Playing after a timer.

  4. The PlaySystem controls the players. It allows them to move and keeps them from leaving the play space. This system runs on both GameState::Playing as well as GameState::Serving. I did this to allow the players to reposition themselves before the serve. The PlaySystem also will shift into GameState::GameOver when one of the players' scores is greater than 2.

  5. The BallSystem system controls the ball's movement as well as its bouncing of walls/players. It also updates the score and shifts to GameState::Serving when the ball goes off the side of the screen.

  6. The GameOver system updates the win_text and shifts to GameState::MainMenu after a delay.

I found the system approach quite nice to work with. My implementation wasn't the best, but I would like to work with it again. I might even implement my own ECS.

Input

The System trait, originally had a process_input method. This became a problem when I was implementing allowing players to move between serves. The players would get stuck when the game_state switched from Serving to Playing as the inputs were getting stuck. I only called process_input on systems that were currently in use. Changing that would be finicky, so I decided to move all the input code into its own struct.

rust
use winit::event::{VirtualKeyCode, ElementState};
+
+#[derive(Debug, Default)]
+pub struct Input {
+    pub p1_up_pressed: bool,
+    pub p1_down_pressed: bool,
+    pub p2_up_pressed: bool,
+    pub p2_down_pressed: bool,
+    pub enter_pressed: bool,
+}
+
+impl Input {
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    pub fn update(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
+        let pressed = state == ElementState::Pressed;
+        match key {
+            VirtualKeyCode::Up => {
+                self.p2_up_pressed = pressed;
+                true
+            }
+            VirtualKeyCode::Down => {
+                self.p2_down_pressed = pressed;
+                true
+            }
+            VirtualKeyCode::W => {
+                self.p1_up_pressed = pressed;
+                true
+            }
+            VirtualKeyCode::S => {
+                self.p1_down_pressed = pressed;
+                true
+            }
+            VirtualKeyCode::Return => {
+                self.enter_pressed = pressed;
+                true
+            }
+            _ => false
+        }
+    }
+
+    pub fn ui_up_pressed(&self) -> bool {
+        self.p1_up_pressed || self.p2_up_pressed
+    }
+
+    pub fn ui_down_pressed(&self) -> bool {
+        self.p1_down_pressed || self.p2_down_pressed
+    }
+}

This works really well. I simply pass this struct into the update_state method.

Render

I used wgpu_glyph for the text and white quads for the ball and paddles. There's not much to say here, it's Pong after all.

I did mess around with batching, however. It was totally overkill for this project, but it was a good learning experience. Here's the code if you're interested.

rust
pub struct QuadBufferBuilder {
+    vertex_data: Vec<Vertex>,
+    index_data: Vec<u32>,
+    current_quad: u32,
+}
+
+impl QuadBufferBuilder {
+    pub fn new() -> Self {
+        Self {
+            vertex_data: Vec::new(),
+            index_data: Vec::new(),
+            current_quad: 0,
+        }
+    }
+
+    pub fn push_ball(self, ball: &state::Ball) -> Self {
+        if ball.visible {
+            let min_x = ball.position.x - ball.radius;
+            let min_y = ball.position.y - ball.radius;
+            let max_x = ball.position.x + ball.radius;
+            let max_y = ball.position.y + ball.radius;
+
+            self.push_quad(min_x, min_y, max_x, max_y)
+        } else {
+            self
+        }
+    }
+
+    pub fn push_player(self, player: &state::Player) -> Self {
+        if player.visible {
+            self.push_quad(
+                player.position.x - player.size.x * 0.5,
+                player.position.y - player.size.y * 0.5,
+                player.position.x + player.size.x * 0.5,
+                player.position.y + player.size.y * 0.5,
+            )
+        } else {
+            self
+        }
+    }
+
+    pub fn push_quad(mut self, min_x: f32, min_y: f32, max_x: f32, max_y: f32) -> Self {
+        self.vertex_data.extend(&[
+            Vertex {
+                position: (min_x, min_y).into(),
+            },
+            Vertex {
+                position: (max_x, min_y).into(),
+            },
+            Vertex {
+                position: (max_x, max_y).into(),
+            },
+            Vertex {
+                position: (min_x, max_y).into(),
+            },
+        ]);
+        self.index_data.extend(&[
+            self.current_quad * 4 + 0,
+            self.current_quad * 4 + 1,
+            self.current_quad * 4 + 2,
+            self.current_quad * 4 + 0,
+            self.current_quad * 4 + 2,
+            self.current_quad * 4 + 3,
+        ]);
+        self.current_quad += 1;
+        self
+    }
+
+    pub fn build(self, device: &wgpu::Device) -> (StagingBuffer, StagingBuffer, u32) {
+        (
+            StagingBuffer::new(device, &self.vertex_data),
+            StagingBuffer::new(device, &self.index_data),
+            self.index_data.len() as u32,
+        )
+    }
+}

Sound

I used rodio for sound. I created a SoundPack class to store the sounds. Deciding how to get the sounds to play took some thinking. I chose to pass in a Vec<state::Event> into the update_state method. The system would then push an event to the Vec. The Event enum is listed below.

rust
#[derive(Debug, Copy, Clone)]
+pub enum Event {
+    ButtonPressed,
+    FocusChanged,
+    BallBounce(cgmath::Vector2<f32>),
+    Score(u32),
+}

I was going to have BallBounce play a positioned sound using a SpatialSink, but I was getting clipping issues, and I wanted to be done with the project. Aside from that, the events system worked nicely.

WASM Support

This example works on the web, but there are a few steps that I needed to take to make things work. The first one was that I needed to switch to using a lib.rs instead of just main.rs. I opted to use wasm-pack to create the web assembly. I could have kept the old format by using wasm-bindgen directly, but I ran into issues with using the wrong version of wasm-bindgen, so I elected to stick with wasm-pack.

In order for wasm-pack to work properly I first needed to add some dependencies:

toml
[dependencies]
+cfg-if = "1"
+env_logger = "0.10"
+winit = "0.28.7"
+anyhow = "1.0"
+bytemuck = { version = "1.14", features = [ "derive" ] }
+cgmath = "0.18"
+pollster = "0.3"
+wgpu = { version = "0.16", features = ["spirv"]}
+wgpu_glyph = "0.17"
+rand = "0.8"
+rodio = { version = "0.15", default-features = false, features = ["wav"] }
+log = "0.4"
+instant = "0.1"
+
+[target.'cfg(target_arch = "wasm32")'.dependencies]
+console_error_panic_hook = "0.1.6"
+console_log = "1.0"
+getrandom = { version = "0.2", features = ["js"] }
+rodio = { version = "0.15", default-features = false, features = ["wasm-bindgen", "wav"] }
+wasm-bindgen-futures = "0.4.20"
+wasm-bindgen = "0.2.87"
+web-sys = { version = "0.3.64", features = [
+    "Document",
+    "Window",
+    "Element",
+]}
+wgpu = { version = "0.16", features = ["spirv", "webgl"]}
+
+[build-dependencies]
+anyhow = "1.0"
+fs_extra = "1.3"
+glob = "0.3"
+rayon = "1.8"
+naga = { version = "0.11", features = ["glsl-in", "spv-out", "wgsl-out"]}

I'll highlight a few of these:

  • rand: If you want to use rand on the web, you need to include getrandom directly and enable its js feature.
  • rodio: I had to disable all of the features for the WASM build, and then enabled them separately. The mp3 feature specifically wasn't working for me. There might have been a workaround, but since I'm not using mp3 in this example I just elected to only use wav.
  • instant: This crate is basically just a wrapper around std::time::Instant. In a normal build, it's just a type alias. In web builds it uses the browser's time functions.
  • cfg-if: This is a convenient crate for making platform-specific code less horrible to write.
  • env_logger and console_log: env_logger doesn't work on web assembly so we need to use a different logger. console_log is the one used in the web assembly tutorials, so I went with that one.
  • wasm-bindgen: This crate is the glue that makes Rust code work on the web. If you are building using the wasm-bindgen command you need to make sure that the command version of wasm-bindgen matches the version in Cargo.toml exactly otherwise you'll have problems. If you use wasm-pack it will download the appropriate wasm-bindgen binary to use for your crate.
  • web-sys: This has functions and types that allow you to use different methods available in js such as "getElementById()".

Now that that's out of the way let's talk about some code. First, we need to create a function that will start our event loop.

rust
#[cfg(target_arch="wasm32")]
+use wasm_bindgen::prelude::*;
+
+#[cfg_attr(target_arch="wasm32", wasm_bindgen(start))]
+pub fn start() {
+    // Snipped...
+}

The wasm_bindgen(start) tells wasm-bindgen that this function should be started as soon as the web assembly module is loaded by JavaScript. Most of the code inside this function is the same as what you'd find in other examples on this site, but there is some specific stuff we need to do on the web.

rust
cfg_if::cfg_if! {
+    if #[cfg(target_arch = "wasm32")] {
+        console_log::init_with_level(log::Level::Warn).expect("Could't initialize logger");
+        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
+    } else {
+        env_logger::init();
+    }
+}

This code should run before you try to do anything significant. It sets up the logger based on what architecture you're building for. Most architectures will use env_logger. The wasm32 architecture will use console_log. It's also important that we tell Rust to forward panics to javascript. If we didn't do this we would have no idea when our Rust code panics.

Next, we create a window. Much of it is like we've done before, but since we are supporting fullscreen we need to do some extra steps.

rust
let event_loop = EventLoop::new();
+let monitor = event_loop.primary_monitor().unwrap();
+let video_mode = monitor.video_modes().next();
+let size = video_mode.clone().map_or(PhysicalSize::new(800, 600), |vm| vm.size());
+let window = WindowBuilder::new()
+    .with_visible(false)
+    .with_title("Pong")
+    .with_fullscreen(video_mode.map(|vm| Fullscreen::Exclusive(vm)))
+    .build(&event_loop)
+    .unwrap();
+
+// WASM builds don't have access to monitor information, so
+// we should specify a fallback resolution
+if window.fullscreen().is_none() {
+    window.set_inner_size(PhysicalSize::new(512, 512));
+}

We then have to do some web-specific stuff if we are on that platform.

rust
#[cfg(target_arch = "wasm32")]
+{
+    use winit::platform::web::WindowExtWebSys;
+    web_sys::window()
+        .and_then(|win| win.document())
+        .and_then(|doc| {
+            let dst = doc.get_element_by_id("wasm-example")?;
+            let canvas = web_sys::Element::from(window.canvas());
+            dst.append_child(&canvas).ok()?;
+
+            // Request fullscreen, if denied, continue as normal
+            match canvas.request_fullscreen() {
+                Ok(_) => {},
+                Err(_) => ()
+            }
+
+            Some(())
+        })
+        .expect("Couldn't append canvas to document body.");
+}

Everything else works the same.

Summary

A fun project to work on. It was overly architected, and kinda hard to make changes, but a good experience nonetheless.

Try the code down below! (Controls currently require a keyboard.)

`,47);function r(E,d,g,y,F,c){const s=a("WasmExample");return n(),l("div",null,[e,h(s,{example:"pong"})])}const b=i(k,[["render",r]]);export{u as __pageData,b as default}; diff --git a/assets/showcase_pong_index.md.oLV3LpsH.lean.js b/assets/showcase_pong_index.md.oLV3LpsH.lean.js new file mode 100644 index 000000000..e24bdcec9 --- /dev/null +++ b/assets/showcase_pong_index.md.oLV3LpsH.lean.js @@ -0,0 +1 @@ +import{_ as i,D as a,o as n,c as l,I as h,R as p}from"./chunks/framework.bMtwhlie.js";const t="/learn-wgpu-zh/assets/pong.ct65uIIn.png",u=JSON.parse('{"title":"Pong","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/pong/index.md","filePath":"showcase/pong/index.md","lastUpdated":1703303099000}'),k={name:"showcase/pong/index.md"},e=p("",47);function r(E,d,g,y,F,c){const s=a("WasmExample");return n(),l("div",null,[e,h(s,{example:"pong"})])}const b=i(k,[["render",r]]);export{u as __pageData,b as default}; diff --git a/assets/showcase_threading.md.a67a1647.js b/assets/showcase_threading.md.a67a1647.js deleted file mode 100644 index 96bffba63..000000000 --- a/assets/showcase_threading.md.a67a1647.js +++ /dev/null @@ -1,88 +0,0 @@ -import{_ as n,E as a,o as l,c as p,J as o,S as e}from"./chunks/framework.adbf3c9e.js";const m=JSON.parse('{"title":"Multi-threading with Wgpu and Rayon","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/threading.md","filePath":"showcase/threading.md","lastUpdated":1701933923000}'),t={name:"showcase/threading.md"},r=e(`

Multi-threading with Wgpu and Rayon

This example has not been tested on WASM. Rayon has support for multi threading on WASM via wasm-bindgen-rayon, though that implementation is only currently working on Chrome-based browsers. Because of this I've elected not to make a WASM version of this tutorial at this time.

The main selling point of Vulkan, DirectX 12, Metal, and by extension Wgpu is that these APIs are designed from the ground up to be thread safe. Up to this point, we have been doing everything on a single thread. That's about to change.

This example is based on the code for tutorial12-camera

I won't go into what threads are in this tutorial. That is a full CS course in and of itself. All we'll be covering is using threading to make loading resources faster.

We won't go over multithreading rendering as we don't have enough different types of objects to justify that yet. This will change in a coming tutorial

Parallelizing loading models and textures

Currently, we load the materials and meshes of our model one at a time. This is a perfect opportunity for multithreading! All our changes will be in model.rs. Let's first start with the materials. We'll convert the regular for loop into a par_iter().map().

rust
// resources.rs
-
-#[cfg(not(target_arch="wasm32"))]
-use rayon::iter::IntoParallelIterator;
-
-impl Model {
-    pub fn load<P: AsRef<Path>>(
-        device: &wgpu::Device,
-        queue: &wgpu::Queue,
-        layout: &wgpu::BindGroupLayout,
-        path: P,
-    ) -> Result<Self> {
-        // ...
-        // UPDATED!
-        let materials = obj_materials.par_iter().map(|mat| {
-            // We can also parallelize loading the textures!
-            let mut textures = [
-                (containing_folder.join(&mat.diffuse_texture), false),
-                (containing_folder.join(&mat.normal_texture), true),
-            ].par_iter().map(|(texture_path, is_normal_map)| {
-                texture::Texture::load(device, queue, texture_path, *is_normal_map)
-            }).collect::<Result<Vec<_>>>()?;
-            
-            // Pop removes from the end of the list.
-            let normal_texture = textures.pop().unwrap();
-            let diffuse_texture = textures.pop().unwrap();
-
-            Ok(Material::new(
-                device,
-                &mat.name,
-                diffuse_texture,
-                normal_texture,
-                layout,
-            ))
-        }).collect::<Result<Vec<Material>>>()?;
-        // ...
-    }
-    // ...
-}

Next, we can update the meshes to be loaded in parallel.

rust
impl Model {
-    pub fn load<P: AsRef<Path>>(
-        device: &wgpu::Device,
-        queue: &wgpu::Queue,
-        layout: &wgpu::BindGroupLayout,
-        path: P,
-    ) -> Result<Self> {
-        // ...
-        // UPDATED!
-        let meshes = obj_models.par_iter().map(|m| {
-            let mut vertices = (0..m.mesh.positions.len() / 3).into_par_iter().map(|i| {
-                ModelVertex {
-                    position: [
-                        m.mesh.positions[i * 3],
-                        m.mesh.positions[i * 3 + 1],
-                        m.mesh.positions[i * 3 + 2],
-                    ].into(),
-                    tex_coords: [
-                        m.mesh.texcoords[i * 2], 
-                        m.mesh.texcoords[i * 2 + 1]
-                    ].into(),
-                    normal: [
-                        m.mesh.normals[i * 3],
-                        m.mesh.normals[i * 3 + 1],
-                        m.mesh.normals[i * 3 + 2],
-                    ].into(),
-                    // We'll calculate these later
-                    tangent: [0.0; 3].into(),
-                    bitangent: [0.0; 3].into(),
-                }
-            }).collect::<Vec<_>>();
-            // ...
-            let index_buffer = device.create_buffer_init(
-                &wgpu::util::BufferInitDescriptor {
-                    label: Some(&format!("{:?} Index Buffer", m.name)), // UPDATED!
-                    contents: bytemuck::cast_slice(&m.mesh.indices),
-                    usage: wgpu::BufferUsages::INDEX,
-                }
-            );
-            // ...
-            // UPDATED!
-            Ok(Mesh {
-                // ...
-            })
-        }).collect::<Result<Vec<_>>>()?;
-        // ...
-    }
-    // ...
-}

We've parallelized loading the meshes, and making the vertex array for them. Probably a bit overkill, but rayon should prevent us from using too many threads.

You'll notice that we didn't use rayon for calculating the tangent, and bitangent. I tried to get it to work, but I was having trouble finding a way to do it without multiple mutable references to vertices. I don't feel like introducing a std::sync::Mutex, so I'll leave it for now.

This is honestly a better job for a compute shader, as the model data is going to get loaded into a buffer anyway.

It's that easy!

Most of the wgpu types are Send + Sync, so we can use them in threads without much trouble. It was so easy, that I feel like this tutorial is too short! I'll just leave off with a speed comparison between the previous model loading code and the current code.

Elapsed (Original): 309.596382ms
-Elapsed (Threaded): 199.645027ms

We're not loading that many resources, so the speedup is minimal. We'll be doing more stuff with threading, but this is a good introduction.

`,16);function c(D,F,y,C,i,A){const s=a("AutoGithubLink");return l(),p("div",null,[r,o(s)])}const b=n(t,[["render",c]]);export{m as __pageData,b as default}; diff --git a/assets/showcase_threading.md.a67a1647.lean.js b/assets/showcase_threading.md.a67a1647.lean.js deleted file mode 100644 index ef7bc82d8..000000000 --- a/assets/showcase_threading.md.a67a1647.lean.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as n,E as a,o as l,c as p,J as o,S as e}from"./chunks/framework.adbf3c9e.js";const m=JSON.parse('{"title":"Multi-threading with Wgpu and Rayon","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/threading.md","filePath":"showcase/threading.md","lastUpdated":1701933923000}'),t={name:"showcase/threading.md"},r=e("",16);function c(D,F,y,C,i,A){const s=a("AutoGithubLink");return l(),p("div",null,[r,o(s)])}const b=n(t,[["render",c]]);export{m as __pageData,b as default}; diff --git a/assets/showcase_threading.md.yFNXGQkQ.js b/assets/showcase_threading.md.yFNXGQkQ.js new file mode 100644 index 000000000..7f93207bc --- /dev/null +++ b/assets/showcase_threading.md.yFNXGQkQ.js @@ -0,0 +1,88 @@ +import{_ as i,D as a,o as n,c as h,I as l,R as p}from"./chunks/framework.bMtwhlie.js";const o=JSON.parse('{"title":"Multi-threading with Wgpu and Rayon","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/threading.md","filePath":"showcase/threading.md","lastUpdated":1703303099000}'),k={name:"showcase/threading.md"},t=p(`

Multi-threading with Wgpu and Rayon

This example has not been tested on WASM. Rayon has support for multi threading on WASM via wasm-bindgen-rayon, though that implementation is only currently working on Chrome-based browsers. Because of this I've elected not to make a WASM version of this tutorial at this time.

The main selling point of Vulkan, DirectX 12, Metal, and by extension Wgpu is that these APIs are designed from the ground up to be thread safe. Up to this point, we have been doing everything on a single thread. That's about to change.

This example is based on the code for tutorial12-camera

I won't go into what threads are in this tutorial. That is a full CS course in and of itself. All we'll be covering is using threading to make loading resources faster.

We won't go over multithreading rendering as we don't have enough different types of objects to justify that yet. This will change in a coming tutorial

Parallelizing loading models and textures

Currently, we load the materials and meshes of our model one at a time. This is a perfect opportunity for multithreading! All our changes will be in model.rs. Let's first start with the materials. We'll convert the regular for loop into a par_iter().map().

rust
// resources.rs
+
+#[cfg(not(target_arch="wasm32"))]
+use rayon::iter::IntoParallelIterator;
+
+impl Model {
+    pub fn load<P: AsRef<Path>>(
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        layout: &wgpu::BindGroupLayout,
+        path: P,
+    ) -> Result<Self> {
+        // ...
+        // UPDATED!
+        let materials = obj_materials.par_iter().map(|mat| {
+            // We can also parallelize loading the textures!
+            let mut textures = [
+                (containing_folder.join(&mat.diffuse_texture), false),
+                (containing_folder.join(&mat.normal_texture), true),
+            ].par_iter().map(|(texture_path, is_normal_map)| {
+                texture::Texture::load(device, queue, texture_path, *is_normal_map)
+            }).collect::<Result<Vec<_>>>()?;
+            
+            // Pop removes from the end of the list.
+            let normal_texture = textures.pop().unwrap();
+            let diffuse_texture = textures.pop().unwrap();
+
+            Ok(Material::new(
+                device,
+                &mat.name,
+                diffuse_texture,
+                normal_texture,
+                layout,
+            ))
+        }).collect::<Result<Vec<Material>>>()?;
+        // ...
+    }
+    // ...
+}

Next, we can update the meshes to be loaded in parallel.

rust
impl Model {
+    pub fn load<P: AsRef<Path>>(
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        layout: &wgpu::BindGroupLayout,
+        path: P,
+    ) -> Result<Self> {
+        // ...
+        // UPDATED!
+        let meshes = obj_models.par_iter().map(|m| {
+            let mut vertices = (0..m.mesh.positions.len() / 3).into_par_iter().map(|i| {
+                ModelVertex {
+                    position: [
+                        m.mesh.positions[i * 3],
+                        m.mesh.positions[i * 3 + 1],
+                        m.mesh.positions[i * 3 + 2],
+                    ].into(),
+                    tex_coords: [
+                        m.mesh.texcoords[i * 2], 
+                        m.mesh.texcoords[i * 2 + 1]
+                    ].into(),
+                    normal: [
+                        m.mesh.normals[i * 3],
+                        m.mesh.normals[i * 3 + 1],
+                        m.mesh.normals[i * 3 + 2],
+                    ].into(),
+                    // We'll calculate these later
+                    tangent: [0.0; 3].into(),
+                    bitangent: [0.0; 3].into(),
+                }
+            }).collect::<Vec<_>>();
+            // ...
+            let index_buffer = device.create_buffer_init(
+                &wgpu::util::BufferInitDescriptor {
+                    label: Some(&format!("{:?} Index Buffer", m.name)), // UPDATED!
+                    contents: bytemuck::cast_slice(&m.mesh.indices),
+                    usage: wgpu::BufferUsages::INDEX,
+                }
+            );
+            // ...
+            // UPDATED!
+            Ok(Mesh {
+                // ...
+            })
+        }).collect::<Result<Vec<_>>>()?;
+        // ...
+    }
+    // ...
+}

We've parallelized loading the meshes, and making the vertex array for them. Probably a bit overkill, but rayon should prevent us from using too many threads.

You'll notice that we didn't use rayon for calculating the tangent, and bitangent. I tried to get it to work, but I was having trouble finding a way to do it without multiple mutable references to vertices. I don't feel like introducing a std::sync::Mutex, so I'll leave it for now.

This is honestly a better job for a compute shader, as the model data is going to get loaded into a buffer anyway.

It's that easy!

Most of the wgpu types are Send + Sync, so we can use them in threads without much trouble. It was so easy, that I feel like this tutorial is too short! I'll just leave off with a speed comparison between the previous model loading code and the current code.

Elapsed (Original): 309.596382ms
+Elapsed (Threaded): 199.645027ms

We're not loading that many resources, so the speedup is minimal. We'll be doing more stuff with threading, but this is a good introduction.

`,16);function e(r,E,d,g,y,F){const s=a("AutoGithubLink");return n(),h("div",null,[t,l(s)])}const u=i(k,[["render",e]]);export{o as __pageData,u as default}; diff --git a/assets/showcase_threading.md.yFNXGQkQ.lean.js b/assets/showcase_threading.md.yFNXGQkQ.lean.js new file mode 100644 index 000000000..b17c04a23 --- /dev/null +++ b/assets/showcase_threading.md.yFNXGQkQ.lean.js @@ -0,0 +1 @@ +import{_ as i,D as a,o as n,c as h,I as l,R as p}from"./chunks/framework.bMtwhlie.js";const o=JSON.parse('{"title":"Multi-threading with Wgpu and Rayon","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/threading.md","filePath":"showcase/threading.md","lastUpdated":1703303099000}'),k={name:"showcase/threading.md"},t=p("",16);function e(r,E,d,g,y,F){const s=a("AutoGithubLink");return n(),h("div",null,[t,l(s)])}const u=i(k,[["render",e]]);export{o as __pageData,u as default}; diff --git a/assets/showcase_windowless_index.md.La71QnH2.js b/assets/showcase_windowless_index.md.La71QnH2.js new file mode 100644 index 000000000..fd298e700 --- /dev/null +++ b/assets/showcase_windowless_index.md.La71QnH2.js @@ -0,0 +1,174 @@ +import{_ as i,D as a,o as n,c as p,I as l,R as h}from"./chunks/framework.bMtwhlie.js";const 
k="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAAFq0lEQVR4nO3U2XVcRxBEQcovOiJf5IY8kl8SRLAPAXAweEvvGfFRFlTeP/786+9/vwGRBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEINj3b/+83G8v9/vLJZEABBMABCBUGX8hApkEIJQA8D8BCPRx/IUI5BGAQAJAIQBhPht/IQJZBCCMAPCWAAT5avyFCOQQgCACwEcCEOLo+AsRyCAAIQSARwQgwNnxFyKwPwEIIAB8RgA2d3X8hQjsTQA2JwA8IwAbuzv+QgT2JQAbEwC+IgCbqjX+QgT2JACbEgCOEIAN1R5/IQL7EYANCQBHCcBmWo2/EIG9CMBmBIAzBGAjrcdfiMA+BGAjAsBZArCJXuMvRGAPArAJAeAKAdhA7/EXIrA+AdiAAHCVACxu1PgLEVibACxOALhDABY2evyFCKxLABYmANwlAIuaZfyFCKxJABYlANQgAAuabfyFCKxHABYkANQiAIuZdfyFCKxFABYjANQkAAuZffyFCKxDABYiANQmAItYZfyFCKxBABYhALQgAAtYbfyFCMxPABYgALQiAJNbdfyFCMxNACYnALQkABNbffyFCMxLACYmALQmAJPaZfyFCMxJACYlAPQgABPabfyFCMxHACYkAPQiAJPZdfyFCMxFACYjAPQkABPZffyFCMxDACYiAPQmAJNIGX8hAnMQgEkIACMIwATSxl+IwHgCMAEBYBQBGCx1/IUIjCUAgwmAAIwkAAOlj78QgXEEYCABeCUA4wjAIMb/ngiMIQCDCMB7AjCGAAxg/I+JQH8CMIAAPCYA/QlAZ8b/nAj0JQCdCcBzAtCXAHRk/MeIQD8C0JEAHCMA/QhAJ8Z/jgj0IQCdCMA5AtCHAHRg/NeIQHsC0IEAXCMA7QlAY8Z/jwi0JQCNCcA9AtCWADRk/HWIQDsC0JAA1CEA7QhAI8Zflwi0IQCNCEBdAtCGADRg/G2IQH0C0IAAtCEA9QlAZcbflgjUJQCVCUBbAlCXAFRk/H2IQD0CUJEA9CEA9QhAJcbflwjUIQCVCEBfAlCHAFRg/GOIwH0CUIEAjCEA9wnATcY/lgjcIwA3CcBYAnCPANxg/HMQgesE4AYBmIMAXCcAFxn/XETgGgG4SADmIgDXCMAFxj8nEThPAC4QgDkJwHkCcJLxz00EzhGAkwRgbgJwjgCcYPxrEIHjBOAEAViDABwnAAcZ/1pE4BgBOEgA1iIAxwjAAca/JhH4mgAcIABrEoCvCcAXjH9tIvCcAHxBANYmAM8JwBPGvwcR+JwAPCEAexCAzwnAJ4x/LyLwmAB8QgD2IgCPCcADxr8nEfidADwgAHsSgN8JwAfGvzcReE8APhCAvQnAewLwhvFnEIFfBOANAcggAL8IwE/Gn0UEXgnATwKQRQBeCcAL488kAgLwgwBkEgABeHkB40+WHoH4AEAyAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCY
AEEwAINh/p/JoLhfYRNsAAAAASUVORK5CYII=",u=JSON.parse('{"title":"离屏渲染","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/windowless/index.md","filePath":"showcase/windowless/index.md","lastUpdated":1703303099000}'),t={name:"showcase/windowless/index.md"},e=h(`

离屏渲染

有时我们只是想利用 gpu,也许是要并行地计算一组大的数字; 也许是正在制作一部 3D 电影,并需要用路径追踪来创建一个看起来很真实的场景; 也许正在挖掘一种加密货币。在所有这些情况下,我们可能 不需要 从窗口看到正在发生的事情。

如何使用?

离屏渲染(Off-Screen Rendering, 也叫做 Windowless Rendering)其实很简单:事实上,我们不需要一个窗口(Window)来创建一个GPU 实例,不需要一个窗口来选择适配器,也不需要一个窗口来创建逻辑设备。我们只需要窗口来创建一个展示平面交换链(SwapChain)。所以,只要有了逻辑设备,就可以开始向 GPU 发送命令。

rust
let adapter = instance
+    .request_adapter(&wgpu::RequestAdapterOptions {
+        compatible_surface: Some(&surface),
+        ..Default::default()
+    })
+    .await
+    .unwrap();
+let (device, queue) = adapter
+    .request_device(&Default::default(), None)
+    .await
+    .unwrap();

离屏绘制一个三角形

虽然我们已经说过不需要看到 gpu 在做什么,但确实需要在某些时候看到结果。如果回顾一下关于 surface 的讨论,会发现我们是使用 surface.get_current_texture() 获取一个纹理来绘制。

现在,我们跳过这一步,自己创建纹理。这里需要注意的是,需要指定 TextureFormat::Rgba8UnormSrgb 为纹理像素格式而不是 surface.get_capabilities(&adapter).formats[0],因为 PNG 使用 RGBA 而不是 BGRA 像素格式:

rust
let texture_size = 256u32;
+
+let texture_desc = wgpu::TextureDescriptor {
+    size: wgpu::Extent3d {
+        width: texture_size,
+        height: texture_size,
+        depth_or_array_layers: 1,
+    },
+    mip_level_count: 1,
+    sample_count: 1,
+    dimension: wgpu::TextureDimension::D2,
+    format: wgpu::TextureFormat::Rgba8UnormSrgb,
+    usage: wgpu::TextureUsages::COPY_SRC
+        | wgpu::TextureUsages::RENDER_ATTACHMENT,
+    label: None,
+    view_formats: &[],
+};
+let texture = device.create_texture(&texture_desc);
+let texture_view = texture.create_view(&Default::default());

usage 字段的 RENDER_ATTACHMENT 位令 wgpu 可以渲染到此纹理,COPY_SRC 位令我们能够从纹理中提取数据,以便能够将其保存到文件中。

虽然我们可以使用这个纹理来绘制三角形,但还需要一些方法来获取它里面的像素。在纹理教程中,我们用一个缓冲区从一个文件中加载颜色数据,然后复制到另一个缓冲区。

我们要做的是反过来:从纹理中把数据复制到缓冲区,然后保存到文件中。我们得创建一个足够大的缓冲区来容纳数据:

rust
let u32_size = std::mem::size_of::<u32>() as u32;
+
+let output_buffer_size = (u32_size * texture_size * texture_size) as wgpu::BufferAddress;
+let output_buffer_desc = wgpu::BufferDescriptor {
+    size: output_buffer_size,
+    usage: wgpu::BufferUsages::COPY_DST
+        // MAP_READ 告诉 wpgu 我们要在 cpu 端读取此缓冲区
+        | wgpu::BufferUsages::MAP_READ,
+    label: None,
+    mapped_at_creation: false,
+};
+let output_buffer = device.create_buffer(&output_buffer_desc);

现在已经做好了离屏绘制的准备,让我们来绘制点东西试试。由于只是画一个三角形,可以重用管线教程中的着色器代码:

rust
// 顶点着色器
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+};
+
+@vertex
+fn vs_main(
+    @builtin(vertex_index) in_vertex_index: u32,
+) -> VertexOutput {
+    var out: VertexOutput;
+    let x = f32(1 - i32(in_vertex_index)) * 0.5;
+    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
+    out.clip_position = vec4f(x, y, 0.0, 1.0);
+    return out;
+}
+
+// 片元着色器
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return vec4f(0.3, 0.2, 0.1, 1.0);
+}

然后用着色器来创建一个简单的渲染管线 RenderPipeline

rust
 let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
+            label: Some("Shader"),
+            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
+        });
+
+let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+    label: Some("Render Pipeline Layout"),
+    bind_group_layouts: &[],
+    push_constant_ranges: &[],
+});
+
+let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    label: Some("Render Pipeline"),
+    layout: Some(&render_pipeline_layout),
+    vertex: wgpu::VertexState {
+        module: &shader,
+        entry_point: "vs_main",
+        buffers: &[],
+    },
+    fragment: Some(wgpu::FragmentState {
+        module: &fs_module,
+        entry_point: "main",
+        targets: &[Some(wgpu::ColorTargetState {
+            format: texture_desc.format,
+            alpha_blend: wgpu::BlendState::REPLACE,
+            color_blend: wgpu::BlendState::REPLACE,
+            write_mask: wgpu::ColorWrites::ALL,
+        })],
+    }),
+    primitive: wgpu::PrimitiveState {
+        topology: wgpu::PrimitiveTopology::TriangleList,
+        strip_index_format: None,
+        front_face: wgpu::FrontFace::Ccw,
+        cull_mode: Some(wgpu::Face::Back),
+        polygon_mode: wgpu::PolygonMode::Fill,
+    },
+    depth_stencil: None,
+    multisample: wgpu::MultisampleState {
+        count: 1,
+        mask: !0,
+        alpha_to_coverage_enabled: false,
+    },
+});

接着创建一个命令编码器 CommandEncoder

rust
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+    label: None,
+});

离屏渲染最关键的地方就是渲染通道 的设置了。一个渲染通道至少需要一个颜色附件,一个颜色附件需要绑定一个纹理视图。前面的教程我们一直使用的是交换链SwapChain)的纹理视图,但事实上任何纹理视图都可以,包括我们自己创建的 texture_view

rust
{
+    let render_pass_desc = wgpu::RenderPassDescriptor {
+        label: Some("Render Pass"),
+        color_attachments: &[
+            wgpu::RenderPassColorAttachment {
+                view: &texture_view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(wgpu::Color {
+                        r: 0.1,
+                        g: 0.2,
+                        b: 0.3,
+                        a: 1.0,
+                    }),
+                    store: wgpu::StoreOp::Store
+                },
+            }
+        ],
+        ..Default::default()
+    };
+    let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
+
+    render_pass.set_pipeline(&render_pipeline);
+    render_pass.draw(0..3, 0..1);
+}

让我们把绘制在纹理Texture)中的像素数据复制到 output_buffer 缓冲区

rust
encoder.copy_texture_to_buffer(
+    wgpu::ImageCopyTexture {
+        aspect: wgpu::TextureAspect::All,
+                texture: &texture,
+        mip_level: 0,
+        origin: wgpu::Origin3d::ZERO,
+    },
+    wgpu::ImageCopyBuffer {
+        buffer: &output_buffer,
+        layout: wgpu::ImageDataLayout {
+            offset: 0,
+            bytes_per_row: u32_size * texture_size,
+            rows_per_image: texture_size,
+        },
+    },
+    texture_desc.size,
+);

上面已经编码(Encode)好了所有的命令(Command),现在把它们提交给 GPU 来执行:

rust
queue.submit(Some(encoder.finish()));

从缓冲区中读取数据

为了从缓冲区中读取数据,首先需要对它进行映射(Map),然后执行 get_mapped_range() 就可以得到一个缓冲区视图BufferView)实例,它实质上就是一个 &[u8] 类型数据的容器:

rust
// 需要对映射变量设置范围,以便我们能够解除缓冲区的映射
+{
+    let buffer_slice = output_buffer.slice(..);
+
+    // 注意:我们必须在 await future 之前先创建映射,然后再调用 device.poll()。
+    // 否则,应用程序将停止响应。
+    let (tx, rx) = futures_intrusive::channel::shared::oneshot_channel();
+    buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
+        tx.send(result).unwrap();
+    });
+    device.poll(wgpu::Maintain::Wait);
+    rx.receive().await.unwrap().unwrap();
+
+    let data = buffer_slice.get_mapped_range();
+
+    use image::{ImageBuffer, Rgba};
+    let buffer =
+        ImageBuffer::<Rgba<u8>, _>::from_raw(texture_size, texture_size, data).unwrap();
+    buffer.save("image.png").unwrap();
+
+}
+// 解除缓冲区映射
+output_buffer.unmap();

这个程序使用了 futures-intrusive,那也是 wgpu 的 demo 中使用的

Main 函数不能异步化

main() 做为程序的入口函数,它默认无法返回一个 Future(异步任务抽象单元),所以不能使用 async 关键字。我们将通过把代码封装到另一个函数中来解决此问题,这样就可以在 main()阻塞它(也就是等待函数真正执行完成)。异步函数被调用时会立即返回一个 Future 对象,此时函数内的任务可能还没有真正开始执行, 我们需要使用一个可以轮询 Future 的,比如pollster crate

有一些可以用来标注 main() 函数为异步,如 async-stdtokio。我选择不这样做,因为这两个包对咱们的项目来说都有点儿重了。当然,你可以使用你喜欢的任何异步包和设置。

rust
async fn run() {
+    // 离屏绘制代码...
+}
+
+fn main() {
+    pollster::block_on(run());
+}

现在运行代码,就会在项目根目录输出这样一张名为 image.png 的图像:

a brown triangle

',35);function r(E,d,g,y,F,A){const s=a("AutoGithubLink");return n(),p("div",null,[e,l(s)])}const b=i(t,[["render",r]]);export{u as __pageData,b as default}; diff --git a/assets/showcase_windowless_index.md.ad5627a0.lean.js b/assets/showcase_windowless_index.md.La71QnH2.lean.js similarity index 83% rename from assets/showcase_windowless_index.md.ad5627a0.lean.js rename to assets/showcase_windowless_index.md.La71QnH2.lean.js index d3d9548ab..59d2e75b9 100644 --- a/assets/showcase_windowless_index.md.ad5627a0.lean.js +++ b/assets/showcase_windowless_index.md.La71QnH2.lean.js @@ -1 +1 @@ -import{_ as n,E as a,o as l,c as p,J as o,S as e}from"./chunks/framework.adbf3c9e.js";const r="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAAFq0lEQVR4nO3U2XVcRxBEQcovOiJf5IY8kl8SRLAPAXAweEvvGfFRFlTeP/786+9/vwGRBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEINj3b/+83G8v9/vLJZEABBMABCBUGX8hApkEIJQA8D8BCPRx/IUI5BGAQAJAIQBhPht/IQJZBCCMAPCWAAT5avyFCOQQgCACwEcCEOLo+AsRyCAAIQSARwQgwNnxFyKwPwEIIAB8RgA2d3X8hQjsTQA2JwA8IwAbuzv+QgT2JQAbEwC+IgCbqjX+QgT2JACbEgCOEIAN1R5/IQL7EYANCQBHCcBmWo2/EIG9CMBmBIAzBGAjrcdfiMA+BGAjAsBZArCJXuMvRGAPArAJAeAKAdhA7/EXIrA+AdiAAHCVACxu1PgLEVibACxOALhDABY2evyFCKxLABYmANwlAIuaZfyFCKxJABYlANQgAAuabfyFCKxHABYkANQiAIuZdfyFCKxFABYjANQkAAuZffyFCKxDABYiANQmAItYZfyFCKxBABYhALQgAAtYbfyFCMxPABYgALQiAJNbdfyFCMxNACYnALQkABNbffyFCMxLACYmALQmAJPaZfyFCMxJACYlAPQgABPabfyFCMxHACYkAPQiAJPZdfyFCMxFACYjAPQkABPZffyFCMxDACYiAPQmAJNIGX8hAnMQgEkIACMIwATSxl+IwHgCMAEBYBQBGCx1/IUIjCUAgwmAAIwkAAOlj78QgXEEYCABeCUA4wjAIMb/ngiMIQCDCMB7AjCGAAxg/I+JQH8CMIAAPCYA/QlAZ8b/nAj0JQCdCcBzAtCXAHRk/MeIQD8C0JEAHCMA/QhAJ8Z/jgj0IQCdCMA5AtCHAHRg/NeIQHsC0IEAXCMA7QlAY8Z/jwi0JQCNCcA9AtCWADRk/HWIQDsC0JAA1CEA7QhAI8Zflwi0IQCNCEBdAtCGADRg/G2IQH0C0IAAtCEA9QlAZcbflgjUJQCVCUBbAlCXAFRk/H
2IQD0CUJEA9CEA9QhAJcbflwjUIQCVCEBfAlCHAFRg/GOIwH0CUIEAjCEA9wnATcY/lgjcIwA3CcBYAnCPANxg/HMQgesE4AYBmIMAXCcAFxn/XETgGgG4SADmIgDXCMAFxj8nEThPAC4QgDkJwHkCcJLxz00EzhGAkwRgbgJwjgCcYPxrEIHjBOAEAViDABwnAAcZ/1pE4BgBOEgA1iIAxwjAAca/JhH4mgAcIABrEoCvCcAXjH9tIvCcAHxBANYmAM8JwBPGvwcR+JwAPCEAexCAzwnAJ4x/LyLwmAB8QgD2IgCPCcADxr8nEfidADwgAHsSgN8JwAfGvzcReE8APhCAvQnAewLwhvFnEIFfBOANAcggAL8IwE/Gn0UEXgnATwKQRQBeCcAL488kAgLwgwBkEgABeHkB40+WHoH4AEAyAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAINh/p/JoLhfYRNsAAAAASUVORK5CYII=",m=JSON.parse('{"title":"离屏渲染","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/windowless/index.md","filePath":"showcase/windowless/index.md","lastUpdated":1701933923000}'),c={name:"showcase/windowless/index.md"},t=e("",35);function F(D,A,y,C,i,u){const s=a("AutoGithubLink");return l(),p("div",null,[t,o(s)])}const B=n(c,[["render",F]]);export{m as __pageData,B as default}; +import{_ as i,D as a,o as n,c as p,I as l,R as h}from"./chunks/framework.bMtwhlie.js";const 
k="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAAFq0lEQVR4nO3U2XVcRxBEQcovOiJf5IY8kl8SRLAPAXAweEvvGfFRFlTeP/786+9/vwGRBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEINj3b/+83G8v9/vLJZEABBMABCBUGX8hApkEIJQA8D8BCPRx/IUI5BGAQAJAIQBhPht/IQJZBCCMAPCWAAT5avyFCOQQgCACwEcCEOLo+AsRyCAAIQSARwQgwNnxFyKwPwEIIAB8RgA2d3X8hQjsTQA2JwA8IwAbuzv+QgT2JQAbEwC+IgCbqjX+QgT2JACbEgCOEIAN1R5/IQL7EYANCQBHCcBmWo2/EIG9CMBmBIAzBGAjrcdfiMA+BGAjAsBZArCJXuMvRGAPArAJAeAKAdhA7/EXIrA+AdiAAHCVACxu1PgLEVibACxOALhDABY2evyFCKxLABYmANwlAIuaZfyFCKxJABYlANQgAAuabfyFCKxHABYkANQiAIuZdfyFCKxFABYjANQkAAuZffyFCKxDABYiANQmAItYZfyFCKxBABYhALQgAAtYbfyFCMxPABYgALQiAJNbdfyFCMxNACYnALQkABNbffyFCMxLACYmALQmAJPaZfyFCMxJACYlAPQgABPabfyFCMxHACYkAPQiAJPZdfyFCMxFACYjAPQkABPZffyFCMxDACYiAPQmAJNIGX8hAnMQgEkIACMIwATSxl+IwHgCMAEBYBQBGCx1/IUIjCUAgwmAAIwkAAOlj78QgXEEYCABeCUA4wjAIMb/ngiMIQCDCMB7AjCGAAxg/I+JQH8CMIAAPCYA/QlAZ8b/nAj0JQCdCcBzAtCXAHRk/MeIQD8C0JEAHCMA/QhAJ8Z/jgj0IQCdCMA5AtCHAHRg/NeIQHsC0IEAXCMA7QlAY8Z/jwi0JQCNCcA9AtCWADRk/HWIQDsC0JAA1CEA7QhAI8Zflwi0IQCNCEBdAtCGADRg/G2IQH0C0IAAtCEA9QlAZcbflgjUJQCVCUBbAlCXAFRk/H2IQD0CUJEA9CEA9QhAJcbflwjUIQCVCEBfAlCHAFRg/GOIwH0CUIEAjCEA9wnATcY/lgjcIwA3CcBYAnCPANxg/HMQgesE4AYBmIMAXCcAFxn/XETgGgG4SADmIgDXCMAFxj8nEThPAC4QgDkJwHkCcJLxz00EzhGAkwRgbgJwjgCcYPxrEIHjBOAEAViDABwnAAcZ/1pE4BgBOEgA1iIAxwjAAca/JhH4mgAcIABrEoCvCcAXjH9tIvCcAHxBANYmAM8JwBPGvwcR+JwAPCEAexCAzwnAJ4x/LyLwmAB8QgD2IgCPCcADxr8nEfidADwgAHsSgN8JwAfGvzcReE8APhCAvQnAewLwhvFnEIFfBOANAcggAL8IwE/Gn0UEXgnATwKQRQBeCcAL488kAgLwgwBkEgABeHkB40+WHoH4AEAyAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCY
AEEwAINh/p/JoLhfYRNsAAAAASUVORK5CYII=",u=JSON.parse('{"title":"离屏渲染","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/windowless/index.md","filePath":"showcase/windowless/index.md","lastUpdated":1703303099000}'),t={name:"showcase/windowless/index.md"},e=h("",35);function r(E,d,g,y,F,A){const s=a("AutoGithubLink");return n(),p("div",null,[e,l(s)])}const b=i(t,[["render",r]]);export{u as __pageData,b as default}; diff --git a/assets/showcase_windowless_index.md.ad5627a0.js b/assets/showcase_windowless_index.md.ad5627a0.js deleted file mode 100644 index de32e4624..000000000 --- a/assets/showcase_windowless_index.md.ad5627a0.js +++ /dev/null @@ -1,174 +0,0 @@ -import{_ as n,E as a,o as l,c as p,J as o,S as e}from"./chunks/framework.adbf3c9e.js";const r="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAAFq0lEQVR4nO3U2XVcRxBEQcovOiJf5IY8kl8SRLAPAXAweEvvGfFRFlTeP/786+9/vwGRBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEINj3b/+83G8v9/vLJZEABBMABCBUGX8hApkEIJQA8D8BCPRx/IUI5BGAQAJAIQBhPht/IQJZBCCMAPCWAAT5avyFCOQQgCACwEcCEOLo+AsRyCAAIQSARwQgwNnxFyKwPwEIIAB8RgA2d3X8hQjsTQA2JwA8IwAbuzv+QgT2JQAbEwC+IgCbqjX+QgT2JACbEgCOEIAN1R5/IQL7EYANCQBHCcBmWo2/EIG9CMBmBIAzBGAjrcdfiMA+BGAjAsBZArCJXuMvRGAPArAJAeAKAdhA7/EXIrA+AdiAAHCVACxu1PgLEVibACxOALhDABY2evyFCKxLABYmANwlAIuaZfyFCKxJABYlANQgAAuabfyFCKxHABYkANQiAIuZdfyFCKxFABYjANQkAAuZffyFCKxDABYiANQmAItYZfyFCKxBABYhALQgAAtYbfyFCMxPABYgALQiAJNbdfyFCMxNACYnALQkABNbffyFCMxLACYmALQmAJPaZfyFCMxJACYlAPQgABPabfyFCMxHACYkAPQiAJPZdfyFCMxFACYjAPQkABPZffyFCMxDACYiAPQmAJNIGX8hAnMQgEkIACMIwATSxl+IwHgCMAEBYBQBGCx1/IUIjCUAgwmAAIwkAAOlj78QgXEEYCABeCUA4wjAIMb/ngiMIQCDCMB7AjCGAAxg/I+JQH8CMIAAPCYA/QlAZ8b/nAj0JQCdCcBzAtCXAHRk/MeIQD8C0JEAHCMA/QhAJ8Z/jgj0IQCdCMA5AtCHAHRg/NeIQHsC0IEAXCMA7QlAY8Z/jwi0JQCNCcA9AtC
WADRk/HWIQDsC0JAA1CEA7QhAI8Zflwi0IQCNCEBdAtCGADRg/G2IQH0C0IAAtCEA9QlAZcbflgjUJQCVCUBbAlCXAFRk/H2IQD0CUJEA9CEA9QhAJcbflwjUIQCVCEBfAlCHAFRg/GOIwH0CUIEAjCEA9wnATcY/lgjcIwA3CcBYAnCPANxg/HMQgesE4AYBmIMAXCcAFxn/XETgGgG4SADmIgDXCMAFxj8nEThPAC4QgDkJwHkCcJLxz00EzhGAkwRgbgJwjgCcYPxrEIHjBOAEAViDABwnAAcZ/1pE4BgBOEgA1iIAxwjAAca/JhH4mgAcIABrEoCvCcAXjH9tIvCcAHxBANYmAM8JwBPGvwcR+JwAPCEAexCAzwnAJ4x/LyLwmAB8QgD2IgCPCcADxr8nEfidADwgAHsSgN8JwAfGvzcReE8APhCAvQnAewLwhvFnEIFfBOANAcggAL8IwE/Gn0UEXgnATwKQRQBeCcAL488kAgLwgwBkEgABeHkB40+WHoH4AEAyAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAIJgAQDABgGACAMEEAIIJAAQTAAgmABBMACCYAEAwAYBgAgDBBACCCQAEEwAIJgAQTAAgmABAMAGAYAIAwQQAggkABBMACCYAEEwAINh/p/JoLhfYRNsAAAAASUVORK5CYII=",m=JSON.parse('{"title":"离屏渲染","description":"","frontmatter":{},"headers":[],"relativePath":"showcase/windowless/index.md","filePath":"showcase/windowless/index.md","lastUpdated":1701933923000}'),c={name:"showcase/windowless/index.md"},t=e(`

离屏渲染

有时我们只是想利用 gpu,也许是要并行地计算一组大的数字; 也许是正在制作一部 3D 电影,并需要用路径追踪来创建一个看起来很真实的场景; 也许正在挖掘一种加密货币。在所有这些情况下,我们可能 不需要 从窗口看到正在发生的事情。

如何使用?

离屏渲染(Off-Screen Rendering, 也叫做 Windowless Rendering)其实很简单:事实上,我们不需要一个窗口(Window)来创建一个GPU 实例,不需要一个窗口来选择适配器,也不需要一个窗口来创建逻辑设备。我们只需要窗口来创建一个展示平面交换链(SwapChain)。所以,只要有了逻辑设备,就可以开始向 GPU 发送命令。

rust
let adapter = instance
-    .request_adapter(&wgpu::RequestAdapterOptions {
-        compatible_surface: Some(&surface),
-        ..Default::default()
-    })
-    .await
-    .unwrap();
-let (device, queue) = adapter
-    .request_device(&Default::default(), None)
-    .await
-    .unwrap();

离屏绘制一个三角形

虽然我们已经说过不需要看到 gpu 在做什么,但确实需要在某些时候看到结果。如果回顾一下关于 surface 的讨论,会发现我们是使用 surface.get_current_texture() 获取一个纹理来绘制。

现在,我们跳过这一步,自己创建纹理。这里需要注意的是,需要指定 TextureFormat::Rgba8UnormSrgb 为纹理像素格式而不是 surface.get_capabilities(&adapter).formats[0],因为 PNG 使用 RGBA 而不是 BGRA 像素格式:

rust
let texture_size = 256u32;
-
-let texture_desc = wgpu::TextureDescriptor {
-    size: wgpu::Extent3d {
-        width: texture_size,
-        height: texture_size,
-        depth_or_array_layers: 1,
-    },
-    mip_level_count: 1,
-    sample_count: 1,
-    dimension: wgpu::TextureDimension::D2,
-    format: wgpu::TextureFormat::Rgba8UnormSrgb,
-    usage: wgpu::TextureUsages::COPY_SRC
-        | wgpu::TextureUsages::RENDER_ATTACHMENT,
-    label: None,
-    view_formats: &[],
-};
-let texture = device.create_texture(&texture_desc);
-let texture_view = texture.create_view(&Default::default());

usage 字段的 RENDER_ATTACHMENT 位令 wgpu 可以渲染到此纹理,COPY_SRC 位令我们能够从纹理中提取数据,以便能够将其保存到文件中。

虽然我们可以使用这个纹理来绘制三角形,但还需要一些方法来获取它里面的像素。在纹理教程中,我们用一个缓冲区从一个文件中加载颜色数据,然后复制到另一个缓冲区。

我们要做的是反过来:从纹理中把数据复制到缓冲区,然后保存到文件中。我们得创建一个足够大的缓冲区来容纳数据:

rust
let u32_size = std::mem::size_of::<u32>() as u32;
-
-let output_buffer_size = (u32_size * texture_size * texture_size) as wgpu::BufferAddress;
-let output_buffer_desc = wgpu::BufferDescriptor {
-    size: output_buffer_size,
-    usage: wgpu::BufferUsages::COPY_DST
-        // MAP_READ 告诉 wpgu 我们要在 cpu 端读取此缓冲区
-        | wgpu::BufferUsages::MAP_READ,
-    label: None,
-    mapped_at_creation: false,
-};
-let output_buffer = device.create_buffer(&output_buffer_desc);

现在已经做好了离屏绘制的准备,让我们来绘制点东西试试。由于只是画一个三角形,可以重用管线教程中的着色器代码:

rust
// 顶点着色器
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-};
-
-@vertex
-fn vs_main(
-    @builtin(vertex_index) in_vertex_index: u32,
-) -> VertexOutput {
-    var out: VertexOutput;
-    let x = f32(1 - i32(in_vertex_index)) * 0.5;
-    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
-    out.clip_position = vec4f(x, y, 0.0, 1.0);
-    return out;
-}
-
-// 片元着色器
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return vec4f(0.3, 0.2, 0.1, 1.0);
-}

然后用着色器来创建一个简单的渲染管线 RenderPipeline

rust
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
-            label: Some("Shader"),
-            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
-        });
-
-let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-    label: Some("Render Pipeline Layout"),
-    bind_group_layouts: &[],
-    push_constant_ranges: &[],
-});
-
-let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    label: Some("Render Pipeline"),
-    layout: Some(&render_pipeline_layout),
-    vertex: wgpu::VertexState {
-        module: &shader,
-        entry_point: "vs_main",
-        buffers: &[],
-    },
-    fragment: Some(wgpu::FragmentState {
-        module: &fs_module,
-        entry_point: "main",
-        targets: &[Some(wgpu::ColorTargetState {
-            format: texture_desc.format,
-            alpha_blend: wgpu::BlendState::REPLACE,
-            color_blend: wgpu::BlendState::REPLACE,
-            write_mask: wgpu::ColorWrites::ALL,
-        })],
-    }),
-    primitive: wgpu::PrimitiveState {
-        topology: wgpu::PrimitiveTopology::TriangleList,
-        strip_index_format: None,
-        front_face: wgpu::FrontFace::Ccw,
-        cull_mode: Some(wgpu::Face::Back),
-        polygon_mode: wgpu::PolygonMode::Fill,
-    },
-    depth_stencil: None,
-    multisample: wgpu::MultisampleState {
-        count: 1,
-        mask: !0,
-        alpha_to_coverage_enabled: false,
-    },
-});

接着创建一个命令编码器 CommandEncoder

rust
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-    label: None,
-});

离屏渲染最关键的地方就是渲染通道 的设置了。一个渲染通道至少需要一个颜色附件,一个颜色附件需要绑定一个纹理视图。前面的教程我们一直使用的是交换链SwapChain)的纹理视图,但事实上任何纹理视图都可以,包括我们自己创建的 texture_view

rust
{
-    let render_pass_desc = wgpu::RenderPassDescriptor {
-        label: Some("Render Pass"),
-        color_attachments: &[
-            wgpu::RenderPassColorAttachment {
-                view: &texture_view,
-                resolve_target: None,
-                ops: wgpu::Operations {
-                    load: wgpu::LoadOp::Clear(wgpu::Color {
-                        r: 0.1,
-                        g: 0.2,
-                        b: 0.3,
-                        a: 1.0,
-                    }),
-                    store: wgpu::StoreOp::Store
-                },
-            }
-        ],
-        ..Default::default()
-    };
-    let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
-
-    render_pass.set_pipeline(&render_pipeline);
-    render_pass.draw(0..3, 0..1);
-}

让我们把绘制在纹理Texture)中的像素数据复制到 output_buffer 缓冲区

rust
encoder.copy_texture_to_buffer(
-    wgpu::ImageCopyTexture {
-        aspect: wgpu::TextureAspect::All,
-                texture: &texture,
-        mip_level: 0,
-        origin: wgpu::Origin3d::ZERO,
-    },
-    wgpu::ImageCopyBuffer {
-        buffer: &output_buffer,
-        layout: wgpu::ImageDataLayout {
-            offset: 0,
-            bytes_per_row: u32_size * texture_size,
-            rows_per_image: texture_size,
-        },
-    },
-    texture_desc.size,
-);

上面已经编码(Encode)好了所有的命令(Command),现在把它们提交给 GPU 来执行:

rust
queue.submit(Some(encoder.finish()));

从缓冲区中读取数据

为了从缓冲区中读取数据,首先需要对它进行映射(Map),然后执行 get_mapped_range() 就可以得到一个缓冲区视图BufferView)实例,它实质上就是一个 &[u8] 类型数据的容器:

rust
// 需要对映射变量设置范围,以便我们能够解除缓冲区的映射
-{
-    let buffer_slice = output_buffer.slice(..);
-
-    // 注意:我们必须在 await future 之前先创建映射,然后再调用 device.poll()。
-    // 否则,应用程序将停止响应。
-    let (tx, rx) = futures_intrusive::channel::shared::oneshot_channel();
-    buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
-        tx.send(result).unwrap();
-    });
-    device.poll(wgpu::Maintain::Wait);
-    rx.receive().await.unwrap().unwrap();
-
-    let data = buffer_slice.get_mapped_range();
-
-    use image::{ImageBuffer, Rgba};
-    let buffer =
-        ImageBuffer::<Rgba<u8>, _>::from_raw(texture_size, texture_size, data).unwrap();
-    buffer.save("image.png").unwrap();
-
-}
-// 解除缓冲区映射
-output_buffer.unmap();

这个程序使用了 futures-intrusive,那也是 wgpu 的 demo 中使用的

Main 函数不能异步化

main() 做为程序的入口函数,它默认无法返回一个 Future(异步任务抽象单元),所以不能使用 async 关键字。我们将通过把代码封装到另一个函数中来解决此问题,这样就可以在 main()阻塞它(也就是等待函数真正执行完成)。异步函数被调用时会立即返回一个 Future 对象,此时函数内的任务可能还没有真正开始执行, 我们需要使用一个可以轮询 Future 的,比如pollster crate

有一些可以用来标注 main() 函数为异步,如 async-stdtokio。我选择不这样做,因为这两个包对咱们的项目来说都有点儿重了。当然,你可以使用你喜欢的任何异步包和设置。

rust
async fn run() {
-    // 离屏绘制代码...
-}
-
-fn main() {
-    pollster::block_on(run());
-}

现在运行代码,就会在项目根目录输出这样一张名为 image.png 的图像:

a brown triangle

',35);function F(D,A,y,C,i,u){const s=a("AutoGithubLink");return l(),p("div",null,[t,o(s)])}const B=n(c,[["render",F]]);export{m as __pageData,B as default}; diff --git a/assets/simuverse.md.54864002.js b/assets/simuverse.md.A_tJLgNI.js similarity index 59% rename from assets/simuverse.md.54864002.js rename to assets/simuverse.md.A_tJLgNI.js index fc3445d10..2df7f027f 100644 --- a/assets/simuverse.md.54864002.js +++ b/assets/simuverse.md.A_tJLgNI.js @@ -1 +1 @@ -import{_ as s,E as a,o as t,c as r,J as o}from"./chunks/framework.adbf3c9e.js";const f=JSON.parse('{"title":"","description":"","frontmatter":{"layout":"page","sidebar":false},"headers":[],"relativePath":"simuverse.md","filePath":"simuverse.md","lastUpdated":1701933923000}'),n={name:"simuverse.md"};function c(m,l,i,p,d,u){const e=a("WasmFullScreen");return t(),r("div",null,[o(e,{"wasm-name":"simuverse"})])}const v=s(n,[["render",c]]);export{f as __pageData,v as default}; +import{_ as s,D as a,o as t,c as r,I as o}from"./chunks/framework.bMtwhlie.js";const f=JSON.parse('{"title":"","description":"","frontmatter":{"layout":"page","sidebar":false},"headers":[],"relativePath":"simuverse.md","filePath":"simuverse.md","lastUpdated":1703303099000}'),n={name:"simuverse.md"};function c(m,l,i,p,d,u){const e=a("WasmFullScreen");return t(),r("div",null,[o(e,{"wasm-name":"simuverse"})])}const v=s(n,[["render",c]]);export{f as __pageData,v as default}; diff --git a/assets/simuverse.md.54864002.lean.js b/assets/simuverse.md.A_tJLgNI.lean.js similarity index 59% rename from assets/simuverse.md.54864002.lean.js rename to assets/simuverse.md.A_tJLgNI.lean.js index fc3445d10..2df7f027f 100644 --- a/assets/simuverse.md.54864002.lean.js +++ b/assets/simuverse.md.A_tJLgNI.lean.js @@ -1 +1 @@ -import{_ as s,E as a,o as t,c as r,J as o}from"./chunks/framework.adbf3c9e.js";const 
f=JSON.parse('{"title":"","description":"","frontmatter":{"layout":"page","sidebar":false},"headers":[],"relativePath":"simuverse.md","filePath":"simuverse.md","lastUpdated":1701933923000}'),n={name:"simuverse.md"};function c(m,l,i,p,d,u){const e=a("WasmFullScreen");return t(),r("div",null,[o(e,{"wasm-name":"simuverse"})])}const v=s(n,[["render",c]]);export{f as __pageData,v as default}; +import{_ as s,D as a,o as t,c as r,I as o}from"./chunks/framework.bMtwhlie.js";const f=JSON.parse('{"title":"","description":"","frontmatter":{"layout":"page","sidebar":false},"headers":[],"relativePath":"simuverse.md","filePath":"simuverse.md","lastUpdated":1703303099000}'),n={name:"simuverse.md"};function c(m,l,i,p,d,u){const e=a("WasmFullScreen");return t(),r("div",null,[o(e,{"wasm-name":"simuverse"})])}const v=s(n,[["render",c]]);export{f as __pageData,v as default}; diff --git a/assets/specular_diagram.417ae155.png b/assets/specular_diagram.q_G0tMuB.png similarity index 100% rename from assets/specular_diagram.417ae155.png rename to assets/specular_diagram.q_G0tMuB.png diff --git a/assets/specular_lighting.8a1c656e.png b/assets/specular_lighting.YjF8bpkw.png similarity index 100% rename from assets/specular_lighting.8a1c656e.png rename to assets/specular_lighting.YjF8bpkw.png diff --git a/assets/static-tree.2a7a54d6.png b/assets/static-tree.p1JxaguT.png similarity index 100% rename from assets/static-tree.2a7a54d6.png rename to assets/static-tree.p1JxaguT.png diff --git a/assets/style.BC5xVjTI.css b/assets/style.BC5xVjTI.css new file mode 100644 index 000000000..5e6de486f --- /dev/null +++ b/assets/style.BC5xVjTI.css @@ -0,0 +1 @@ +@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-cyrillic.jIZ9REo5.woff2) format("woff2");unicode-range:U+0301,U+0400-045F,U+0490-0491,U+04B0-04B1,U+2116}@font-face{font-family:Inter var;font-weight:100 
900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-cyrillic-ext.8T9wMG5w.woff2) format("woff2");unicode-range:U+0460-052F,U+1C80-1C88,U+20B4,U+2DE0-2DFF,U+A640-A69F,U+FE2E-FE2F}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-greek.Cb5wWeGA.woff2) format("woff2");unicode-range:U+0370-03FF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-greek-ext.9JiNzaSO.woff2) format("woff2");unicode-range:U+1F00-1FFF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-latin.bvIUbFQP.woff2) format("woff2");unicode-range:U+0000-00FF,U+0131,U+0152-0153,U+02BB-02BC,U+02C6,U+02DA,U+02DC,U+2000-206F,U+2074,U+20AC,U+2122,U+2191,U+2193,U+2212,U+2215,U+FEFF,U+FFFD}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-latin-ext.GZWE-KO4.woff2) format("woff2");unicode-range:U+0100-024F,U+0259,U+1E00-1EFF,U+2020,U+20A0-20AB,U+20AD-20CF,U+2113,U+2C60-2C7F,U+A720-A7FF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-vietnamese.paY3CzEB.woff2) format("woff2");unicode-range:U+0102-0103,U+0110-0111,U+0128-0129,U+0168-0169,U+01A0-01A1,U+01AF-01B0,U+1EA0-1EF9,U+20AB}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-cyrillic.-nLMcIwj.woff2) format("woff2");unicode-range:U+0301,U+0400-045F,U+0490-0491,U+04B0-04B1,U+2116}@font-face{font-family:Inter var;font-weight:100 
900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-cyrillic-ext.OVycGSDq.woff2) format("woff2");unicode-range:U+0460-052F,U+1C80-1C88,U+20B4,U+2DE0-2DFF,U+A640-A69F,U+FE2E-FE2F}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-greek.PSfer2Kc.woff2) format("woff2");unicode-range:U+0370-03FF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-greek-ext.hznxWNZO.woff2) format("woff2");unicode-range:U+1F00-1FFF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-latin.27E69YJn.woff2) format("woff2");unicode-range:U+0000-00FF,U+0131,U+0152-0153,U+02BB-02BC,U+02C6,U+02DA,U+02DC,U+2000-206F,U+2074,U+20AC,U+2122,U+2191,U+2193,U+2212,U+2215,U+FEFF,U+FFFD}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-latin-ext.RnFly65-.woff2) format("woff2");unicode-range:U+0100-024F,U+0259,U+1E00-1EFF,U+2020,U+20A0-20AB,U+20AD-20CF,U+2113,U+2C60-2C7F,U+A720-A7FF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-vietnamese.xzQHe1q1.woff2) format("woff2");unicode-range:U+0102-0103,U+0110-0111,U+0128-0129,U+0168-0169,U+01A0-01A1,U+01AF-01B0,U+1EA0-1EF9,U+20AB}@font-face{font-family:Chinese Quotes;src:local("PingFang SC Regular"),local("PingFang SC"),local("SimHei"),local("Source Han Sans SC");unicode-range:U+2018,U+2019,U+201C,U+201D}:root{--vp-c-white: #ffffff;--vp-c-black: #000000;--vp-c-neutral: var(--vp-c-black);--vp-c-neutral-inverse: var(--vp-c-white)}.dark{--vp-c-neutral: 
var(--vp-c-white);--vp-c-neutral-inverse: var(--vp-c-black)}:root{--vp-c-gray-1: #dddde3;--vp-c-gray-2: #e4e4e9;--vp-c-gray-3: #ebebef;--vp-c-gray-soft: rgba(142, 150, 170, .14);--vp-c-indigo-1: #3451b2;--vp-c-indigo-2: #3a5ccc;--vp-c-indigo-3: #5672cd;--vp-c-indigo-soft: rgba(100, 108, 255, .14);--vp-c-green-1: #18794e;--vp-c-green-2: #299764;--vp-c-green-3: #30a46c;--vp-c-green-soft: rgba(16, 185, 129, .14);--vp-c-yellow-1: #915930;--vp-c-yellow-2: #946300;--vp-c-yellow-3: #9f6a00;--vp-c-yellow-soft: rgba(234, 179, 8, .14);--vp-c-red-1: #b8272c;--vp-c-red-2: #d5393e;--vp-c-red-3: #e0575b;--vp-c-red-soft: rgba(244, 63, 94, .14);--vp-c-sponsor: #db2777}.dark{--vp-c-gray-1: #515c67;--vp-c-gray-2: #414853;--vp-c-gray-3: #32363f;--vp-c-gray-soft: rgba(101, 117, 133, .16);--vp-c-indigo-1: #a8b1ff;--vp-c-indigo-2: #5c73e7;--vp-c-indigo-3: #3e63dd;--vp-c-indigo-soft: rgba(100, 108, 255, .16);--vp-c-green-1: #3dd68c;--vp-c-green-2: #30a46c;--vp-c-green-3: #298459;--vp-c-green-soft: rgba(16, 185, 129, .16);--vp-c-yellow-1: #f9b44e;--vp-c-yellow-2: #da8b17;--vp-c-yellow-3: #a46a0a;--vp-c-yellow-soft: rgba(234, 179, 8, .16);--vp-c-red-1: #f66f81;--vp-c-red-2: #f14158;--vp-c-red-3: #b62a3c;--vp-c-red-soft: rgba(244, 63, 94, .16)}:root{--vp-c-bg: #ffffff;--vp-c-bg-alt: #f6f6f7;--vp-c-bg-elv: #ffffff;--vp-c-bg-soft: #f6f6f7}.dark{--vp-c-bg: #1b1b1f;--vp-c-bg-alt: #161618;--vp-c-bg-elv: #202127;--vp-c-bg-soft: #202127}:root{--vp-c-border: #c2c2c4;--vp-c-divider: #e2e2e3;--vp-c-gutter: #e2e2e3}.dark{--vp-c-border: #3c3f44;--vp-c-divider: #2e2e32;--vp-c-gutter: #000000}:root{--vp-c-text-1: rgba(60, 60, 67);--vp-c-text-2: rgba(60, 60, 67, .78);--vp-c-text-3: rgba(60, 60, 67, .56)}.dark{--vp-c-text-1: rgba(255, 255, 245, .86);--vp-c-text-2: rgba(235, 235, 245, .6);--vp-c-text-3: rgba(235, 235, 245, .38)}:root{--vp-c-default-1: var(--vp-c-gray-1);--vp-c-default-2: var(--vp-c-gray-2);--vp-c-default-3: var(--vp-c-gray-3);--vp-c-default-soft: var(--vp-c-gray-soft);--vp-c-brand-1: 
var(--vp-c-indigo-1);--vp-c-brand-2: var(--vp-c-indigo-2);--vp-c-brand-3: var(--vp-c-indigo-3);--vp-c-brand-soft: var(--vp-c-indigo-soft);--vp-c-brand: var(--vp-c-brand-1);--vp-c-tip-1: var(--vp-c-brand-1);--vp-c-tip-2: var(--vp-c-brand-2);--vp-c-tip-3: var(--vp-c-brand-3);--vp-c-tip-soft: var(--vp-c-brand-soft);--vp-c-warning-1: var(--vp-c-yellow-1);--vp-c-warning-2: var(--vp-c-yellow-2);--vp-c-warning-3: var(--vp-c-yellow-3);--vp-c-warning-soft: var(--vp-c-yellow-soft);--vp-c-danger-1: var(--vp-c-red-1);--vp-c-danger-2: var(--vp-c-red-2);--vp-c-danger-3: var(--vp-c-red-3);--vp-c-danger-soft: var(--vp-c-red-soft)}:root{--vp-font-family-base: "Chinese Quotes", "Inter var", "Inter", ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Helvetica, Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";--vp-font-family-mono: ui-monospace, SFMono-Regular, "SF Mono", Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace}:root{--vp-shadow-1: 0 1px 2px rgba(0, 0, 0, .04), 0 1px 2px rgba(0, 0, 0, .06);--vp-shadow-2: 0 3px 12px rgba(0, 0, 0, .07), 0 1px 4px rgba(0, 0, 0, .07);--vp-shadow-3: 0 12px 32px rgba(0, 0, 0, .1), 0 2px 6px rgba(0, 0, 0, .08);--vp-shadow-4: 0 14px 44px rgba(0, 0, 0, .12), 0 3px 9px rgba(0, 0, 0, .12);--vp-shadow-5: 0 18px 56px rgba(0, 0, 0, .16), 0 4px 12px rgba(0, 0, 0, .16)}:root{--vp-z-index-footer: 10;--vp-z-index-local-nav: 20;--vp-z-index-nav: 30;--vp-z-index-layout-top: 40;--vp-z-index-backdrop: 50;--vp-z-index-sidebar: 60}:root{--vp-icon-copy: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' height='20' width='20' stroke='rgba(128,128,128,1)' stroke-width='2' viewBox='0 0 24 24'%3E%3Cpath stroke-linecap='round' stroke-linejoin='round' d='M9 5H7a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V7a2 2 0 0 0-2-2h-2M9 5a2 2 0 0 0 2 2h2a2 2 0 0 0 2-2M9 5a2 2 0 0 1 2-2h2a2 2 0 0 1 2 
2'/%3E%3C/svg%3E");--vp-icon-copied: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' height='20' width='20' stroke='rgba(128,128,128,1)' stroke-width='2' viewBox='0 0 24 24'%3E%3Cpath stroke-linecap='round' stroke-linejoin='round' d='M9 5H7a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V7a2 2 0 0 0-2-2h-2M9 5a2 2 0 0 0 2 2h2a2 2 0 0 0 2-2M9 5a2 2 0 0 1 2-2h2a2 2 0 0 1 2 2m-6 9 2 2 4-4'/%3E%3C/svg%3E")}:root{--vp-layout-max-width: 1440px}:root{--vp-header-anchor-symbol: "#"}:root{--vp-code-line-height: 1.7;--vp-code-font-size: .875em;--vp-code-color: var(--vp-c-brand-1);--vp-code-link-color: var(--vp-c-brand-1);--vp-code-link-hover-color: var(--vp-c-brand-2);--vp-code-bg: var(--vp-c-default-soft);--vp-code-block-color: var(--vp-c-text-2);--vp-code-block-bg: var(--vp-c-bg-alt);--vp-code-block-divider-color: var(--vp-c-gutter);--vp-code-lang-color: var(--vp-c-text-3);--vp-code-line-highlight-color: var(--vp-c-default-soft);--vp-code-line-number-color: var(--vp-c-text-3);--vp-code-line-diff-add-color: var(--vp-c-green-soft);--vp-code-line-diff-add-symbol-color: var(--vp-c-green-1);--vp-code-line-diff-remove-color: var(--vp-c-red-soft);--vp-code-line-diff-remove-symbol-color: var(--vp-c-red-1);--vp-code-line-warning-color: var(--vp-c-yellow-soft);--vp-code-line-error-color: var(--vp-c-red-soft);--vp-code-copy-code-border-color: var(--vp-c-divider);--vp-code-copy-code-bg: var(--vp-c-bg-soft);--vp-code-copy-code-hover-border-color: var(--vp-c-divider);--vp-code-copy-code-hover-bg: var(--vp-c-bg);--vp-code-copy-code-active-text: var(--vp-c-text-2);--vp-code-copy-copied-text-content: "Copied";--vp-code-tab-divider: var(--vp-code-block-divider-color);--vp-code-tab-text-color: var(--vp-c-text-2);--vp-code-tab-bg: var(--vp-code-block-bg);--vp-code-tab-hover-text-color: var(--vp-c-text-1);--vp-code-tab-active-text-color: var(--vp-c-text-1);--vp-code-tab-active-bar-color: var(--vp-c-brand-1)}:root{--vp-button-brand-border: 
transparent;--vp-button-brand-text: var(--vp-c-white);--vp-button-brand-bg: var(--vp-c-brand-3);--vp-button-brand-hover-border: transparent;--vp-button-brand-hover-text: var(--vp-c-white);--vp-button-brand-hover-bg: var(--vp-c-brand-2);--vp-button-brand-active-border: transparent;--vp-button-brand-active-text: var(--vp-c-white);--vp-button-brand-active-bg: var(--vp-c-brand-1);--vp-button-alt-border: transparent;--vp-button-alt-text: var(--vp-c-text-1);--vp-button-alt-bg: var(--vp-c-default-3);--vp-button-alt-hover-border: transparent;--vp-button-alt-hover-text: var(--vp-c-text-1);--vp-button-alt-hover-bg: var(--vp-c-default-2);--vp-button-alt-active-border: transparent;--vp-button-alt-active-text: var(--vp-c-text-1);--vp-button-alt-active-bg: var(--vp-c-default-1);--vp-button-sponsor-border: var(--vp-c-text-2);--vp-button-sponsor-text: var(--vp-c-text-2);--vp-button-sponsor-bg: transparent;--vp-button-sponsor-hover-border: var(--vp-c-sponsor);--vp-button-sponsor-hover-text: var(--vp-c-sponsor);--vp-button-sponsor-hover-bg: transparent;--vp-button-sponsor-active-border: var(--vp-c-sponsor);--vp-button-sponsor-active-text: var(--vp-c-sponsor);--vp-button-sponsor-active-bg: transparent}:root{--vp-custom-block-font-size: 14px;--vp-custom-block-code-font-size: 13px;--vp-custom-block-info-border: transparent;--vp-custom-block-info-text: var(--vp-c-text-1);--vp-custom-block-info-bg: var(--vp-c-default-soft);--vp-custom-block-info-code-bg: var(--vp-c-default-soft);--vp-custom-block-tip-border: transparent;--vp-custom-block-tip-text: var(--vp-c-text-1);--vp-custom-block-tip-bg: var(--vp-c-brand-soft);--vp-custom-block-tip-code-bg: var(--vp-c-brand-soft);--vp-custom-block-warning-border: transparent;--vp-custom-block-warning-text: var(--vp-c-text-1);--vp-custom-block-warning-bg: var(--vp-c-warning-soft);--vp-custom-block-warning-code-bg: var(--vp-c-warning-soft);--vp-custom-block-danger-border: transparent;--vp-custom-block-danger-text: 
var(--vp-c-text-1);--vp-custom-block-danger-bg: var(--vp-c-danger-soft);--vp-custom-block-danger-code-bg: var(--vp-c-danger-soft);--vp-custom-block-details-border: var(--vp-custom-block-info-border);--vp-custom-block-details-text: var(--vp-custom-block-info-text);--vp-custom-block-details-bg: var(--vp-custom-block-info-bg);--vp-custom-block-details-code-bg: var(--vp-custom-block-info-code-bg)}:root{--vp-input-border-color: var(--vp-c-border);--vp-input-bg-color: var(--vp-c-bg-alt);--vp-input-switch-bg-color: var(--vp-c-gray-soft)}:root{--vp-nav-height: 64px;--vp-nav-bg-color: var(--vp-c-bg);--vp-nav-screen-bg-color: var(--vp-c-bg);--vp-nav-logo-height: 24px}.hide-nav{--vp-nav-height: 0px}.hide-nav .VPSidebar{--vp-nav-height: 22px}:root{--vp-local-nav-bg-color: var(--vp-c-bg)}:root{--vp-sidebar-width: 272px;--vp-sidebar-bg-color: var(--vp-c-bg-alt)}:root{--vp-backdrop-bg-color: rgba(0, 0, 0, .6)}:root{--vp-home-hero-name-color: var(--vp-c-brand-1);--vp-home-hero-name-background: transparent;--vp-home-hero-image-background-image: none;--vp-home-hero-image-filter: none}:root{--vp-badge-info-border: transparent;--vp-badge-info-text: var(--vp-c-text-2);--vp-badge-info-bg: var(--vp-c-default-soft);--vp-badge-tip-border: transparent;--vp-badge-tip-text: var(--vp-c-brand-1);--vp-badge-tip-bg: var(--vp-c-brand-soft);--vp-badge-warning-border: transparent;--vp-badge-warning-text: var(--vp-c-warning-1);--vp-badge-warning-bg: var(--vp-c-warning-soft);--vp-badge-danger-border: transparent;--vp-badge-danger-text: var(--vp-c-danger-1);--vp-badge-danger-bg: var(--vp-c-danger-soft)}:root{--vp-carbon-ads-text-color: var(--vp-c-text-1);--vp-carbon-ads-poweredby-color: var(--vp-c-text-2);--vp-carbon-ads-bg-color: var(--vp-c-bg-soft);--vp-carbon-ads-hover-text-color: var(--vp-c-brand-1);--vp-carbon-ads-hover-poweredby-color: var(--vp-c-text-1)}:root{--vp-local-search-bg: var(--vp-c-bg);--vp-local-search-result-bg: var(--vp-c-bg);--vp-local-search-result-border: 
var(--vp-c-divider);--vp-local-search-result-selected-bg: var(--vp-c-bg);--vp-local-search-result-selected-border: var(--vp-c-brand-1);--vp-local-search-highlight-bg: var(--vp-c-brand-1);--vp-local-search-highlight-text: var(--vp-c-neutral-inverse)}@media (prefers-reduced-motion: reduce){*,:before,:after{animation-delay:-1ms!important;animation-duration:1ms!important;animation-iteration-count:1!important;background-attachment:initial!important;scroll-behavior:auto!important;transition-duration:0s!important;transition-delay:0s!important}}*,:before,:after{box-sizing:border-box}html{line-height:1.4;font-size:16px;-webkit-text-size-adjust:100%}html.dark{color-scheme:dark}body{margin:0;width:100%;min-width:320px;min-height:100vh;line-height:24px;font-family:var(--vp-font-family-base);font-size:16px;font-weight:400;color:var(--vp-c-text-1);background-color:var(--vp-c-bg);direction:ltr;font-synthesis:style;text-rendering:optimizeLegibility;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}main{display:block}h1,h2,h3,h4,h5,h6{margin:0;line-height:24px;font-size:16px;font-weight:400}p{margin:0}strong,b{font-weight:600}a,area,button,[role=button],input,label,select,summary,textarea{touch-action:manipulation}a{color:inherit;text-decoration:inherit}ol,ul{list-style:none;margin:0;padding:0}blockquote{margin:0}pre,code,kbd,samp{font-family:var(--vp-font-family-mono)}img,svg,video,canvas,audio,iframe,embed,object{display:block}figure{margin:0}img,video{max-width:100%;height:auto}button,input,optgroup,select,textarea{border:0;padding:0;line-height:inherit;color:inherit}button{padding:0;font-family:inherit;background-color:transparent;background-image:none}button:enabled,[role=button]:enabled{cursor:pointer}button:focus,button:focus-visible{outline:1px dotted;outline:4px auto 
-webkit-focus-ring-color}button:focus:not(:focus-visible){outline:none!important}input:focus,textarea:focus,select:focus{outline:none}table{border-collapse:collapse}input{background-color:transparent}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:var(--vp-c-text-3)}input::-ms-input-placeholder,textarea::-ms-input-placeholder{color:var(--vp-c-text-3)}input::placeholder,textarea::placeholder{color:var(--vp-c-text-3)}input::-webkit-outer-spin-button,input::-webkit-inner-spin-button{-webkit-appearance:none;margin:0}input[type=number]{-moz-appearance:textfield}textarea{resize:vertical}select{-webkit-appearance:none}fieldset{margin:0;padding:0}h1,h2,h3,h4,h5,h6,li,p{overflow-wrap:break-word}vite-error-overlay{z-index:9999}mjx-container{display:inline-block;margin:auto 2px -2px}mjx-container>svg{margin:auto}.visually-hidden{position:absolute;width:1px;height:1px;white-space:nowrap;clip:rect(0 0 0 0);clip-path:inset(50%);overflow:hidden}.custom-block{border:1px solid transparent;border-radius:8px;padding:16px 16px 8px;line-height:24px;font-size:var(--vp-custom-block-font-size);color:var(--vp-c-text-2)}.custom-block.info{border-color:var(--vp-custom-block-info-border);color:var(--vp-custom-block-info-text);background-color:var(--vp-custom-block-info-bg)}.custom-block.info a,.custom-block.info code{color:var(--vp-c-brand-1)}.custom-block.info a:hover{color:var(--vp-c-brand-2)}.custom-block.info code{background-color:var(--vp-custom-block-info-code-bg)}.custom-block.tip{border-color:var(--vp-custom-block-tip-border);color:var(--vp-custom-block-tip-text);background-color:var(--vp-custom-block-tip-bg)}.custom-block.tip a,.custom-block.tip code{color:var(--vp-c-brand-1)}.custom-block.tip a:hover{color:var(--vp-c-brand-2)}.custom-block.tip 
code{background-color:var(--vp-custom-block-tip-code-bg)}.custom-block.warning{border-color:var(--vp-custom-block-warning-border);color:var(--vp-custom-block-warning-text);background-color:var(--vp-custom-block-warning-bg)}.custom-block.warning a,.custom-block.warning code{color:var(--vp-c-warning-1)}.custom-block.warning a:hover{color:var(--vp-c-warning-2)}.custom-block.warning code{background-color:var(--vp-custom-block-warning-code-bg)}.custom-block.danger{border-color:var(--vp-custom-block-danger-border);color:var(--vp-custom-block-danger-text);background-color:var(--vp-custom-block-danger-bg)}.custom-block.danger a,.custom-block.danger code{color:var(--vp-c-danger-1)}.custom-block.danger a:hover{color:var(--vp-c-danger-2)}.custom-block.danger code{background-color:var(--vp-custom-block-danger-code-bg)}.custom-block.details{border-color:var(--vp-custom-block-details-border);color:var(--vp-custom-block-details-text);background-color:var(--vp-custom-block-details-bg)}.custom-block.details a{color:var(--vp-c-brand-1)}.custom-block.details a:hover{color:var(--vp-c-brand-2)}.custom-block.details code{background-color:var(--vp-custom-block-details-code-bg)}.custom-block-title{font-weight:600}.custom-block p+p{margin:8px 0}.custom-block.details summary{margin:0 0 8px;font-weight:700;cursor:pointer}.custom-block.details summary+p{margin:8px 0}.custom-block a{color:inherit;font-weight:600;text-decoration:underline;text-underline-offset:2px;transition:opacity .25s}.custom-block a:hover{opacity:.75}.custom-block code{font-size:var(--vp-custom-block-code-font-size)}.custom-block.custom-block th,.custom-block.custom-block blockquote>p{font-size:var(--vp-custom-block-font-size);color:inherit}.dark .vp-code span{color:var(--shiki-dark, inherit)}html:not(.dark) .vp-code span{color:var(--shiki-light, inherit)}.vp-code-group{margin-top:16px}.vp-code-group .tabs{position:relative;display:flex;margin-right:-24px;margin-left:-24px;padding:0 
12px;background-color:var(--vp-code-tab-bg);overflow-x:auto;overflow-y:hidden;box-shadow:inset 0 -1px var(--vp-code-tab-divider)}@media (min-width: 640px){.vp-code-group .tabs{margin-right:0;margin-left:0;border-radius:8px 8px 0 0}}.vp-code-group .tabs input{position:fixed;opacity:0;pointer-events:none}.vp-code-group .tabs label{position:relative;display:inline-block;border-bottom:1px solid transparent;padding:0 12px;line-height:48px;font-size:14px;font-weight:500;color:var(--vp-code-tab-text-color);white-space:nowrap;cursor:pointer;transition:color .25s}.vp-code-group .tabs label:after{position:absolute;right:8px;bottom:-1px;left:8px;z-index:1;height:2px;border-radius:2px;content:"";background-color:transparent;transition:background-color .25s}.vp-code-group label:hover{color:var(--vp-code-tab-hover-text-color)}.vp-code-group input:checked+label{color:var(--vp-code-tab-active-text-color)}.vp-code-group input:checked+label:after{background-color:var(--vp-code-tab-active-bar-color)}.vp-code-group div[class*=language-],.vp-block{display:none;margin-top:0!important;border-top-left-radius:0!important;border-top-right-radius:0!important}.vp-code-group div[class*=language-].active,.vp-block.active{display:block}.vp-block{padding:20px 24px}.vp-doc h1,.vp-doc h2,.vp-doc h3,.vp-doc h4,.vp-doc h5,.vp-doc h6{position:relative;font-weight:600;outline:none}.vp-doc h1{letter-spacing:-.02em;line-height:40px;font-size:28px}.vp-doc h2{margin:48px 0 16px;border-top:1px solid var(--vp-c-divider);padding-top:24px;letter-spacing:-.02em;line-height:32px;font-size:24px}.vp-doc h3{margin:32px 0 0;letter-spacing:-.01em;line-height:28px;font-size:20px}.vp-doc .header-anchor{position:absolute;top:0;left:0;margin-left:-.87em;font-weight:500;-webkit-user-select:none;user-select:none;opacity:0;text-decoration:none;transition:color .25s,opacity .25s}.vp-doc .header-anchor:before{content:var(--vp-header-anchor-symbol)}.vp-doc h1:hover .header-anchor,.vp-doc h1 .header-anchor:focus,.vp-doc 
h2:hover .header-anchor,.vp-doc h2 .header-anchor:focus,.vp-doc h3:hover .header-anchor,.vp-doc h3 .header-anchor:focus,.vp-doc h4:hover .header-anchor,.vp-doc h4 .header-anchor:focus,.vp-doc h5:hover .header-anchor,.vp-doc h5 .header-anchor:focus,.vp-doc h6:hover .header-anchor,.vp-doc h6 .header-anchor:focus{opacity:1}@media (min-width: 768px){.vp-doc h1{letter-spacing:-.02em;line-height:40px;font-size:32px}}.vp-doc h2 .header-anchor{top:24px}.vp-doc p,.vp-doc summary{margin:16px 0}.vp-doc p{line-height:28px}.vp-doc blockquote{margin:16px 0;border-left:2px solid var(--vp-c-divider);padding-left:16px;transition:border-color .5s}.vp-doc blockquote>p{margin:0;font-size:16px;color:var(--vp-c-text-2);transition:color .5s}.vp-doc a{font-weight:500;color:var(--vp-c-brand-1);text-decoration:underline;text-underline-offset:2px;transition:color .25s,opacity .25s}.vp-doc a:hover{color:var(--vp-c-brand-2)}.vp-doc strong{font-weight:600}.vp-doc ul,.vp-doc ol{padding-left:1.25rem;margin:16px 0}.vp-doc ul{list-style:disc}.vp-doc ol{list-style:decimal}.vp-doc li+li{margin-top:8px}.vp-doc li>ol,.vp-doc li>ul{margin:8px 0 0}.vp-doc table{display:block;border-collapse:collapse;margin:20px 0;overflow-x:auto}.vp-doc tr{background-color:var(--vp-c-bg);border-top:1px solid var(--vp-c-divider);transition:background-color .5s}.vp-doc tr:nth-child(2n){background-color:var(--vp-c-bg-soft)}.vp-doc th,.vp-doc td{border:1px solid var(--vp-c-divider);padding:8px 16px}.vp-doc th{text-align:left;font-size:14px;font-weight:600;color:var(--vp-c-text-2);background-color:var(--vp-c-bg-soft)}.vp-doc td{font-size:14px}.vp-doc hr{margin:16px 0;border:none;border-top:1px solid var(--vp-c-divider)}.vp-doc .custom-block{margin:16px 0}.vp-doc .custom-block p{margin:8px 0;line-height:24px}.vp-doc .custom-block p:first-child{margin:0}.vp-doc .custom-block div[class*=language-]{margin:8px 0;border-radius:8px}.vp-doc .custom-block div[class*=language-] code{font-weight:400;background-color:transparent}.vp-doc 
.custom-block .vp-code-group .tabs{margin:0;border-radius:8px 8px 0 0}.vp-doc :not(pre,h1,h2,h3,h4,h5,h6)>code{font-size:var(--vp-code-font-size);color:var(--vp-code-color)}.vp-doc :not(pre)>code{border-radius:4px;padding:3px 6px;background-color:var(--vp-code-bg);transition:color .25s,background-color .5s}.vp-doc a>code{color:var(--vp-code-link-color)}.vp-doc a:hover>code{color:var(--vp-code-link-hover-color)}.vp-doc h1>code,.vp-doc h2>code,.vp-doc h3>code{font-size:.9em}.vp-doc div[class*=language-],.vp-block{position:relative;margin:16px -24px;background-color:var(--vp-code-block-bg);overflow-x:auto;transition:background-color .5s}@media (min-width: 640px){.vp-doc div[class*=language-],.vp-block{border-radius:8px;margin:16px 0}}@media (max-width: 639px){.vp-doc li div[class*=language-]{border-radius:8px 0 0 8px}}.vp-doc div[class*=language-]+div[class*=language-],.vp-doc div[class$=-api]+div[class*=language-],.vp-doc div[class*=language-]+div[class$=-api]>div[class*=language-]{margin-top:-8px}.vp-doc [class*=language-] pre,.vp-doc [class*=language-] code{direction:ltr;text-align:left;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-moz-hyphens:none;-ms-hyphens:none;hyphens:none}.vp-doc [class*=language-] pre{position:relative;z-index:1;margin:0;padding:20px 0;background:transparent;overflow-x:auto}.vp-doc [class*=language-] code{display:block;padding:0 24px;width:fit-content;min-width:100%;line-height:var(--vp-code-line-height);font-size:var(--vp-code-font-size);color:var(--vp-code-block-color);transition:color .5s}.vp-doc [class*=language-] code .highlighted{background-color:var(--vp-code-line-highlight-color);transition:background-color .5s;margin:0 -24px;padding:0 24px;width:calc(100% + 48px);display:inline-block}.vp-doc [class*=language-] code .highlighted.error{background-color:var(--vp-code-line-error-color)}.vp-doc [class*=language-] code 
.highlighted.warning{background-color:var(--vp-code-line-warning-color)}.vp-doc [class*=language-] code .diff{transition:background-color .5s;margin:0 -24px;padding:0 24px;width:calc(100% + 48px);display:inline-block}.vp-doc [class*=language-] code .diff:before{position:absolute;left:10px}.vp-doc [class*=language-] .has-focused-lines .line:not(.has-focus){filter:blur(.095rem);opacity:.4;transition:filter .35s,opacity .35s}.vp-doc [class*=language-] .has-focused-lines .line:not(.has-focus){opacity:.7;transition:filter .35s,opacity .35s}.vp-doc [class*=language-]:hover .has-focused-lines .line:not(.has-focus){filter:blur(0);opacity:1}.vp-doc [class*=language-] code .diff.remove{background-color:var(--vp-code-line-diff-remove-color);opacity:.7}.vp-doc [class*=language-] code .diff.remove:before{content:"-";color:var(--vp-code-line-diff-remove-symbol-color)}.vp-doc [class*=language-] code .diff.add{background-color:var(--vp-code-line-diff-add-color)}.vp-doc [class*=language-] code .diff.add:before{content:"+";color:var(--vp-code-line-diff-add-symbol-color)}.vp-doc div[class*=language-].line-numbers-mode{padding-left:32px}.vp-doc .line-numbers-wrapper{position:absolute;top:0;bottom:0;left:0;z-index:3;border-right:1px solid var(--vp-code-block-divider-color);padding-top:20px;width:32px;text-align:center;font-family:var(--vp-font-family-mono);line-height:var(--vp-code-line-height);font-size:var(--vp-code-font-size);color:var(--vp-code-line-number-color);transition:border-color .5s,color .5s}.vp-doc [class*=language-]>button.copy{direction:ltr;position:absolute;top:12px;right:12px;z-index:3;border:1px solid var(--vp-code-copy-code-border-color);border-radius:4px;width:40px;height:40px;background-color:var(--vp-code-copy-code-bg);opacity:0;cursor:pointer;background-image:var(--vp-icon-copy);background-position:50%;background-size:20px;background-repeat:no-repeat;transition:border-color .25s,background-color .25s,opacity .25s}.vp-doc 
[class*=language-]:hover>button.copy,.vp-doc [class*=language-]>button.copy:focus{opacity:1}.vp-doc [class*=language-]>button.copy:hover,.vp-doc [class*=language-]>button.copy.copied{border-color:var(--vp-code-copy-code-hover-border-color);background-color:var(--vp-code-copy-code-hover-bg)}.vp-doc [class*=language-]>button.copy.copied,.vp-doc [class*=language-]>button.copy:hover.copied{border-radius:0 4px 4px 0;background-color:var(--vp-code-copy-code-hover-bg);background-image:var(--vp-icon-copied)}.vp-doc [class*=language-]>button.copy.copied:before,.vp-doc [class*=language-]>button.copy:hover.copied:before{position:relative;top:-1px;transform:translate(calc(-100% - 1px));display:flex;justify-content:center;align-items:center;border:1px solid var(--vp-code-copy-code-hover-border-color);border-right:0;border-radius:4px 0 0 4px;padding:0 10px;width:fit-content;height:40px;text-align:center;font-size:12px;font-weight:500;color:var(--vp-code-copy-code-active-text);background-color:var(--vp-code-copy-code-hover-bg);white-space:nowrap;content:var(--vp-code-copy-copied-text-content)}.vp-doc [class*=language-]>span.lang{position:absolute;top:2px;right:8px;z-index:2;font-size:12px;font-weight:500;color:var(--vp-code-lang-color);transition:color .4s,opacity .4s}.vp-doc [class*=language-]:hover>button.copy+span.lang,.vp-doc [class*=language-]>button.copy:focus+span.lang{opacity:0}.vp-doc .VPTeamMembers{margin-top:24px}.vp-doc .VPTeamMembers.small.count-1 .container{margin:0!important;max-width:calc((100% - 24px)/2)!important}.vp-doc .VPTeamMembers.small.count-2 .container,.vp-doc .VPTeamMembers.small.count-3 .container{max-width:100%!important}.vp-doc .VPTeamMembers.medium.count-1 .container{margin:0!important;max-width:calc((100% - 24px)/2)!important}:is(.vp-external-link-icon,.vp-doc a[href*="://"],.vp-doc 
a[target=_blank]):not(.no-icon):after{display:inline-block;margin-top:-1px;margin-left:4px;width:11px;height:11px;background:currentColor;color:var(--vp-c-text-3);flex-shrink:0;--icon: url("data:image/svg+xml, %3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none' /%3E%3Cpath d='M9 5v2h6.59L4 18.59 5.41 20 17 8.41V15h2V5H9z' /%3E%3C/svg%3E");-webkit-mask-image:var(--icon);mask-image:var(--icon)}.vp-external-link-icon:after{content:""}.vp-sponsor{border-radius:16px;overflow:hidden}.vp-sponsor.aside{border-radius:12px}.vp-sponsor-section+.vp-sponsor-section{margin-top:4px}.vp-sponsor-tier{margin-bottom:4px;text-align:center;letter-spacing:1px;line-height:24px;width:100%;font-weight:600;color:var(--vp-c-text-2);background-color:var(--vp-c-bg-soft)}.vp-sponsor.normal .vp-sponsor-tier{padding:13px 0 11px;font-size:14px}.vp-sponsor.aside .vp-sponsor-tier{padding:9px 0 7px;font-size:12px}.vp-sponsor-grid+.vp-sponsor-tier{margin-top:4px}.vp-sponsor-grid{display:flex;flex-wrap:wrap;gap:4px}.vp-sponsor-grid.xmini .vp-sponsor-grid-link{height:64px}.vp-sponsor-grid.xmini .vp-sponsor-grid-image{max-width:64px;max-height:22px}.vp-sponsor-grid.mini .vp-sponsor-grid-link{height:72px}.vp-sponsor-grid.mini .vp-sponsor-grid-image{max-width:96px;max-height:24px}.vp-sponsor-grid.small .vp-sponsor-grid-link{height:96px}.vp-sponsor-grid.small .vp-sponsor-grid-image{max-width:96px;max-height:24px}.vp-sponsor-grid.medium .vp-sponsor-grid-link{height:112px}.vp-sponsor-grid.medium .vp-sponsor-grid-image{max-width:120px;max-height:36px}.vp-sponsor-grid.big .vp-sponsor-grid-link{height:184px}.vp-sponsor-grid.big .vp-sponsor-grid-image{max-width:192px;max-height:56px}.vp-sponsor-grid[data-vp-grid="2"] .vp-sponsor-grid-item{width:calc((100% - 4px)/2)}.vp-sponsor-grid[data-vp-grid="3"] .vp-sponsor-grid-item{width:calc((100% - 4px * 2) / 3)}.vp-sponsor-grid[data-vp-grid="4"] .vp-sponsor-grid-item{width:calc((100% - 
12px)/4)}.vp-sponsor-grid[data-vp-grid="5"] .vp-sponsor-grid-item{width:calc((100% - 16px)/5)}.vp-sponsor-grid[data-vp-grid="6"] .vp-sponsor-grid-item{width:calc((100% - 4px * 5) / 6)}.vp-sponsor-grid-item{flex-shrink:0;width:100%;background-color:var(--vp-c-bg-soft);transition:background-color .25s}.vp-sponsor-grid-item:hover{background-color:var(--vp-c-default-soft)}.vp-sponsor-grid-item:hover .vp-sponsor-grid-image{filter:grayscale(0) invert(0)}.vp-sponsor-grid-item.empty:hover{background-color:var(--vp-c-bg-soft)}.dark .vp-sponsor-grid-item:hover{background-color:var(--vp-c-white)}.dark .vp-sponsor-grid-item.empty:hover{background-color:var(--vp-c-bg-soft)}.vp-sponsor-grid-link{display:flex}.vp-sponsor-grid-box{display:flex;justify-content:center;align-items:center;width:100%}.vp-sponsor-grid-image{max-width:100%;filter:grayscale(1);transition:filter .25s}.dark .vp-sponsor-grid-image{filter:grayscale(1) invert(1)}.VPBadge[data-v-9613cc9f]{display:inline-block;margin-left:2px;border:1px solid transparent;border-radius:12px;padding:0 10px;line-height:22px;font-size:12px;font-weight:500;transform:translateY(-2px)}.vp-doc h1>.VPBadge[data-v-9613cc9f]{margin-top:4px;vertical-align:top}.vp-doc h2>.VPBadge[data-v-9613cc9f]{margin-top:3px;padding:0 8px;vertical-align:top}.vp-doc h3>.VPBadge[data-v-9613cc9f]{vertical-align:middle}.vp-doc h4>.VPBadge[data-v-9613cc9f],.vp-doc h5>.VPBadge[data-v-9613cc9f],.vp-doc 
h6>.VPBadge[data-v-9613cc9f]{vertical-align:middle;line-height:18px}.VPBadge.info[data-v-9613cc9f]{border-color:var(--vp-badge-info-border);color:var(--vp-badge-info-text);background-color:var(--vp-badge-info-bg)}.VPBadge.tip[data-v-9613cc9f]{border-color:var(--vp-badge-tip-border);color:var(--vp-badge-tip-text);background-color:var(--vp-badge-tip-bg)}.VPBadge.warning[data-v-9613cc9f]{border-color:var(--vp-badge-warning-border);color:var(--vp-badge-warning-text);background-color:var(--vp-badge-warning-bg)}.VPBadge.danger[data-v-9613cc9f]{border-color:var(--vp-badge-danger-border);color:var(--vp-badge-danger-text);background-color:var(--vp-badge-danger-bg)}.VPBackdrop[data-v-c79a1216]{position:fixed;top:0;right:0;bottom:0;left:0;z-index:var(--vp-z-index-backdrop);background:var(--vp-backdrop-bg-color);transition:opacity .5s}.VPBackdrop.fade-enter-from[data-v-c79a1216],.VPBackdrop.fade-leave-to[data-v-c79a1216]{opacity:0}.VPBackdrop.fade-leave-active[data-v-c79a1216]{transition-duration:.25s}@media (min-width: 1280px){.VPBackdrop[data-v-c79a1216]{display:none}}.NotFound[data-v-f87ff6e4]{padding:64px 24px 96px;text-align:center}@media (min-width: 768px){.NotFound[data-v-f87ff6e4]{padding:96px 32px 168px}}.code[data-v-f87ff6e4]{line-height:64px;font-size:64px;font-weight:600}.title[data-v-f87ff6e4]{padding-top:12px;letter-spacing:2px;line-height:20px;font-size:20px;font-weight:700}.divider[data-v-f87ff6e4]{margin:24px auto 18px;width:64px;height:1px;background-color:var(--vp-c-divider)}.quote[data-v-f87ff6e4]{margin:0 auto;max-width:256px;font-size:14px;font-weight:500;color:var(--vp-c-text-2)}.action[data-v-f87ff6e4]{padding-top:20px}.link[data-v-f87ff6e4]{display:inline-block;border:1px solid var(--vp-c-brand-1);border-radius:16px;padding:3px 16px;font-size:14px;font-weight:500;color:var(--vp-c-brand-1);transition:border-color .25s,color 
.25s}.link[data-v-f87ff6e4]:hover{border-color:var(--vp-c-brand-2);color:var(--vp-c-brand-2)}.root[data-v-d0ee3533]{position:relative;z-index:1}.nested[data-v-d0ee3533]{padding-left:16px}.outline-link[data-v-d0ee3533]{display:block;line-height:28px;color:var(--vp-c-text-2);white-space:nowrap;overflow:hidden;text-overflow:ellipsis;transition:color .5s;font-weight:400}.outline-link[data-v-d0ee3533]:hover,.outline-link.active[data-v-d0ee3533]{color:var(--vp-c-text-1);transition:color .25s}.outline-link.nested[data-v-d0ee3533]{padding-left:13px}.VPDocAsideOutline[data-v-d330b1bb]{display:none}.VPDocAsideOutline.has-outline[data-v-d330b1bb]{display:block}.content[data-v-d330b1bb]{position:relative;border-left:1px solid var(--vp-c-divider);padding-left:16px;font-size:13px;font-weight:500}.outline-marker[data-v-d330b1bb]{position:absolute;top:32px;left:-1px;z-index:0;opacity:0;width:2px;border-radius:2px;height:18px;background-color:var(--vp-c-brand-1);transition:top .25s cubic-bezier(0,1,.5,1),background-color .5s,opacity .25s}.outline-title[data-v-d330b1bb]{letter-spacing:.4px;line-height:28px;font-size:13px;font-weight:600}.VPDocAside[data-v-3f215769]{display:flex;flex-direction:column;flex-grow:1}.spacer[data-v-3f215769]{flex-grow:1}.VPDocAside[data-v-3f215769] .spacer+.VPDocAsideSponsors,.VPDocAside[data-v-3f215769] .spacer+.VPDocAsideCarbonAds{margin-top:24px}.VPDocAside[data-v-3f215769] .VPDocAsideSponsors+.VPDocAsideCarbonAds{margin-top:16px}.VPLastUpdated[data-v-7e05ebdb]{line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-2)}@media (min-width: 640px){.VPLastUpdated[data-v-7e05ebdb]{line-height:32px;font-size:14px;font-weight:500}}.VPDocFooter[data-v-48f9bb55]{margin-top:64px}.edit-info[data-v-48f9bb55]{padding-bottom:18px}@media (min-width: 
640px){.edit-info[data-v-48f9bb55]{display:flex;justify-content:space-between;align-items:center;padding-bottom:14px}}.edit-link-button[data-v-48f9bb55]{display:flex;align-items:center;border:0;line-height:32px;font-size:14px;font-weight:500;color:var(--vp-c-brand-1);transition:color .25s}.edit-link-button[data-v-48f9bb55]:hover{color:var(--vp-c-brand-2)}.edit-link-icon[data-v-48f9bb55]{margin-right:8px;width:14px;height:14px;fill:currentColor}.prev-next[data-v-48f9bb55]{border-top:1px solid var(--vp-c-divider);padding-top:24px;display:grid;grid-row-gap:8px}@media (min-width: 640px){.prev-next[data-v-48f9bb55]{grid-template-columns:repeat(2,1fr);grid-column-gap:16px}}.pager-link[data-v-48f9bb55]{display:block;border:1px solid var(--vp-c-divider);border-radius:8px;padding:11px 16px 13px;width:100%;height:100%;transition:border-color .25s}.pager-link[data-v-48f9bb55]:hover{border-color:var(--vp-c-brand-1)}.pager-link.next[data-v-48f9bb55]{margin-left:auto;text-align:right}.desc[data-v-48f9bb55]{display:block;line-height:20px;font-size:12px;font-weight:500;color:var(--vp-c-text-2)}.title[data-v-48f9bb55]{display:block;line-height:20px;font-size:14px;font-weight:500;color:var(--vp-c-brand-1);transition:color .25s}.VPDocOutlineDropdown[data-v-eadfb36b]{margin-bottom:48px}.VPDocOutlineDropdown button[data-v-eadfb36b]{display:block;font-size:14px;font-weight:500;line-height:24px;border:1px solid var(--vp-c-border);padding:4px 12px;color:var(--vp-c-text-2);background-color:var(--vp-c-default-soft);border-radius:8px;transition:color .5s}.VPDocOutlineDropdown button[data-v-eadfb36b]:hover{color:var(--vp-c-text-1);transition:color .25s}.VPDocOutlineDropdown button.open[data-v-eadfb36b]{color:var(--vp-c-text-1)}.icon[data-v-eadfb36b]{display:inline-block;vertical-align:middle;width:16px;height:16px;fill:currentColor}[data-v-eadfb36b] 
.outline-link{font-size:14px;font-weight:400}.open>.icon[data-v-eadfb36b]{transform:rotate(90deg)}.items[data-v-eadfb36b]{margin-top:12px;border-left:1px solid var(--vp-c-divider)}.VPDoc[data-v-6b87e69f]{padding:32px 24px 96px;width:100%}.VPDoc .VPDocOutlineDropdown[data-v-6b87e69f]{display:none}@media (min-width: 960px) and (max-width: 1279px){.VPDoc .VPDocOutlineDropdown[data-v-6b87e69f]{display:block}}@media (min-width: 768px){.VPDoc[data-v-6b87e69f]{padding:48px 32px 128px}}@media (min-width: 960px){.VPDoc[data-v-6b87e69f]{padding:32px 32px 0}.VPDoc:not(.has-sidebar) .container[data-v-6b87e69f]{display:flex;justify-content:center;max-width:992px}.VPDoc:not(.has-sidebar) .content[data-v-6b87e69f]{max-width:752px}}@media (min-width: 1280px){.VPDoc .container[data-v-6b87e69f]{display:flex;justify-content:center}.VPDoc .aside[data-v-6b87e69f]{display:block}}@media (min-width: 1440px){.VPDoc:not(.has-sidebar) .content[data-v-6b87e69f]{max-width:784px}.VPDoc:not(.has-sidebar) .container[data-v-6b87e69f]{max-width:1104px}}.container[data-v-6b87e69f]{margin:0 auto;width:100%}.aside[data-v-6b87e69f]{position:relative;display:none;order:2;flex-grow:1;padding-left:32px;width:100%;max-width:256px}.left-aside[data-v-6b87e69f]{order:1;padding-left:unset;padding-right:32px}.aside-container[data-v-6b87e69f]{position:fixed;top:0;padding-top:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + var(--vp-doc-top-height, 0px) + 32px);width:224px;height:100vh;overflow-x:hidden;overflow-y:auto;scrollbar-width:none}.aside-container[data-v-6b87e69f]::-webkit-scrollbar{display:none}.aside-curtain[data-v-6b87e69f]{position:fixed;bottom:0;z-index:10;width:224px;height:32px;background:linear-gradient(transparent,var(--vp-c-bg) 70%)}.aside-content[data-v-6b87e69f]{display:flex;flex-direction:column;min-height:calc(100vh - (var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 32px));padding-bottom:32px}.content[data-v-6b87e69f]{position:relative;margin:0 auto;width:100%}@media 
(min-width: 960px){.content[data-v-6b87e69f]{padding:0 32px 128px}}@media (min-width: 1280px){.content[data-v-6b87e69f]{order:1;margin:0;min-width:640px}}.content-container[data-v-6b87e69f]{margin:0 auto}.VPDoc.has-aside .content-container[data-v-6b87e69f]{max-width:688px}.external-link-icon-enabled :is(.vp-doc a[href*="://"][data-v-6b87e69f],.vp-doc a[target=_blank][data-v-6b87e69f]):after{content:"";color:currentColor}.VPButton[data-v-c1c5efc1]{display:inline-block;border:1px solid transparent;text-align:center;font-weight:600;white-space:nowrap;transition:color .25s,border-color .25s,background-color .25s}.VPButton[data-v-c1c5efc1]:active{transition:color .1s,border-color .1s,background-color .1s}.VPButton.medium[data-v-c1c5efc1]{border-radius:20px;padding:0 20px;line-height:38px;font-size:14px}.VPButton.big[data-v-c1c5efc1]{border-radius:24px;padding:0 24px;line-height:46px;font-size:16px}.VPButton.brand[data-v-c1c5efc1]{border-color:var(--vp-button-brand-border);color:var(--vp-button-brand-text);background-color:var(--vp-button-brand-bg)}.VPButton.brand[data-v-c1c5efc1]:hover{border-color:var(--vp-button-brand-hover-border);color:var(--vp-button-brand-hover-text);background-color:var(--vp-button-brand-hover-bg)}.VPButton.brand[data-v-c1c5efc1]:active{border-color:var(--vp-button-brand-active-border);color:var(--vp-button-brand-active-text);background-color:var(--vp-button-brand-active-bg)}.VPButton.alt[data-v-c1c5efc1]{border-color:var(--vp-button-alt-border);color:var(--vp-button-alt-text);background-color:var(--vp-button-alt-bg)}.VPButton.alt[data-v-c1c5efc1]:hover{border-color:var(--vp-button-alt-hover-border);color:var(--vp-button-alt-hover-text);background-color:var(--vp-button-alt-hover-bg)}.VPButton.alt[data-v-c1c5efc1]:active{border-color:var(--vp-button-alt-active-border);color:var(--vp-button-alt-active-text);background-color:var(--vp-button-alt-active-bg)}.VPButton.sponsor[data-v-c1c5efc1]{border-color:var(--vp-button-sponsor-border);color:var(--vp-b
utton-sponsor-text);background-color:var(--vp-button-sponsor-bg)}.VPButton.sponsor[data-v-c1c5efc1]:hover{border-color:var(--vp-button-sponsor-hover-border);color:var(--vp-button-sponsor-hover-text);background-color:var(--vp-button-sponsor-hover-bg)}.VPButton.sponsor[data-v-c1c5efc1]:active{border-color:var(--vp-button-sponsor-active-border);color:var(--vp-button-sponsor-active-text);background-color:var(--vp-button-sponsor-active-bg)}html:not(.dark) .VPImage.dark[data-v-8426fc1a]{display:none}.dark .VPImage.light[data-v-8426fc1a]{display:none}.VPHero[data-v-da5d1713]{margin-top:calc((var(--vp-nav-height) + var(--vp-layout-top-height, 0px)) * -1);padding:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 48px) 24px 48px}@media (min-width: 640px){.VPHero[data-v-da5d1713]{padding:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 80px) 48px 64px}}@media (min-width: 960px){.VPHero[data-v-da5d1713]{padding:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 80px) 64px 64px}}.container[data-v-da5d1713]{display:flex;flex-direction:column;margin:0 auto;max-width:1152px}@media (min-width: 960px){.container[data-v-da5d1713]{flex-direction:row}}.main[data-v-da5d1713]{position:relative;z-index:10;order:2;flex-grow:1;flex-shrink:0}.VPHero.has-image .container[data-v-da5d1713]{text-align:center}@media (min-width: 960px){.VPHero.has-image .container[data-v-da5d1713]{text-align:left}}@media (min-width: 960px){.main[data-v-da5d1713]{order:1;width:calc((100% / 3) * 2)}.VPHero.has-image .main[data-v-da5d1713]{max-width:592px}}.name[data-v-da5d1713],.text[data-v-da5d1713]{max-width:392px;letter-spacing:-.4px;line-height:40px;font-size:32px;font-weight:700;white-space:pre-wrap}.VPHero.has-image .name[data-v-da5d1713],.VPHero.has-image .text[data-v-da5d1713]{margin:0 
auto}.name[data-v-da5d1713]{color:var(--vp-home-hero-name-color)}.clip[data-v-da5d1713]{background:var(--vp-home-hero-name-background);-webkit-background-clip:text;background-clip:text;-webkit-text-fill-color:var(--vp-home-hero-name-color)}@media (min-width: 640px){.name[data-v-da5d1713],.text[data-v-da5d1713]{max-width:576px;line-height:56px;font-size:48px}}@media (min-width: 960px){.name[data-v-da5d1713],.text[data-v-da5d1713]{line-height:64px;font-size:56px}.VPHero.has-image .name[data-v-da5d1713],.VPHero.has-image .text[data-v-da5d1713]{margin:0}}.tagline[data-v-da5d1713]{padding-top:8px;max-width:392px;line-height:28px;font-size:18px;font-weight:500;white-space:pre-wrap;color:var(--vp-c-text-2)}.VPHero.has-image .tagline[data-v-da5d1713]{margin:0 auto}@media (min-width: 640px){.tagline[data-v-da5d1713]{padding-top:12px;max-width:576px;line-height:32px;font-size:20px}}@media (min-width: 960px){.tagline[data-v-da5d1713]{line-height:36px;font-size:24px}.VPHero.has-image .tagline[data-v-da5d1713]{margin:0}}.actions[data-v-da5d1713]{display:flex;flex-wrap:wrap;margin:-6px;padding-top:24px}.VPHero.has-image .actions[data-v-da5d1713]{justify-content:center}@media (min-width: 640px){.actions[data-v-da5d1713]{padding-top:32px}}@media (min-width: 960px){.VPHero.has-image .actions[data-v-da5d1713]{justify-content:flex-start}}.action[data-v-da5d1713]{flex-shrink:0;padding:6px}.image[data-v-da5d1713]{order:1;margin:-76px -24px -48px}@media (min-width: 640px){.image[data-v-da5d1713]{margin:-108px -24px -48px}}@media (min-width: 960px){.image[data-v-da5d1713]{flex-grow:1;order:2;margin:0;min-height:100%}}.image-container[data-v-da5d1713]{position:relative;margin:0 auto;width:320px;height:320px}@media (min-width: 640px){.image-container[data-v-da5d1713]{width:392px;height:392px}}@media (min-width: 
960px){.image-container[data-v-da5d1713]{display:flex;justify-content:center;align-items:center;width:100%;height:100%;transform:translate(-32px,-32px)}}.image-bg[data-v-da5d1713]{position:absolute;top:50%;left:50%;border-radius:50%;width:192px;height:192px;background-image:var(--vp-home-hero-image-background-image);filter:var(--vp-home-hero-image-filter);transform:translate(-50%,-50%)}@media (min-width: 640px){.image-bg[data-v-da5d1713]{width:256px;height:256px}}@media (min-width: 960px){.image-bg[data-v-da5d1713]{width:320px;height:320px}}[data-v-da5d1713] .image-src{position:absolute;top:50%;left:50%;max-width:192px;max-height:192px;transform:translate(-50%,-50%)}@media (min-width: 640px){[data-v-da5d1713] .image-src{max-width:256px;max-height:256px}}@media (min-width: 960px){[data-v-da5d1713] .image-src{max-width:320px;max-height:320px}}.VPFeature[data-v-33204567]{display:block;border:1px solid var(--vp-c-bg-soft);border-radius:12px;height:100%;background-color:var(--vp-c-bg-soft);transition:border-color .25s,background-color .25s}.VPFeature.link[data-v-33204567]:hover{border-color:var(--vp-c-brand-1)}.box[data-v-33204567]{display:flex;flex-direction:column;padding:24px;height:100%}.box[data-v-33204567]>.VPImage{margin-bottom:20px}.icon[data-v-33204567]{display:flex;justify-content:center;align-items:center;margin-bottom:20px;border-radius:6px;background-color:var(--vp-c-default-soft);width:48px;height:48px;font-size:24px;transition:background-color 
.25s}.title[data-v-33204567]{line-height:24px;font-size:16px;font-weight:600}.details[data-v-33204567]{flex-grow:1;padding-top:8px;line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-2)}.link-text[data-v-33204567]{padding-top:8px}.link-text-value[data-v-33204567]{display:flex;align-items:center;font-size:14px;font-weight:500;color:var(--vp-c-brand-1)}.link-text-icon[data-v-33204567]{display:inline-block;margin-left:6px;width:14px;height:14px;fill:currentColor}.VPFeatures[data-v-a6181336]{position:relative;padding:0 24px}@media (min-width: 640px){.VPFeatures[data-v-a6181336]{padding:0 48px}}@media (min-width: 960px){.VPFeatures[data-v-a6181336]{padding:0 64px}}.container[data-v-a6181336]{margin:0 auto;max-width:1152px}.items[data-v-a6181336]{display:flex;flex-wrap:wrap;margin:-8px}.item[data-v-a6181336]{padding:8px;width:100%}@media (min-width: 640px){.item.grid-2[data-v-a6181336],.item.grid-4[data-v-a6181336],.item.grid-6[data-v-a6181336]{width:50%}}@media (min-width: 768px){.item.grid-2[data-v-a6181336],.item.grid-4[data-v-a6181336]{width:50%}.item.grid-3[data-v-a6181336],.item.grid-6[data-v-a6181336]{width:calc(100% / 3)}}@media (min-width: 960px){.item.grid-4[data-v-a6181336]{width:25%}}.VPHome[data-v-d82743a8]{padding-bottom:96px}.VPHome[data-v-d82743a8] .VPHomeSponsors{margin-top:112px;margin-bottom:-128px}@media (min-width: 768px){.VPHome[data-v-d82743a8]{padding-bottom:128px}}.VPContent[data-v-669faec9]{flex-grow:1;flex-shrink:0;margin:var(--vp-layout-top-height, 0px) auto 0;width:100%}.VPContent.is-home[data-v-669faec9]{width:100%;max-width:100%}.VPContent.has-sidebar[data-v-669faec9]{margin:0}@media (min-width: 960px){.VPContent[data-v-669faec9]{padding-top:var(--vp-nav-height)}.VPContent.has-sidebar[data-v-669faec9]{margin:var(--vp-layout-top-height, 0px) 0 0;padding-left:var(--vp-sidebar-width)}}@media (min-width: 1440px){.VPContent.has-sidebar[data-v-669faec9]{padding-right:calc((100vw - var(--vp-layout-max-width)) / 
2);padding-left:calc((100vw - var(--vp-layout-max-width)) / 2 + var(--vp-sidebar-width))}}.VPFooter[data-v-e315a0ad]{position:relative;z-index:var(--vp-z-index-footer);border-top:1px solid var(--vp-c-gutter);padding:32px 24px;background-color:var(--vp-c-bg)}.VPFooter.has-sidebar[data-v-e315a0ad]{display:none}.VPFooter[data-v-e315a0ad] a{text-decoration-line:underline;text-underline-offset:2px;transition:color .25s}.VPFooter[data-v-e315a0ad] a:hover{color:var(--vp-c-text-1)}@media (min-width: 768px){.VPFooter[data-v-e315a0ad]{padding:32px}}.container[data-v-e315a0ad]{margin:0 auto;max-width:var(--vp-layout-max-width);text-align:center}.message[data-v-e315a0ad],.copyright[data-v-e315a0ad]{line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-2)}.VPLocalNavOutlineDropdown[data-v-1c15a60a]{padding:12px 20px 11px}.VPLocalNavOutlineDropdown button[data-v-1c15a60a]{display:block;font-size:12px;font-weight:500;line-height:24px;color:var(--vp-c-text-2);transition:color .5s;position:relative}.VPLocalNavOutlineDropdown button[data-v-1c15a60a]:hover{color:var(--vp-c-text-1);transition:color .25s}.VPLocalNavOutlineDropdown button.open[data-v-1c15a60a]{color:var(--vp-c-text-1)}.icon[data-v-1c15a60a]{display:inline-block;vertical-align:middle;margin-left:2px;width:14px;height:14px;fill:currentColor}[data-v-1c15a60a] .outline-link{font-size:14px;padding:2px 0}.open>.icon[data-v-1c15a60a]{transform:rotate(90deg)}.items[data-v-1c15a60a]{position:absolute;top:64px;right:16px;left:16px;display:grid;gap:1px;border:1px solid var(--vp-c-border);border-radius:8px;background-color:var(--vp-c-gutter);max-height:calc(var(--vp-vh, 100vh) - 86px);overflow:hidden auto;box-shadow:var(--vp-shadow-3)}.header[data-v-1c15a60a]{background-color:var(--vp-c-bg-soft)}.top-link[data-v-1c15a60a]{display:block;padding:0 16px;line-height:48px;font-size:14px;font-weight:500;color:var(--vp-c-brand-1)}.outline[data-v-1c15a60a]{padding:8px 
0;background-color:var(--vp-c-bg-soft)}.flyout-enter-active[data-v-1c15a60a]{transition:all .2s ease-out}.flyout-leave-active[data-v-1c15a60a]{transition:all .15s ease-in}.flyout-enter-from[data-v-1c15a60a],.flyout-leave-to[data-v-1c15a60a]{opacity:0;transform:translateY(-16px)}.VPLocalNav[data-v-f84a0989]{position:sticky;top:0;left:0;z-index:var(--vp-z-index-local-nav);display:flex;justify-content:space-between;align-items:center;border-top:1px solid var(--vp-c-gutter);border-bottom:1px solid var(--vp-c-gutter);padding-top:var(--vp-layout-top-height, 0px);width:100%;background-color:var(--vp-local-nav-bg-color)}.VPLocalNav.fixed[data-v-f84a0989]{position:fixed}.VPLocalNav.reached-top[data-v-f84a0989]{border-top-color:transparent}@media (min-width: 960px){.VPLocalNav[data-v-f84a0989]{display:none}}.menu[data-v-f84a0989]{display:flex;align-items:center;padding:12px 24px 11px;line-height:24px;font-size:12px;font-weight:500;color:var(--vp-c-text-2);transition:color .5s}.menu[data-v-f84a0989]:hover{color:var(--vp-c-text-1);transition:color .25s}@media (min-width: 768px){.menu[data-v-f84a0989]{padding:0 32px}}.menu-icon[data-v-f84a0989]{margin-right:8px;width:16px;height:16px;fill:currentColor}.VPOutlineDropdown[data-v-f84a0989]{padding:12px 24px 11px}@media (min-width: 768px){.VPOutlineDropdown[data-v-f84a0989]{padding:12px 32px 11px}}.VPSwitch[data-v-b1685198]{position:relative;border-radius:11px;display:block;width:40px;height:22px;flex-shrink:0;border:1px solid var(--vp-input-border-color);background-color:var(--vp-input-switch-bg-color);transition:border-color .25s!important}.VPSwitch[data-v-b1685198]:hover{border-color:var(--vp-c-brand-1)}.check[data-v-b1685198]{position:absolute;top:1px;left:1px;width:18px;height:18px;border-radius:50%;background-color:var(--vp-c-neutral-inverse);box-shadow:var(--vp-shadow-1);transition:transform 
.25s!important}.icon[data-v-b1685198]{position:relative;display:block;width:18px;height:18px;border-radius:50%;overflow:hidden}.icon[data-v-b1685198] svg{position:absolute;top:3px;left:3px;width:12px;height:12px;fill:var(--vp-c-text-2)}.dark .icon[data-v-b1685198] svg{fill:var(--vp-c-text-1);transition:opacity .25s!important}.sun[data-v-cbbe1149]{opacity:1}.moon[data-v-cbbe1149],.dark .sun[data-v-cbbe1149]{opacity:0}.dark .moon[data-v-cbbe1149]{opacity:1}.dark .VPSwitchAppearance[data-v-cbbe1149] .check{transform:translate(18px)}.VPNavBarAppearance[data-v-e6aabb21]{display:none}@media (min-width: 1280px){.VPNavBarAppearance[data-v-e6aabb21]{display:flex;align-items:center}}.VPMenuGroup+.VPMenuLink[data-v-43f1e123]{margin:12px -12px 0;border-top:1px solid var(--vp-c-divider);padding:12px 12px 0}.link[data-v-43f1e123]{display:block;border-radius:6px;padding:0 12px;line-height:32px;font-size:14px;font-weight:500;color:var(--vp-c-text-1);white-space:nowrap;transition:background-color .25s,color .25s}.link[data-v-43f1e123]:hover{color:var(--vp-c-brand-1);background-color:var(--vp-c-default-soft)}.link.active[data-v-43f1e123]{color:var(--vp-c-brand-1)}.VPMenuGroup[data-v-69e747b5]{margin:12px -12px 0;border-top:1px solid var(--vp-c-divider);padding:12px 12px 0}.VPMenuGroup[data-v-69e747b5]:first-child{margin-top:0;border-top:0;padding-top:0}.VPMenuGroup+.VPMenuGroup[data-v-69e747b5]{margin-top:12px;border-top:1px solid var(--vp-c-divider)}.title[data-v-69e747b5]{padding:0 12px;line-height:32px;font-size:14px;font-weight:600;color:var(--vp-c-text-2);white-space:nowrap;transition:color .25s}.VPMenu[data-v-e7ea1737]{border-radius:12px;padding:12px;min-width:128px;border:1px solid var(--vp-c-divider);background-color:var(--vp-c-bg-elv);box-shadow:var(--vp-shadow-3);transition:background-color .5s;max-height:calc(100vh - var(--vp-nav-height));overflow-y:auto}.VPMenu[data-v-e7ea1737] .group{margin:0 -12px;padding:0 12px 12px}.VPMenu[data-v-e7ea1737] 
.group+.group{border-top:1px solid var(--vp-c-divider);padding:11px 12px 12px}.VPMenu[data-v-e7ea1737] .group:last-child{padding-bottom:0}.VPMenu[data-v-e7ea1737] .group+.item{border-top:1px solid var(--vp-c-divider);padding:11px 16px 0}.VPMenu[data-v-e7ea1737] .item{padding:0 16px;white-space:nowrap}.VPMenu[data-v-e7ea1737] .label{flex-grow:1;line-height:28px;font-size:12px;font-weight:500;color:var(--vp-c-text-2);transition:color .5s}.VPMenu[data-v-e7ea1737] .action{padding-left:24px}.VPFlyout[data-v-9c007e85]{position:relative}.VPFlyout[data-v-9c007e85]:hover{color:var(--vp-c-brand-1);transition:color .25s}.VPFlyout:hover .text[data-v-9c007e85]{color:var(--vp-c-text-2)}.VPFlyout:hover .icon[data-v-9c007e85]{fill:var(--vp-c-text-2)}.VPFlyout.active .text[data-v-9c007e85]{color:var(--vp-c-brand-1)}.VPFlyout.active:hover .text[data-v-9c007e85]{color:var(--vp-c-brand-2)}.VPFlyout:hover .menu[data-v-9c007e85],.button[aria-expanded=true]+.menu[data-v-9c007e85]{opacity:1;visibility:visible;transform:translateY(0)}.button[aria-expanded=false]+.menu[data-v-9c007e85]{opacity:0;visibility:hidden;transform:translateY(0)}.button[data-v-9c007e85]{display:flex;align-items:center;padding:0 12px;height:var(--vp-nav-height);color:var(--vp-c-text-1);transition:color .5s}.text[data-v-9c007e85]{display:flex;align-items:center;line-height:var(--vp-nav-height);font-size:14px;font-weight:500;color:var(--vp-c-text-1);transition:color .25s}.option-icon[data-v-9c007e85]{margin-right:0;width:16px;height:16px;fill:currentColor}.text-icon[data-v-9c007e85]{margin-left:4px;width:14px;height:14px;fill:currentColor}.icon[data-v-9c007e85]{width:20px;height:20px;fill:currentColor;transition:fill .25s}.menu[data-v-9c007e85]{position:absolute;top:calc(var(--vp-nav-height) / 2 + 20px);right:0;opacity:0;visibility:hidden;transition:opacity .25s,visibility .25s,transform 
.25s}.VPSocialLink[data-v-f80f8133]{display:flex;justify-content:center;align-items:center;width:36px;height:36px;color:var(--vp-c-text-2);transition:color .5s}.VPSocialLink[data-v-f80f8133]:hover{color:var(--vp-c-text-1);transition:color .25s}.VPSocialLink[data-v-f80f8133]>svg{width:20px;height:20px;fill:currentColor}.VPSocialLinks[data-v-7bc22406]{display:flex;justify-content:center}.VPNavBarExtra[data-v-d0bd9dde]{display:none;margin-right:-12px}@media (min-width: 768px){.VPNavBarExtra[data-v-d0bd9dde]{display:block}}@media (min-width: 1280px){.VPNavBarExtra[data-v-d0bd9dde]{display:none}}.trans-title[data-v-d0bd9dde]{padding:0 24px 0 12px;line-height:32px;font-size:14px;font-weight:700;color:var(--vp-c-text-1)}.item.appearance[data-v-d0bd9dde],.item.social-links[data-v-d0bd9dde]{display:flex;align-items:center;padding:0 12px}.item.appearance[data-v-d0bd9dde]{min-width:176px}.appearance-action[data-v-d0bd9dde]{margin-right:-2px}.social-links-list[data-v-d0bd9dde]{margin:-4px -8px}.VPNavBarHamburger[data-v-e5dd9c1c]{display:flex;justify-content:center;align-items:center;width:48px;height:var(--vp-nav-height)}@media (min-width: 768px){.VPNavBarHamburger[data-v-e5dd9c1c]{display:none}}.container[data-v-e5dd9c1c]{position:relative;width:16px;height:14px;overflow:hidden}.VPNavBarHamburger:hover .top[data-v-e5dd9c1c]{top:0;left:0;transform:translate(4px)}.VPNavBarHamburger:hover .middle[data-v-e5dd9c1c]{top:6px;left:0;transform:translate(0)}.VPNavBarHamburger:hover .bottom[data-v-e5dd9c1c]{top:12px;left:0;transform:translate(8px)}.VPNavBarHamburger.active .top[data-v-e5dd9c1c]{top:6px;transform:translate(0) rotate(225deg)}.VPNavBarHamburger.active .middle[data-v-e5dd9c1c]{top:6px;transform:translate(16px)}.VPNavBarHamburger.active .bottom[data-v-e5dd9c1c]{top:6px;transform:translate(0) rotate(135deg)}.VPNavBarHamburger.active:hover .top[data-v-e5dd9c1c],.VPNavBarHamburger.active:hover .middle[data-v-e5dd9c1c],.VPNavBarHamburger.active:hover 
.bottom[data-v-e5dd9c1c]{background-color:var(--vp-c-text-2);transition:top .25s,background-color .25s,transform .25s}.top[data-v-e5dd9c1c],.middle[data-v-e5dd9c1c],.bottom[data-v-e5dd9c1c]{position:absolute;width:16px;height:2px;background-color:var(--vp-c-text-1);transition:top .25s,background-color .5s,transform .25s}.top[data-v-e5dd9c1c]{top:0;left:0;transform:translate(0)}.middle[data-v-e5dd9c1c]{top:6px;left:0;transform:translate(8px)}.bottom[data-v-e5dd9c1c]{top:12px;left:0;transform:translate(4px)}.VPNavBarMenuLink[data-v-42ef59de]{display:flex;align-items:center;padding:0 12px;line-height:var(--vp-nav-height);font-size:14px;font-weight:500;color:var(--vp-c-text-1);transition:color .25s}.VPNavBarMenuLink.active[data-v-42ef59de],.VPNavBarMenuLink[data-v-42ef59de]:hover{color:var(--vp-c-brand-1)}.VPNavBarMenu[data-v-7f418b0f]{display:none}@media (min-width: 768px){.VPNavBarMenu[data-v-7f418b0f]{display:flex}}/*! @docsearch/css 3.5.2 | MIT License | © Algolia, Inc. and contributors | https://docsearch.algolia.com */:root{--docsearch-primary-color:#5468ff;--docsearch-text-color:#1c1e21;--docsearch-spacing:12px;--docsearch-icon-stroke-width:1.4;--docsearch-highlight-color:var(--docsearch-primary-color);--docsearch-muted-color:#969faf;--docsearch-container-background:rgba(101,108,133,.8);--docsearch-logo-color:#5468ff;--docsearch-modal-width:560px;--docsearch-modal-height:600px;--docsearch-modal-background:#f5f6f7;--docsearch-modal-shadow:inset 1px 1px 0 0 hsla(0,0%,100%,.5),0 3px 8px 0 #555a64;--docsearch-searchbox-height:56px;--docsearch-searchbox-background:#ebedf0;--docsearch-searchbox-focus-background:#fff;--docsearch-searchbox-shadow:inset 0 0 0 2px var(--docsearch-primary-color);--docsearch-hit-height:56px;--docsearch-hit-color:#444950;--docsearch-hit-active-color:#fff;--docsearch-hit-background:#fff;--docsearch-hit-shadow:0 1px 3px 0 #d4d9e1;--docsearch-key-gradient:linear-gradient(-225deg,#d5dbe4,#f8f8f8);--docsearch-key-shadow:inset 0 -2px 0 0 
#cdcde6,inset 0 0 1px 1px #fff,0 1px 2px 1px rgba(30,35,90,.4);--docsearch-footer-height:44px;--docsearch-footer-background:#fff;--docsearch-footer-shadow:0 -1px 0 0 #e0e3e8,0 -3px 6px 0 rgba(69,98,155,.12)}html[data-theme=dark]{--docsearch-text-color:#f5f6f7;--docsearch-container-background:rgba(9,10,17,.8);--docsearch-modal-background:#15172a;--docsearch-modal-shadow:inset 1px 1px 0 0 #2c2e40,0 3px 8px 0 #000309;--docsearch-searchbox-background:#090a11;--docsearch-searchbox-focus-background:#000;--docsearch-hit-color:#bec3c9;--docsearch-hit-shadow:none;--docsearch-hit-background:#090a11;--docsearch-key-gradient:linear-gradient(-26.5deg,#565872,#31355b);--docsearch-key-shadow:inset 0 -2px 0 0 #282d55,inset 0 0 1px 1px #51577d,0 2px 2px 0 rgba(3,4,9,.3);--docsearch-footer-background:#1e2136;--docsearch-footer-shadow:inset 0 1px 0 0 rgba(73,76,106,.5),0 -4px 8px 0 rgba(0,0,0,.2);--docsearch-logo-color:#fff;--docsearch-muted-color:#7f8497}.DocSearch-Button{align-items:center;background:var(--docsearch-searchbox-background);border:0;border-radius:40px;color:var(--docsearch-muted-color);cursor:pointer;display:flex;font-weight:500;height:36px;justify-content:space-between;margin:0 0 0 16px;padding:0 8px;-webkit-user-select:none;user-select:none}.DocSearch-Button:active,.DocSearch-Button:focus,.DocSearch-Button:hover{background:var(--docsearch-searchbox-focus-background);box-shadow:var(--docsearch-searchbox-shadow);color:var(--docsearch-text-color);outline:none}.DocSearch-Button-Container{align-items:center;display:flex}.DocSearch-Search-Icon{stroke-width:1.6}.DocSearch-Button .DocSearch-Search-Icon{color:var(--docsearch-text-color)}.DocSearch-Button-Placeholder{font-size:1rem;padding:0 12px 0 6px}.DocSearch-Button-Keys{display:flex;min-width:calc(40px + 
.8em)}.DocSearch-Button-Key{align-items:center;background:var(--docsearch-key-gradient);border-radius:3px;box-shadow:var(--docsearch-key-shadow);color:var(--docsearch-muted-color);display:flex;height:18px;justify-content:center;margin-right:.4em;position:relative;padding:0 0 2px;border:0;top:-1px;width:20px}@media (max-width:768px){.DocSearch-Button-Keys,.DocSearch-Button-Placeholder{display:none}}.DocSearch--active{overflow:hidden!important}.DocSearch-Container,.DocSearch-Container *{box-sizing:border-box}.DocSearch-Container{background-color:var(--docsearch-container-background);height:100vh;left:0;position:fixed;top:0;width:100vw;z-index:200}.DocSearch-Container a{text-decoration:none}.DocSearch-Link{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;color:var(--docsearch-highlight-color);cursor:pointer;font:inherit;margin:0;padding:0}.DocSearch-Modal{background:var(--docsearch-modal-background);border-radius:6px;box-shadow:var(--docsearch-modal-shadow);flex-direction:column;margin:60px auto auto;max-width:var(--docsearch-modal-width);position:relative}.DocSearch-SearchBar{display:flex;padding:var(--docsearch-spacing) var(--docsearch-spacing) 0}.DocSearch-Form{align-items:center;background:var(--docsearch-searchbox-focus-background);border-radius:4px;box-shadow:var(--docsearch-searchbox-shadow);display:flex;height:var(--docsearch-searchbox-height);margin:0;padding:0 var(--docsearch-spacing);position:relative;width:100%}.DocSearch-Input{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:transparent;border:0;color:var(--docsearch-text-color);flex:1;font:inherit;font-size:1.2em;height:100%;outline:none;padding:0 0 0 
8px;width:80%}.DocSearch-Input::placeholder{color:var(--docsearch-muted-color);opacity:1}.DocSearch-Input::-webkit-search-cancel-button,.DocSearch-Input::-webkit-search-decoration,.DocSearch-Input::-webkit-search-results-button,.DocSearch-Input::-webkit-search-results-decoration{display:none}.DocSearch-LoadingIndicator,.DocSearch-MagnifierLabel,.DocSearch-Reset{margin:0;padding:0}.DocSearch-MagnifierLabel,.DocSearch-Reset{align-items:center;color:var(--docsearch-highlight-color);display:flex;justify-content:center}.DocSearch-Container--Stalled .DocSearch-MagnifierLabel,.DocSearch-LoadingIndicator{display:none}.DocSearch-Container--Stalled .DocSearch-LoadingIndicator{align-items:center;color:var(--docsearch-highlight-color);display:flex;justify-content:center}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Reset{animation:none;-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;border-radius:50%;color:var(--docsearch-icon-color);cursor:pointer;right:0;stroke-width:var(--docsearch-icon-stroke-width)}}.DocSearch-Reset{animation:fade-in .1s ease-in forwards;-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;border-radius:50%;color:var(--docsearch-icon-color);cursor:pointer;padding:2px;right:0;stroke-width:var(--docsearch-icon-stroke-width)}.DocSearch-Reset[hidden]{display:none}.DocSearch-Reset:hover{color:var(--docsearch-highlight-color)}.DocSearch-LoadingIndicator svg,.DocSearch-MagnifierLabel svg{height:24px;width:24px}.DocSearch-Cancel{display:none}.DocSearch-Dropdown{max-height:calc(var(--docsearch-modal-height) - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height));min-height:var(--docsearch-spacing);overflow-y:auto;overflow-y:overlay;padding:0 var(--docsearch-spacing);scrollbar-color:var(--docsearch-muted-color) 
var(--docsearch-modal-background);scrollbar-width:thin}.DocSearch-Dropdown::-webkit-scrollbar{width:12px}.DocSearch-Dropdown::-webkit-scrollbar-track{background:transparent}.DocSearch-Dropdown::-webkit-scrollbar-thumb{background-color:var(--docsearch-muted-color);border:3px solid var(--docsearch-modal-background);border-radius:20px}.DocSearch-Dropdown ul{list-style:none;margin:0;padding:0}.DocSearch-Label{font-size:.75em;line-height:1.6em}.DocSearch-Help,.DocSearch-Label{color:var(--docsearch-muted-color)}.DocSearch-Help{font-size:.9em;margin:0;-webkit-user-select:none;user-select:none}.DocSearch-Title{font-size:1.2em}.DocSearch-Logo a{display:flex}.DocSearch-Logo svg{color:var(--docsearch-logo-color);margin-left:8px}.DocSearch-Hits:last-of-type{margin-bottom:24px}.DocSearch-Hits mark{background:none;color:var(--docsearch-highlight-color)}.DocSearch-HitsFooter{color:var(--docsearch-muted-color);display:flex;font-size:.85em;justify-content:center;margin-bottom:var(--docsearch-spacing);padding:var(--docsearch-spacing)}.DocSearch-HitsFooter a{border-bottom:1px solid;color:inherit}.DocSearch-Hit{border-radius:4px;display:flex;padding-bottom:4px;position:relative}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Hit--deleting{transition:none}}.DocSearch-Hit--deleting{opacity:0;transition:all .25s linear}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Hit--favoriting{transition:none}}.DocSearch-Hit--favoriting{transform:scale(0);transform-origin:top center;transition:all .25s linear;transition-delay:.25s}.DocSearch-Hit a{background:var(--docsearch-hit-background);border-radius:4px;box-shadow:var(--docsearch-hit-shadow);display:block;padding-left:var(--docsearch-spacing);width:100%}.DocSearch-Hit-source{background:var(--docsearch-modal-background);color:var(--docsearch-highlight-color);font-size:.85em;font-weight:600;line-height:32px;margin:0 -4px;padding:8px 4px 
0;position:sticky;top:0;z-index:10}.DocSearch-Hit-Tree{color:var(--docsearch-muted-color);height:var(--docsearch-hit-height);opacity:.5;stroke-width:var(--docsearch-icon-stroke-width);width:24px}.DocSearch-Hit[aria-selected=true] a{background-color:var(--docsearch-highlight-color)}.DocSearch-Hit[aria-selected=true] mark{text-decoration:underline}.DocSearch-Hit-Container{align-items:center;color:var(--docsearch-hit-color);display:flex;flex-direction:row;height:var(--docsearch-hit-height);padding:0 var(--docsearch-spacing) 0 0}.DocSearch-Hit-icon{height:20px;width:20px}.DocSearch-Hit-action,.DocSearch-Hit-icon{color:var(--docsearch-muted-color);stroke-width:var(--docsearch-icon-stroke-width)}.DocSearch-Hit-action{align-items:center;display:flex;height:22px;width:22px}.DocSearch-Hit-action svg{display:block;height:18px;width:18px}.DocSearch-Hit-action+.DocSearch-Hit-action{margin-left:6px}.DocSearch-Hit-action-button{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;border-radius:50%;color:inherit;cursor:pointer;padding:2px}svg.DocSearch-Hit-Select-Icon{display:none}.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Select-Icon{display:block}.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{background:#0003;transition:background-color .1s ease-in}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{transition:none}}.DocSearch-Hit-action-button:focus path,.DocSearch-Hit-action-button:hover path{fill:#fff}.DocSearch-Hit-content-wrapper{display:flex;flex:1 1 auto;flex-direction:column;font-weight:500;justify-content:center;line-height:1.2em;margin:0 8px;overflow-x:hidden;position:relative;text-overflow:ellipsis;white-space:nowrap;width:80%}.DocSearch-Hit-title{font-size:.9em}.DocSearch-Hit-path{color:var(--docsearch-muted-color);font-size:.75em}.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-action,.DocSearch-Hit[aria-selected=true] 
.DocSearch-Hit-icon,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-path,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-text,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-title,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Tree,.DocSearch-Hit[aria-selected=true] mark{color:var(--docsearch-hit-active-color)!important}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{background:#0003;transition:none}}.DocSearch-ErrorScreen,.DocSearch-NoResults,.DocSearch-StartScreen{font-size:.9em;margin:0 auto;padding:36px 0;text-align:center;width:80%}.DocSearch-Screen-Icon{color:var(--docsearch-muted-color);padding-bottom:12px}.DocSearch-NoResults-Prefill-List{display:inline-block;padding-bottom:24px;text-align:left}.DocSearch-NoResults-Prefill-List ul{display:inline-block;padding:8px 0 0}.DocSearch-NoResults-Prefill-List li{list-style-position:inside;list-style-type:"» "}.DocSearch-Prefill{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;border-radius:1em;color:var(--docsearch-highlight-color);cursor:pointer;display:inline-block;font-size:1em;font-weight:700;padding:0}.DocSearch-Prefill:focus,.DocSearch-Prefill:hover{outline:none;text-decoration:underline}.DocSearch-Footer{align-items:center;background:var(--docsearch-footer-background);border-radius:0 0 8px 8px;box-shadow:var(--docsearch-footer-shadow);display:flex;flex-direction:row-reverse;flex-shrink:0;height:var(--docsearch-footer-height);justify-content:space-between;padding:0 var(--docsearch-spacing);position:relative;-webkit-user-select:none;user-select:none;width:100%;z-index:300}.DocSearch-Commands{color:var(--docsearch-muted-color);display:flex;list-style:none;margin:0;padding:0}.DocSearch-Commands li{align-items:center;display:flex}.DocSearch-Commands 
li:not(:last-of-type){margin-right:.8em}.DocSearch-Commands-Key{align-items:center;background:var(--docsearch-key-gradient);border-radius:2px;box-shadow:var(--docsearch-key-shadow);display:flex;height:18px;justify-content:center;margin-right:.4em;padding:0 0 1px;color:var(--docsearch-muted-color);border:0;width:20px}@media (max-width:768px){:root{--docsearch-spacing:10px;--docsearch-footer-height:40px}.DocSearch-Dropdown{height:100%}.DocSearch-Container{height:100vh;height:-webkit-fill-available;height:calc(var(--docsearch-vh, 1vh)*100);position:absolute}.DocSearch-Footer{border-radius:0;bottom:0;position:absolute}.DocSearch-Hit-content-wrapper{display:flex;position:relative;width:80%}.DocSearch-Modal{border-radius:0;box-shadow:none;height:100vh;height:-webkit-fill-available;height:calc(var(--docsearch-vh, 1vh)*100);margin:0;max-width:100%;width:100%}.DocSearch-Dropdown{max-height:calc(var(--docsearch-vh, 1vh)*100 - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height))}.DocSearch-Cancel{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;color:var(--docsearch-highlight-color);cursor:pointer;display:inline-block;flex:none;font:inherit;font-size:1em;font-weight:500;margin-left:var(--docsearch-spacing);outline:none;overflow:hidden;padding:0;-webkit-user-select:none;user-select:none;white-space:nowrap}.DocSearch-Commands,.DocSearch-Hit-Tree{display:none}}@keyframes fade-in{0%{opacity:0}to{opacity:1}}[class*=DocSearch]{--docsearch-primary-color: var(--vp-c-brand-1);--docsearch-highlight-color: var(--docsearch-primary-color);--docsearch-text-color: var(--vp-c-text-1);--docsearch-muted-color: var(--vp-c-text-2);--docsearch-searchbox-shadow: none;--docsearch-searchbox-background: transparent;--docsearch-searchbox-focus-background: transparent;--docsearch-key-gradient: transparent;--docsearch-key-shadow: none;--docsearch-modal-background: var(--vp-c-bg-soft);--docsearch-footer-background: 
var(--vp-c-bg)}.dark [class*=DocSearch]{--docsearch-modal-shadow: none;--docsearch-footer-shadow: none;--docsearch-logo-color: var(--vp-c-text-2);--docsearch-hit-background: var(--vp-c-default-soft);--docsearch-hit-color: var(--vp-c-text-2);--docsearch-hit-shadow: none}.DocSearch-Button{display:flex;justify-content:center;align-items:center;margin:0;padding:0;width:48px;height:55px;background:transparent;transition:border-color .25s}.DocSearch-Button:hover{background:transparent}.DocSearch-Button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}.DocSearch-Button:focus:not(:focus-visible){outline:none!important}@media (min-width: 768px){.DocSearch-Button{justify-content:flex-start;border:1px solid transparent;border-radius:8px;padding:0 10px 0 12px;width:100%;height:40px;background-color:var(--vp-c-bg-alt)}.DocSearch-Button:hover{border-color:var(--vp-c-brand-1);background:var(--vp-c-bg-alt)}}.DocSearch-Button .DocSearch-Button-Container{display:flex;align-items:center}.DocSearch-Button .DocSearch-Search-Icon{position:relative;width:16px;height:16px;color:var(--vp-c-text-1);fill:currentColor;transition:color .5s}.DocSearch-Button:hover .DocSearch-Search-Icon{color:var(--vp-c-text-1)}@media (min-width: 768px){.DocSearch-Button .DocSearch-Search-Icon{top:1px;margin-right:8px;width:14px;height:14px;color:var(--vp-c-text-2)}}.DocSearch-Button .DocSearch-Button-Placeholder{display:none;margin-top:2px;padding:0 16px 0 0;font-size:13px;font-weight:500;color:var(--vp-c-text-2);transition:color .5s}.DocSearch-Button:hover .DocSearch-Button-Placeholder{color:var(--vp-c-text-1)}@media (min-width: 768px){.DocSearch-Button .DocSearch-Button-Placeholder{display:inline-block}}.DocSearch-Button .DocSearch-Button-Keys{direction:ltr;display:none;min-width:auto}@media (min-width: 768px){.DocSearch-Button .DocSearch-Button-Keys{display:flex;align-items:center}}.DocSearch-Button .DocSearch-Button-Key{display:block;margin:2px 0 0;border:1px solid 
var(--vp-c-divider);border-right:none;border-radius:4px 0 0 4px;padding-left:6px;min-width:0;width:auto;height:22px;line-height:22px;font-family:var(--vp-font-family-base);font-size:12px;font-weight:500;transition:color .5s,border-color .5s}.DocSearch-Button .DocSearch-Button-Key+.DocSearch-Button-Key{border-right:1px solid var(--vp-c-divider);border-left:none;border-radius:0 4px 4px 0;padding-left:2px;padding-right:6px}.DocSearch-Button .DocSearch-Button-Key:first-child{font-size:0!important}.DocSearch-Button .DocSearch-Button-Key:first-child:after{content:"Ctrl";font-size:12px;letter-spacing:normal;color:var(--docsearch-muted-color)}.mac .DocSearch-Button .DocSearch-Button-Key:first-child:after{content:"⌘"}.DocSearch-Button .DocSearch-Button-Key:first-child>*{display:none}.VPNavBarSearch{display:flex;align-items:center}@media (min-width: 768px){.VPNavBarSearch{flex-grow:1;padding-left:24px}}@media (min-width: 960px){.VPNavBarSearch{padding-left:32px}}.dark .DocSearch-Footer{border-top:1px solid var(--vp-c-divider)}.DocSearch-Form{border:1px solid var(--vp-c-brand-1);background-color:var(--vp-c-white)}.dark .DocSearch-Form{background-color:var(--vp-c-default-soft)}.DocSearch-Screen-Icon>svg{margin:auto}.VPNavBarSocialLinks[data-v-0394ad82]{display:none}@media (min-width: 1280px){.VPNavBarSocialLinks[data-v-0394ad82]{display:flex;align-items:center}}.title[data-v-86d1bed8]{display:flex;align-items:center;border-bottom:1px solid transparent;width:100%;height:var(--vp-nav-height);font-size:16px;font-weight:600;color:var(--vp-c-text-1);transition:opacity .25s}@media (min-width: 960px){.title[data-v-86d1bed8]{flex-shrink:0}.VPNavBarTitle.has-sidebar .title[data-v-86d1bed8]{border-bottom-color:var(--vp-c-divider)}}[data-v-86d1bed8] .logo{margin-right:8px;height:var(--vp-nav-logo-height)}.VPNavBarTranslations[data-v-74abcbb9]{display:none}@media (min-width: 1280px){.VPNavBarTranslations[data-v-74abcbb9]{display:flex;align-items:center}}.title[data-v-74abcbb9]{padding:0 
24px 0 12px;line-height:32px;font-size:14px;font-weight:700;color:var(--vp-c-text-1)}.VPNavBar[data-v-d83f3580]{position:relative;border-bottom:1px solid transparent;padding:0 8px 0 24px;height:var(--vp-nav-height);pointer-events:none;white-space:nowrap}@media (min-width: 768px){.VPNavBar[data-v-d83f3580]{padding:0 32px}}@media (min-width: 960px){.VPNavBar.has-sidebar[data-v-d83f3580]{padding:0}.VPNavBar[data-v-d83f3580]:not(.has-sidebar):not(.top){border-bottom-color:var(--vp-c-gutter);background-color:var(--vp-nav-bg-color)}}.container[data-v-d83f3580]{display:flex;justify-content:space-between;margin:0 auto;max-width:calc(var(--vp-layout-max-width) - 64px);height:var(--vp-nav-height);pointer-events:none}.container>.title[data-v-d83f3580],.container>.content[data-v-d83f3580]{pointer-events:none}.container[data-v-d83f3580] *{pointer-events:auto}@media (min-width: 960px){.VPNavBar.has-sidebar .container[data-v-d83f3580]{max-width:100%}}.title[data-v-d83f3580]{flex-shrink:0;height:calc(var(--vp-nav-height) - 1px);transition:background-color .5s}@media (min-width: 960px){.VPNavBar.has-sidebar .title[data-v-d83f3580]{position:absolute;top:0;left:0;z-index:2;padding:0 32px;width:var(--vp-sidebar-width);height:var(--vp-nav-height);background-color:transparent}}@media (min-width: 1440px){.VPNavBar.has-sidebar .title[data-v-d83f3580]{padding-left:max(32px,calc((100% - (var(--vp-layout-max-width) - 64px)) / 2));width:calc((100% - (var(--vp-layout-max-width) - 64px)) / 2 + var(--vp-sidebar-width) - 32px)}}.content[data-v-d83f3580]{flex-grow:1}@media (min-width: 960px){.VPNavBar.has-sidebar .content[data-v-d83f3580]{position:relative;z-index:1;padding-right:32px;padding-left:var(--vp-sidebar-width)}}@media (min-width: 1440px){.VPNavBar.has-sidebar .content[data-v-d83f3580]{padding-right:calc((100vw - var(--vp-layout-max-width)) / 2 + 32px);padding-left:calc((100vw - var(--vp-layout-max-width)) / 2 + 
var(--vp-sidebar-width))}}.content-body[data-v-d83f3580]{display:flex;justify-content:flex-end;align-items:center;height:calc(var(--vp-nav-height) - 1px);transition:background-color .5s}@media (min-width: 960px){.VPNavBar:not(.top) .content-body[data-v-d83f3580]{position:relative;background-color:var(--vp-nav-bg-color)}}@media (max-width: 767px){.content-body[data-v-d83f3580]{column-gap:.5rem}}.menu+.translations[data-v-d83f3580]:before,.menu+.appearance[data-v-d83f3580]:before,.menu+.social-links[data-v-d83f3580]:before,.translations+.appearance[data-v-d83f3580]:before,.appearance+.social-links[data-v-d83f3580]:before{margin-right:8px;margin-left:8px;width:1px;height:24px;background-color:var(--vp-c-divider);content:""}.menu+.appearance[data-v-d83f3580]:before,.translations+.appearance[data-v-d83f3580]:before{margin-right:16px}.appearance+.social-links[data-v-d83f3580]:before{margin-left:16px}.social-links[data-v-d83f3580]{margin-right:-8px}@media (min-width: 960px){.VPNavBar.has-sidebar .curtain[data-v-d83f3580]{position:absolute;right:0;bottom:-31px;width:calc(100% - var(--vp-sidebar-width));height:32px}.VPNavBar.has-sidebar .curtain[data-v-d83f3580]:before{display:block;width:100%;height:32px;background:linear-gradient(var(--vp-c-bg),transparent 70%);content:""}}@media (min-width: 1440px){.VPNavBar.has-sidebar .curtain[data-v-d83f3580]{width:calc(100% - ((100vw - var(--vp-layout-max-width)) / 2 + var(--vp-sidebar-width)))}}.VPNavScreenAppearance[data-v-2d7af913]{display:flex;justify-content:space-between;align-items:center;border-radius:8px;padding:12px 14px 12px 16px;background-color:var(--vp-c-bg-soft)}.text[data-v-2d7af913]{line-height:24px;font-size:12px;font-weight:500;color:var(--vp-c-text-2)}.VPNavScreenMenuLink[data-v-05f27b2a]{display:block;border-bottom:1px solid var(--vp-c-divider);padding:12px 0 11px;line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-1);transition:border-color .25s,color 
.25s}.VPNavScreenMenuLink[data-v-05f27b2a]:hover{color:var(--vp-c-brand-1)}.VPNavScreenMenuGroupLink[data-v-19976ae1]{display:block;margin-left:12px;line-height:32px;font-size:14px;font-weight:400;color:var(--vp-c-text-1);transition:color .25s}.VPNavScreenMenuGroupLink[data-v-19976ae1]:hover{color:var(--vp-c-brand-1)}.VPNavScreenMenuGroupSection[data-v-8133b170]{display:block}.title[data-v-8133b170]{line-height:32px;font-size:13px;font-weight:700;color:var(--vp-c-text-2);transition:color .25s}.VPNavScreenMenuGroup[data-v-65ef89ca]{border-bottom:1px solid var(--vp-c-divider);height:48px;overflow:hidden;transition:border-color .5s}.VPNavScreenMenuGroup .items[data-v-65ef89ca]{visibility:hidden}.VPNavScreenMenuGroup.open .items[data-v-65ef89ca]{visibility:visible}.VPNavScreenMenuGroup.open[data-v-65ef89ca]{padding-bottom:10px;height:auto}.VPNavScreenMenuGroup.open .button[data-v-65ef89ca]{padding-bottom:6px;color:var(--vp-c-brand-1)}.VPNavScreenMenuGroup.open .button-icon[data-v-65ef89ca]{transform:rotate(45deg)}.button[data-v-65ef89ca]{display:flex;justify-content:space-between;align-items:center;padding:12px 4px 11px 0;width:100%;line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-1);transition:color .25s}.button[data-v-65ef89ca]:hover{color:var(--vp-c-brand-1)}.button-icon[data-v-65ef89ca]{width:14px;height:14px;fill:var(--vp-c-text-2);transition:fill .5s,transform .25s}.group[data-v-65ef89ca]:first-child{padding-top:0}.group+.group[data-v-65ef89ca],.group+.item[data-v-65ef89ca]{padding-top:4px}.VPNavScreenTranslations[data-v-d72aa483]{height:24px;overflow:hidden}.VPNavScreenTranslations.open[data-v-d72aa483]{height:auto}.title[data-v-d72aa483]{display:flex;align-items:center;font-size:14px;font-weight:500;color:var(--vp-c-text-1)}.icon[data-v-d72aa483]{width:16px;height:16px;fill:currentColor}.icon.lang[data-v-d72aa483]{margin-right:8px}.icon.chevron[data-v-d72aa483]{margin-left:4px}.list[data-v-d72aa483]{padding:4px 0 0 
24px}.link[data-v-d72aa483]{line-height:32px;font-size:13px;color:var(--vp-c-text-1)}.VPNavScreen[data-v-cc5739dd]{position:fixed;top:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 1px);right:0;bottom:0;left:0;padding:0 32px;width:100%;background-color:var(--vp-nav-screen-bg-color);overflow-y:auto;transition:background-color .5s;pointer-events:auto}.VPNavScreen.fade-enter-active[data-v-cc5739dd],.VPNavScreen.fade-leave-active[data-v-cc5739dd]{transition:opacity .25s}.VPNavScreen.fade-enter-active .container[data-v-cc5739dd],.VPNavScreen.fade-leave-active .container[data-v-cc5739dd]{transition:transform .25s ease}.VPNavScreen.fade-enter-from[data-v-cc5739dd],.VPNavScreen.fade-leave-to[data-v-cc5739dd]{opacity:0}.VPNavScreen.fade-enter-from .container[data-v-cc5739dd],.VPNavScreen.fade-leave-to .container[data-v-cc5739dd]{transform:translateY(-8px)}@media (min-width: 768px){.VPNavScreen[data-v-cc5739dd]{display:none}}.container[data-v-cc5739dd]{margin:0 auto;padding:24px 0 96px;max-width:288px}.menu+.translations[data-v-cc5739dd],.menu+.appearance[data-v-cc5739dd],.translations+.appearance[data-v-cc5739dd]{margin-top:24px}.menu+.social-links[data-v-cc5739dd]{margin-top:16px}.appearance+.social-links[data-v-cc5739dd]{margin-top:16px}.VPNav[data-v-ae24b3ad]{position:relative;top:var(--vp-layout-top-height, 0px);left:0;z-index:var(--vp-z-index-nav);width:100%;pointer-events:none;transition:background-color .5s}@media (min-width: 960px){.VPNav[data-v-ae24b3ad]{position:fixed}}.VPSidebarItem.level-0[data-v-e31bd47b]{padding-bottom:24px}.VPSidebarItem.collapsed.level-0[data-v-e31bd47b]{padding-bottom:10px}.item[data-v-e31bd47b]{position:relative;display:flex;width:100%}.VPSidebarItem.collapsible>.item[data-v-e31bd47b]{cursor:pointer}.indicator[data-v-e31bd47b]{position:absolute;top:6px;bottom:6px;left:-17px;width:2px;border-radius:2px;transition:background-color 
.25s}.VPSidebarItem.level-2.is-active>.item>.indicator[data-v-e31bd47b],.VPSidebarItem.level-3.is-active>.item>.indicator[data-v-e31bd47b],.VPSidebarItem.level-4.is-active>.item>.indicator[data-v-e31bd47b],.VPSidebarItem.level-5.is-active>.item>.indicator[data-v-e31bd47b]{background-color:var(--vp-c-brand-1)}.link[data-v-e31bd47b]{display:flex;align-items:center;flex-grow:1}.text[data-v-e31bd47b]{flex-grow:1;padding:4px 0;line-height:24px;font-size:14px;transition:color .25s}.VPSidebarItem.level-0 .text[data-v-e31bd47b]{font-weight:700;color:var(--vp-c-text-1)}.VPSidebarItem.level-1 .text[data-v-e31bd47b],.VPSidebarItem.level-2 .text[data-v-e31bd47b],.VPSidebarItem.level-3 .text[data-v-e31bd47b],.VPSidebarItem.level-4 .text[data-v-e31bd47b],.VPSidebarItem.level-5 .text[data-v-e31bd47b]{font-weight:500;color:var(--vp-c-text-2)}.VPSidebarItem.level-0.is-link>.item>.link:hover .text[data-v-e31bd47b],.VPSidebarItem.level-1.is-link>.item>.link:hover .text[data-v-e31bd47b],.VPSidebarItem.level-2.is-link>.item>.link:hover .text[data-v-e31bd47b],.VPSidebarItem.level-3.is-link>.item>.link:hover .text[data-v-e31bd47b],.VPSidebarItem.level-4.is-link>.item>.link:hover .text[data-v-e31bd47b],.VPSidebarItem.level-5.is-link>.item>.link:hover 
.text[data-v-e31bd47b]{color:var(--vp-c-brand-1)}.VPSidebarItem.level-0.has-active>.item>.text[data-v-e31bd47b],.VPSidebarItem.level-1.has-active>.item>.text[data-v-e31bd47b],.VPSidebarItem.level-2.has-active>.item>.text[data-v-e31bd47b],.VPSidebarItem.level-3.has-active>.item>.text[data-v-e31bd47b],.VPSidebarItem.level-4.has-active>.item>.text[data-v-e31bd47b],.VPSidebarItem.level-5.has-active>.item>.text[data-v-e31bd47b],.VPSidebarItem.level-0.has-active>.item>.link>.text[data-v-e31bd47b],.VPSidebarItem.level-1.has-active>.item>.link>.text[data-v-e31bd47b],.VPSidebarItem.level-2.has-active>.item>.link>.text[data-v-e31bd47b],.VPSidebarItem.level-3.has-active>.item>.link>.text[data-v-e31bd47b],.VPSidebarItem.level-4.has-active>.item>.link>.text[data-v-e31bd47b],.VPSidebarItem.level-5.has-active>.item>.link>.text[data-v-e31bd47b]{color:var(--vp-c-text-1)}.VPSidebarItem.level-0.is-active>.item .link>.text[data-v-e31bd47b],.VPSidebarItem.level-1.is-active>.item .link>.text[data-v-e31bd47b],.VPSidebarItem.level-2.is-active>.item .link>.text[data-v-e31bd47b],.VPSidebarItem.level-3.is-active>.item .link>.text[data-v-e31bd47b],.VPSidebarItem.level-4.is-active>.item .link>.text[data-v-e31bd47b],.VPSidebarItem.level-5.is-active>.item .link>.text[data-v-e31bd47b]{color:var(--vp-c-brand-1)}.caret[data-v-e31bd47b]{display:flex;justify-content:center;align-items:center;margin-right:-7px;width:32px;height:32px;color:var(--vp-c-text-3);cursor:pointer;transition:color .25s;flex-shrink:0}.item:hover .caret[data-v-e31bd47b]{color:var(--vp-c-text-2)}.item:hover .caret[data-v-e31bd47b]:hover{color:var(--vp-c-text-1)}.caret-icon[data-v-e31bd47b]{width:18px;height:18px;fill:currentColor;transform:rotate(90deg);transition:transform .25s}.VPSidebarItem.collapsed .caret-icon[data-v-e31bd47b]{transform:rotate(0)}.VPSidebarItem.level-1 .items[data-v-e31bd47b],.VPSidebarItem.level-2 .items[data-v-e31bd47b],.VPSidebarItem.level-3 .items[data-v-e31bd47b],.VPSidebarItem.level-4 
.items[data-v-e31bd47b],.VPSidebarItem.level-5 .items[data-v-e31bd47b]{border-left:1px solid var(--vp-c-divider);padding-left:16px}.VPSidebarItem.collapsed .items[data-v-e31bd47b]{display:none}.VPSidebar[data-v-7f44e717]{position:fixed;top:var(--vp-layout-top-height, 0px);bottom:0;left:0;z-index:var(--vp-z-index-sidebar);padding:32px 32px 96px;width:calc(100vw - 64px);max-width:320px;background-color:var(--vp-sidebar-bg-color);opacity:0;box-shadow:var(--vp-c-shadow-3);overflow-x:hidden;overflow-y:auto;transform:translate(-100%);transition:opacity .5s,transform .25s ease;overscroll-behavior:contain}.VPSidebar.open[data-v-7f44e717]{opacity:1;visibility:visible;transform:translate(0);transition:opacity .25s,transform .5s cubic-bezier(.19,1,.22,1)}.dark .VPSidebar[data-v-7f44e717]{box-shadow:var(--vp-shadow-1)}@media (min-width: 960px){.VPSidebar[data-v-7f44e717]{z-index:1;padding-top:var(--vp-nav-height);width:var(--vp-sidebar-width);max-width:100%;background-color:var(--vp-sidebar-bg-color);opacity:1;visibility:visible;box-shadow:none;transform:translate(0)}}@media (min-width: 1440px){.VPSidebar[data-v-7f44e717]{padding-left:max(32px,calc((100% - (var(--vp-layout-max-width) - 64px)) / 2));width:calc((100% - (var(--vp-layout-max-width) - 64px)) / 2 + var(--vp-sidebar-width) - 32px)}}@media (min-width: 960px){.curtain[data-v-7f44e717]{position:sticky;top:-64px;left:0;z-index:1;margin-top:calc(var(--vp-nav-height) * -1);margin-right:-32px;margin-left:-32px;height:var(--vp-nav-height);background-color:var(--vp-sidebar-bg-color)}}.nav[data-v-7f44e717]{outline:0}.group+.group[data-v-7f44e717]{border-top:1px solid var(--vp-c-divider);padding-top:10px}@media (min-width: 960px){.group[data-v-7f44e717]{padding-top:10px;width:calc(var(--vp-sidebar-width) - 64px)}}.VPSkipLink[data-v-0f60ec36]{top:8px;left:8px;padding:8px 
16px;z-index:999;border-radius:8px;font-size:12px;font-weight:700;text-decoration:none;color:var(--vp-c-brand-1);box-shadow:var(--vp-shadow-3);background-color:var(--vp-c-bg)}.VPSkipLink[data-v-0f60ec36]:focus{height:auto;width:auto;clip:auto;clip-path:none}@media (min-width: 1280px){.VPSkipLink[data-v-0f60ec36]{top:14px;left:16px}}.Layout[data-v-5a346dfe]{display:flex;flex-direction:column;min-height:100vh}.VPHomeSponsors[data-v-96bd69d5]{border-top:1px solid var(--vp-c-gutter);padding:88px 24px 96px;background-color:var(--vp-c-bg)}.container[data-v-96bd69d5]{margin:0 auto;max-width:1152px}.love[data-v-96bd69d5]{margin:0 auto;width:28px;height:28px;color:var(--vp-c-text-3)}.icon[data-v-96bd69d5]{width:28px;height:28px;fill:currentColor}.message[data-v-96bd69d5]{margin:0 auto;padding-top:10px;max-width:320px;text-align:center;line-height:24px;font-size:16px;font-weight:500;color:var(--vp-c-text-2)}.sponsors[data-v-96bd69d5]{padding-top:32px}.action[data-v-96bd69d5]{padding-top:40px;text-align:center}.VPTeamPage[data-v-10b00018]{padding-bottom:96px}@media (min-width: 768px){.VPTeamPage[data-v-10b00018]{padding-bottom:128px}}.VPTeamPageSection+.VPTeamPageSection[data-v-10b00018-s],.VPTeamMembers+.VPTeamPageSection[data-v-10b00018-s]{margin-top:64px}.VPTeamMembers+.VPTeamMembers[data-v-10b00018-s]{margin-top:24px}@media (min-width: 768px){.VPTeamPageTitle+.VPTeamPageSection[data-v-10b00018-s]{margin-top:16px}.VPTeamPageSection+.VPTeamPageSection[data-v-10b00018-s],.VPTeamMembers+.VPTeamPageSection[data-v-10b00018-s]{margin-top:96px}}.VPTeamMembers[data-v-10b00018-s]{padding:0 24px}@media (min-width: 768px){.VPTeamMembers[data-v-10b00018-s]{padding:0 48px}}@media (min-width: 960px){.VPTeamMembers[data-v-10b00018-s]{padding:0 64px}}.VPTeamPageTitle[data-v-bf2cbdac]{padding:48px 32px;text-align:center}@media (min-width: 768px){.VPTeamPageTitle[data-v-bf2cbdac]{padding:64px 48px 48px}}@media (min-width: 960px){.VPTeamPageTitle[data-v-bf2cbdac]{padding:80px 64px 
48px}}.title[data-v-bf2cbdac]{letter-spacing:0;line-height:44px;font-size:36px;font-weight:500}@media (min-width: 768px){.title[data-v-bf2cbdac]{letter-spacing:-.5px;line-height:56px;font-size:48px}}.lead[data-v-bf2cbdac]{margin:0 auto;max-width:512px;padding-top:12px;line-height:24px;font-size:16px;font-weight:500;color:var(--vp-c-text-2)}@media (min-width: 768px){.lead[data-v-bf2cbdac]{max-width:592px;letter-spacing:.15px;line-height:28px;font-size:20px}}.VPTeamPageSection[data-v-b1a88750]{padding:0 32px}@media (min-width: 768px){.VPTeamPageSection[data-v-b1a88750]{padding:0 48px}}@media (min-width: 960px){.VPTeamPageSection[data-v-b1a88750]{padding:0 64px}}.title[data-v-b1a88750]{position:relative;margin:0 auto;max-width:1152px;text-align:center;color:var(--vp-c-text-2)}.title-line[data-v-b1a88750]{position:absolute;top:16px;left:0;width:100%;height:1px;background-color:var(--vp-c-divider)}.title-text[data-v-b1a88750]{position:relative;display:inline-block;padding:0 24px;letter-spacing:0;line-height:32px;font-size:20px;font-weight:500;background-color:var(--vp-c-bg)}.lead[data-v-b1a88750]{margin:0 auto;max-width:480px;padding-top:12px;text-align:center;line-height:24px;font-size:16px;font-weight:500;color:var(--vp-c-text-2)}.members[data-v-b1a88750]{padding-top:40px}.VPTeamMembersItem[data-v-28528e42]{display:flex;flex-direction:column;gap:2px;border-radius:12px;width:100%;height:100%;overflow:hidden}.VPTeamMembersItem.small .profile[data-v-28528e42]{padding:32px}.VPTeamMembersItem.small .data[data-v-28528e42]{padding-top:20px}.VPTeamMembersItem.small .avatar[data-v-28528e42]{width:64px;height:64px}.VPTeamMembersItem.small .name[data-v-28528e42]{line-height:24px;font-size:16px}.VPTeamMembersItem.small .affiliation[data-v-28528e42]{padding-top:4px;line-height:20px;font-size:14px}.VPTeamMembersItem.small .desc[data-v-28528e42]{padding-top:12px;line-height:20px;font-size:14px}.VPTeamMembersItem.small .links[data-v-28528e42]{margin:0 -16px -20px;padding:10px 0 
0}.VPTeamMembersItem.medium .profile[data-v-28528e42]{padding:48px 32px}.VPTeamMembersItem.medium .data[data-v-28528e42]{padding-top:24px;text-align:center}.VPTeamMembersItem.medium .avatar[data-v-28528e42]{width:96px;height:96px}.VPTeamMembersItem.medium .name[data-v-28528e42]{letter-spacing:.15px;line-height:28px;font-size:20px}.VPTeamMembersItem.medium .affiliation[data-v-28528e42]{padding-top:4px;font-size:16px}.VPTeamMembersItem.medium .desc[data-v-28528e42]{padding-top:16px;max-width:288px;font-size:16px}.VPTeamMembersItem.medium .links[data-v-28528e42]{margin:0 -16px -12px;padding:16px 12px 0}.profile[data-v-28528e42]{flex-grow:1;background-color:var(--vp-c-bg-soft)}.data[data-v-28528e42]{text-align:center}.avatar[data-v-28528e42]{position:relative;flex-shrink:0;margin:0 auto;border-radius:50%;box-shadow:var(--vp-shadow-3)}.avatar-img[data-v-28528e42]{position:absolute;top:0;right:0;bottom:0;left:0;border-radius:50%;object-fit:cover}.name[data-v-28528e42]{margin:0;font-weight:600}.affiliation[data-v-28528e42]{margin:0;font-weight:500;color:var(--vp-c-text-2)}.org.link[data-v-28528e42]{color:var(--vp-c-text-2);transition:color .25s}.org.link[data-v-28528e42]:hover{color:var(--vp-c-brand-1)}.desc[data-v-28528e42]{margin:0 auto}.desc[data-v-28528e42] a{font-weight:500;color:var(--vp-c-brand-1);text-decoration-style:dotted;transition:color .25s}.links[data-v-28528e42]{display:flex;justify-content:center;height:56px}.sp-link[data-v-28528e42]{display:flex;justify-content:center;align-items:center;text-align:center;padding:16px;font-size:14px;font-weight:500;color:var(--vp-c-sponsor);background-color:var(--vp-c-bg-soft);transition:color .25s,background-color .25s}.sp .sp-link.link[data-v-28528e42]:hover,.sp .sp-link.link[data-v-28528e42]:focus{outline:none;color:var(--vp-c-white);background-color:var(--vp-c-sponsor)}.sp-icon[data-v-28528e42]{margin-right:8px;width:16px;height:16px;fill:currentColor}.VPTeamMembers.small 
.container[data-v-6cb0dbc4]{grid-template-columns:repeat(auto-fit,minmax(224px,1fr))}.VPTeamMembers.small.count-1 .container[data-v-6cb0dbc4]{max-width:276px}.VPTeamMembers.small.count-2 .container[data-v-6cb0dbc4]{max-width:576px}.VPTeamMembers.small.count-3 .container[data-v-6cb0dbc4]{max-width:876px}.VPTeamMembers.medium .container[data-v-6cb0dbc4]{grid-template-columns:repeat(auto-fit,minmax(256px,1fr))}@media (min-width: 375px){.VPTeamMembers.medium .container[data-v-6cb0dbc4]{grid-template-columns:repeat(auto-fit,minmax(288px,1fr))}}.VPTeamMembers.medium.count-1 .container[data-v-6cb0dbc4]{max-width:368px}.VPTeamMembers.medium.count-2 .container[data-v-6cb0dbc4]{max-width:760px}.container[data-v-6cb0dbc4]{display:grid;gap:24px;margin:0 auto;max-width:1152px}.github-link{margin-top:2rem}.note{background-color:#eaecef66;border-radius:6px;-webkit-border-radius:6px;-moz-border-radius:6px;padding:.5rem 1.5rem;margin:1rem 0rem}.warning,.warn{background-color:#f63;color:#333;border-radius:6px;-webkit-border-radius:6px;-moz-border-radius:6px;padding:.5rem 1.5rem}.loading{color:gray}@media screen and (max-width: 719px){.note{border-radius:0;-webkit-border-radius:0px;-moz-border-radius:0px;padding:.5rem 1.5rem;margin:0rem -1.5rem}}canvas{background-color:#000}.webgpu_example_button{height:33px;font-size:14px;padding:0 8px;margin-top:16px;border:1px solid rgba(60,60,60,.15);border-radius:8px}.webgpu_example_button:hover{border-color:#059669}.auto-github-link{margin-top:1rem;line-height:40px}#wasm-example canvas{background-color:#000}#wasm-example button{height:33px;font-size:14px;padding:0 8px;border:1px solid rgba(60,60,60,.15);border-radius:8px}#wasm-example button:hover{border-color:#059669}body{margin:0}.a{color:#5a6}#simuverse_container{position:fixed;top:64px;left:0;right:0;bottom:0;background-color:#353535;min-width:450px;min-height:500px}#alert,#loading{text-align:center;justify-content:center;align-items:center;color:#fff;font-size:20px;margin-top:64px} diff 
--git a/assets/style.b473e453.css b/assets/style.b473e453.css deleted file mode 100644 index 9011b8017..000000000 --- a/assets/style.b473e453.css +++ /dev/null @@ -1 +0,0 @@ -@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-cyrillic.5f2c6c8c.woff2) format("woff2");unicode-range:U+0301,U+0400-045F,U+0490-0491,U+04B0-04B1,U+2116}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-cyrillic-ext.e75737ce.woff2) format("woff2");unicode-range:U+0460-052F,U+1C80-1C88,U+20B4,U+2DE0-2DFF,U+A640-A69F,U+FE2E-FE2F}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-greek.d5a6d92a.woff2) format("woff2");unicode-range:U+0370-03FF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-greek-ext.ab0619bc.woff2) format("woff2");unicode-range:U+1F00-1FFF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-latin.2ed14f66.woff2) format("woff2");unicode-range:U+0000-00FF,U+0131,U+0152-0153,U+02BB-02BC,U+02C6,U+02DA,U+02DC,U+2000-206F,U+2074,U+20AC,U+2122,U+2191,U+2193,U+2212,U+2215,U+FEFF,U+FFFD}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-latin-ext.0030eebd.woff2) format("woff2");unicode-range:U+0100-024F,U+0259,U+1E00-1EFF,U+2020,U+20A0-20AB,U+20AD-20CF,U+2113,U+2C60-2C7F,U+A720-A7FF}@font-face{font-family:Inter var;font-weight:100 
900;font-display:swap;font-style:normal;font-named-instance:"Regular";src:url(/learn-wgpu-zh/assets/inter-roman-vietnamese.14ce25a6.woff2) format("woff2");unicode-range:U+0102-0103,U+0110-0111,U+0128-0129,U+0168-0169,U+01A0-01A1,U+01AF-01B0,U+1EA0-1EF9,U+20AB}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-cyrillic.ea42a392.woff2) format("woff2");unicode-range:U+0301,U+0400-045F,U+0490-0491,U+04B0-04B1,U+2116}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-cyrillic-ext.33bd5a8e.woff2) format("woff2");unicode-range:U+0460-052F,U+1C80-1C88,U+20B4,U+2DE0-2DFF,U+A640-A69F,U+FE2E-FE2F}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-greek.8f4463c4.woff2) format("woff2");unicode-range:U+0370-03FF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-greek-ext.4fbe9427.woff2) format("woff2");unicode-range:U+1F00-1FFF}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-latin.bd3b6f56.woff2) format("woff2");unicode-range:U+0000-00FF,U+0131,U+0152-0153,U+02BB-02BC,U+02C6,U+02DA,U+02DC,U+2000-206F,U+2074,U+20AC,U+2122,U+2191,U+2193,U+2212,U+2215,U+FEFF,U+FFFD}@font-face{font-family:Inter var;font-weight:100 900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-latin-ext.bd8920cc.woff2) format("woff2");unicode-range:U+0100-024F,U+0259,U+1E00-1EFF,U+2020,U+20A0-20AB,U+20AD-20CF,U+2113,U+2C60-2C7F,U+A720-A7FF}@font-face{font-family:Inter var;font-weight:100 
900;font-display:swap;font-style:italic;font-named-instance:"Italic";src:url(/learn-wgpu-zh/assets/inter-italic-vietnamese.6ce511fb.woff2) format("woff2");unicode-range:U+0102-0103,U+0110-0111,U+0128-0129,U+0168-0169,U+01A0-01A1,U+01AF-01B0,U+1EA0-1EF9,U+20AB}@font-face{font-family:Chinese Quotes;src:local("PingFang SC Regular"),local("PingFang SC"),local("SimHei"),local("Source Han Sans SC");unicode-range:U+2018,U+2019,U+201C,U+201D}:root{--vp-c-white: #ffffff;--vp-c-black: #000000;--vp-c-gray: #8e8e93;--vp-c-text-light-1: rgba(60, 60, 67);--vp-c-text-light-2: rgba(60, 60, 67, .75);--vp-c-text-light-3: rgba(60, 60, 67, .33);--vp-c-text-dark-1: rgba(255, 255, 245, .86);--vp-c-text-dark-2: rgba(235, 235, 245, .6);--vp-c-text-dark-3: rgba(235, 235, 245, .38);--vp-c-sponsor: #db2777}:root{--vp-c-bg: #ffffff;--vp-c-bg-elv: #ffffff;--vp-c-bg-elv-up: #ffffff;--vp-c-bg-elv-down: #f6f6f7;--vp-c-bg-elv-mute: #f6f6f7;--vp-c-bg-soft: #f6f6f7;--vp-c-bg-soft-up: #f9f9fa;--vp-c-bg-soft-down: #e3e3e5;--vp-c-bg-soft-mute: #e3e3e5;--vp-c-bg-alt: #f6f6f7;--vp-c-border: #c2c2c4;--vp-c-divider: #e2e2e3;--vp-c-gutter: #e2e2e3;--vp-c-neutral: var(--vp-c-black);--vp-c-neutral-inverse: var(--vp-c-white);--vp-c-text-1: var(--vp-c-text-light-1);--vp-c-text-2: var(--vp-c-text-light-2);--vp-c-text-3: var(--vp-c-text-light-3);--vp-c-text-inverse-1: var(--vp-c-text-dark-1);--vp-c-text-inverse-2: var(--vp-c-text-dark-2);--vp-c-text-inverse-3: var(--vp-c-text-dark-3);--vp-c-text-code: #476582;--vp-c-mute: #eeeef0;--vp-c-mute-light: #f3f3f9;--vp-c-mute-lighter: #f9f9fa;--vp-c-mute-dark: #e3e3e5;--vp-c-mute-darker: #d7d7d9;--vp-c-brand: #4565d8;--vp-c-brand-light: #708fff;--vp-c-brand-lighter: #a4b2ff;--vp-c-brand-lightest: #ecefff;--vp-c-brand-dark: #4565d8;--vp-c-brand-darker: #23459f;--vp-c-brand-darkest: #132f59;--vp-c-brand-dimm-1: rgba(112, 143, 255, .05);--vp-c-brand-dimm-2: rgba(112, 143, 255, .2);--vp-c-brand-dimm-3: rgba(112, 143, 255, .5);--vp-c-green: #10b981;--vp-c-green-light: 
#34d399;--vp-c-green-lighter: #6ee7b7;--vp-c-green-dark: #059669;--vp-c-green-darker: #047857;--vp-c-green-dimm-1: rgba(16, 185, 129, .05);--vp-c-green-dimm-2: rgba(16, 185, 129, .2);--vp-c-green-dimm-3: rgba(16, 185, 129, .5);--vp-c-yellow: #d97706;--vp-c-yellow-light: #f59e0b;--vp-c-yellow-lighter: #fbbf24;--vp-c-yellow-lightest: #fcd34d;--vp-c-yellow-dark: #b45309;--vp-c-yellow-darker: #92400e;--vp-c-yellow-darkest: #6c3d08;--vp-c-yellow-dimm-1: rgba(234, 179, 8, .05);--vp-c-yellow-dimm-2: rgba(234, 179, 8, .2);--vp-c-yellow-dimm-3: rgba(234, 179, 8, .5);--vp-c-red: #f43f5e;--vp-c-red-light: #fb7185;--vp-c-red-lighter: #fda4af;--vp-c-red-lightest: #fdd6e0;--vp-c-red-dark: #e11d48;--vp-c-red-darker: #be123c;--vp-c-red-darkest: #9f1239;--vp-c-red-dimm-1: rgba(244, 63, 94, .05);--vp-c-red-dimm-2: rgba(244, 63, 94, .2);--vp-c-red-dimm-3: rgba(244, 63, 94, .5)}.dark{--vp-c-bg: #1e1e20;--vp-c-bg-elv: #252529;--vp-c-bg-elv-up: #323238;--vp-c-bg-elv-down: #1e1e20;--vp-c-bg-elv-mute: #323238;--vp-c-bg-soft: #252529;--vp-c-bg-soft-up: #323238;--vp-c-bg-soft-down: #1e1e20;--vp-c-bg-soft-mute: #323238;--vp-c-bg-alt: #161618;--vp-c-border: rgba(82, 82, 89, .68);--vp-c-divider: rgba(82, 82, 89, .32);--vp-c-gutter: #000000;--vp-c-neutral: var(--vp-c-white);--vp-c-neutral-inverse: var(--vp-c-black);--vp-c-text-1: var(--vp-c-text-dark-1);--vp-c-text-2: var(--vp-c-text-dark-2);--vp-c-text-3: var(--vp-c-text-dark-3);--vp-c-text-inverse-1: var(--vp-c-text-light-1);--vp-c-text-inverse-2: var(--vp-c-text-light-2);--vp-c-text-inverse-3: var(--vp-c-text-light-3);--vp-c-text-code: #c9def1;--vp-c-mute: #323238;--vp-c-mute-light: #3a3a3c;--vp-c-mute-lighter: #505053;--vp-c-mute-dark: #222226;--vp-c-mute-darker: #505053;--vp-c-brand: #708fff;--vp-c-brand-light: #a4b2ff;--vp-c-brand-lighter: #a4c8ff;--vp-c-brand-lightest: #ecefff;--vp-c-brand-dark: #4565d8;--vp-c-brand-darker: #23459f;--vp-c-brand-darkest: #132f59;--vp-c-brand-dimm-1: rgba(112, 143, 255, .05);--vp-c-brand-dimm-2: rgba(112, 
143, 255, .2);--vp-c-brand-dimm-3: rgba(112, 143, 255, .5);--vp-c-green: #10b981;--vp-c-green-light: #34d399;--vp-c-green-lighter: #6ee7b7;--vp-c-green-lightest: #a7f3d0;--vp-c-green-dark: #059669;--vp-c-green-darker: #047857;--vp-c-green-darkest: #065f46;--vp-c-green-dimm-1: rgba(16, 185, 129, .05);--vp-c-green-dimm-2: rgba(16, 185, 129, .2);--vp-c-green-dimm-3: rgba(16, 185, 129, .5);--vp-c-yellow: #d1a336;--vp-c-yellow-light: #f8e3a1;--vp-c-yellow-lighter: #fff0c0;--vp-c-yellow-lightest: #fff7dc;--vp-c-yellow-dark: #b45309;--vp-c-yellow-darker: #92400e;--vp-c-yellow-darkest: #6c3d08;--vp-c-yellow-dimm-1: rgba(234, 179, 8, .05);--vp-c-yellow-dimm-2: rgba(234, 179, 8, .2);--vp-c-yellow-dimm-3: rgba(234, 179, 8, .5);--vp-c-red: #f43f5e;--vp-c-red-light: #fb7185;--vp-c-red-lighter: #fda4af;--vp-c-red-lightest: #fdd6e0;--vp-c-red-dark: #e11d48;--vp-c-red-darker: #be123c;--vp-c-red-darkest: #9f1239;--vp-c-red-dimm-1: rgba(244, 63, 94, .05);--vp-c-red-dimm-2: rgba(244, 63, 94, .2);--vp-c-red-dimm-3: rgba(244, 63, 94, .5)}:root{--vp-font-family-base: "Chinese Quotes", "Inter var", "Inter", ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Helvetica, Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";--vp-font-family-mono: ui-monospace, SFMono-Regular, "SF Mono", Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace}:root{--vp-shadow-1: 0 1px 2px rgba(0, 0, 0, .04), 0 1px 2px rgba(0, 0, 0, .06);--vp-shadow-2: 0 3px 12px rgba(0, 0, 0, .07), 0 1px 4px rgba(0, 0, 0, .07);--vp-shadow-3: 0 12px 32px rgba(0, 0, 0, .1), 0 2px 6px rgba(0, 0, 0, .08);--vp-shadow-4: 0 14px 44px rgba(0, 0, 0, .12), 0 3px 9px rgba(0, 0, 0, .12);--vp-shadow-5: 0 18px 56px rgba(0, 0, 0, .16), 0 4px 12px rgba(0, 0, 0, .16)}:root{--vp-z-index-local-nav: 10;--vp-z-index-nav: 20;--vp-z-index-layout-top: 30;--vp-z-index-backdrop: 40;--vp-z-index-sidebar: 50;--vp-z-index-footer: 
60}:root{--vp-icon-copy: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' height='20' width='20' stroke='rgba(128,128,128,1)' stroke-width='2' viewBox='0 0 24 24'%3E%3Cpath stroke-linecap='round' stroke-linejoin='round' d='M9 5H7a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V7a2 2 0 0 0-2-2h-2M9 5a2 2 0 0 0 2 2h2a2 2 0 0 0 2-2M9 5a2 2 0 0 1 2-2h2a2 2 0 0 1 2 2'/%3E%3C/svg%3E");--vp-icon-copied: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' height='20' width='20' stroke='rgba(128,128,128,1)' stroke-width='2' viewBox='0 0 24 24'%3E%3Cpath stroke-linecap='round' stroke-linejoin='round' d='M9 5H7a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V7a2 2 0 0 0-2-2h-2M9 5a2 2 0 0 0 2 2h2a2 2 0 0 0 2-2M9 5a2 2 0 0 1 2-2h2a2 2 0 0 1 2 2m-6 9 2 2 4-4'/%3E%3C/svg%3E")}:root{--vp-layout-max-width: 1440px}:root{--vp-header-anchor-symbol: "#"}:root{--vp-code-line-height: 1.7;--vp-code-font-size: .875em;--vp-c-code-dimm: var(--vp-c-text-dark-3);--vp-code-block-color: var(--vp-c-text-dark-1);--vp-code-block-bg: #161618;--vp-code-block-bg-light: #1e1e20;--vp-code-block-divider-color: #000000;--vp-code-line-highlight-color: rgba(0, 0, 0, .5);--vp-code-line-number-color: var(--vp-c-code-dimm);--vp-code-line-diff-add-color: var(--vp-c-green-dimm-2);--vp-code-line-diff-add-symbol-color: var(--vp-c-green);--vp-code-line-diff-remove-color: var(--vp-c-red-dimm-2);--vp-code-line-diff-remove-symbol-color: var(--vp-c-red);--vp-code-line-warning-color: var(--vp-c-yellow-dimm-2);--vp-code-line-error-color: var(--vp-c-red-dimm-2);--vp-code-copy-code-border-color: transparent;--vp-code-copy-code-bg: var(--vp-code-block-bg-light);--vp-code-copy-code-hover-border-color: var(--vp-c-divider);--vp-code-copy-code-hover-bg: var(--vp-code-block-bg-light);--vp-code-copy-code-active-text: var(--vp-c-text-dark-2);--vp-code-tab-divider: var(--vp-code-block-divider-color);--vp-code-tab-text-color: var(--vp-c-text-dark-2);--vp-code-tab-bg: 
var(--vp-code-block-bg);--vp-code-tab-hover-text-color: var(--vp-c-text-dark-1);--vp-code-tab-active-text-color: var(--vp-c-text-dark-1);--vp-code-tab-active-bar-color: var(--vp-c-brand)}.dark{--vp-code-block-bg: #161618}:root:not(.dark) .vp-adaptive-theme{--vp-c-code-dimm: var(--vp-c-text-2);--vp-code-block-color: var(--vp-c-text-1);--vp-code-block-bg: #f8f8f8;--vp-code-block-divider-color: var(--vp-c-divider);--vp-code-line-highlight-color: #ececec;--vp-code-line-number-color: var(--vp-c-code-dimm);--vp-code-copy-code-bg: #e2e2e2;--vp-code-copy-code-hover-bg: #dcdcdc;--vp-code-copy-code-active-text: var(--vp-c-text-2);--vp-code-tab-divider: var(--vp-c-divider);--vp-code-tab-text-color: var(--vp-c-text-2);--vp-code-tab-bg: var(--vp-code-block-bg);--vp-code-tab-hover-text-color: var(--vp-c-text-1);--vp-code-tab-active-text-color: var(--vp-c-text-1)}:root{--vp-button-brand-border: var(--vp-c-brand);--vp-button-brand-text: var(--vp-c-white);--vp-button-brand-bg: var(--vp-c-brand-dark);--vp-button-brand-hover-border: var(--vp-c-brand-light);--vp-button-brand-hover-text: var(--vp-c-white);--vp-button-brand-hover-bg: var(--vp-c-brand-darker);--vp-button-brand-active-border: var(--vp-c-brand-light);--vp-button-brand-active-text: var(--vp-c-white);--vp-button-brand-active-bg: var(--vp-c-brand-darkest);--vp-button-alt-border: var(--vp-c-border);--vp-button-alt-text: var(--vp-c-neutral);--vp-button-alt-bg: var(--vp-c-mute);--vp-button-alt-hover-border: var(--vp-c-border);--vp-button-alt-hover-text: var(--vp-c-neutral);--vp-button-alt-hover-bg: var(--vp-c-mute-dark);--vp-button-alt-active-border: var(--vp-c-border);--vp-button-alt-active-text: var(--vp-c-neutral);--vp-button-alt-active-bg: var(--vp-c-mute-darker);--vp-button-sponsor-border: var(--vp-c-gray-light-3);--vp-button-sponsor-text: var(--vp-c-text-light-2);--vp-button-sponsor-bg: transparent;--vp-button-sponsor-hover-border: var(--vp-c-sponsor);--vp-button-sponsor-hover-text: 
var(--vp-c-sponsor);--vp-button-sponsor-hover-bg: transparent;--vp-button-sponsor-active-border: var(--vp-c-sponsor);--vp-button-sponsor-active-text: var(--vp-c-sponsor);--vp-button-sponsor-active-bg: transparent}.dark{--vp-button-sponsor-border: var(--vp-c-gray-dark-1);--vp-button-sponsor-text: var(--vp-c-text-dark-2)}:root{--vp-custom-block-font-size: 14px;--vp-custom-block-code-font-size: 13px;--vp-custom-block-info-border: var(--vp-c-border);--vp-custom-block-info-text: var(--vp-c-text-2);--vp-custom-block-info-bg: var(--vp-c-bg-soft);--vp-custom-block-info-code-bg: var(--vp-c-mute);--vp-custom-block-tip-border: var(--vp-c-brand);--vp-custom-block-tip-text: var(--vp-c-brand);--vp-custom-block-tip-bg: var(--vp-c-bg-soft);--vp-custom-block-tip-code-bg: var(--vp-c-mute);--vp-custom-block-warning-border: var(--vp-c-yellow);--vp-custom-block-warning-text: var(--vp-c-yellow);--vp-custom-block-warning-bg: var(--vp-c-bg-soft);--vp-custom-block-warning-code-bg: var(--vp-c-mute);--vp-custom-block-danger-border: var(--vp-c-red);--vp-custom-block-danger-text: var(--vp-c-red);--vp-custom-block-danger-bg: var(--vp-c-bg-soft);--vp-custom-block-danger-code-bg: var(--vp-c-mute);--vp-custom-block-details-border: var(--vp-custom-block-info-border);--vp-custom-block-details-text: var(--vp-custom-block-info-text);--vp-custom-block-details-bg: var(--vp-custom-block-info-bg);--vp-custom-block-details-code-bg: var(--vp-custom-block-details-bg)}:root{--vp-input-border-color: var(--vp-c-border);--vp-input-bg-color: var(--vp-c-bg-alt);--vp-input-hover-border-color: var(--vp-c-gray);--vp-input-switch-bg-color: var(--vp-c-mute)}:root{--vp-nav-height: 64px;--vp-nav-bg-color: var(--vp-c-bg);--vp-nav-screen-bg-color: var(--vp-c-bg);--vp-nav-logo-height: 24px}:root{--vp-local-nav-bg-color: var(--vp-c-bg)}:root{--vp-sidebar-width: 272px;--vp-sidebar-bg-color: var(--vp-c-bg-alt)}:root{--vp-backdrop-bg-color: rgba(0, 0, 0, .6)}:root{--vp-home-hero-name-color: 
var(--vp-c-brand);--vp-home-hero-name-background: transparent;--vp-home-hero-image-background-image: none;--vp-home-hero-image-filter: none}:root{--vp-badge-info-border: var(--vp-c-border);--vp-badge-info-text: var(--vp-c-text-2);--vp-badge-info-bg: var(--vp-c-mute);--vp-badge-tip-border: var(--vp-c-brand);--vp-badge-tip-text: var(--vp-c-brand);--vp-badge-tip-bg: var(--vp-c-brand-dimm-1);--vp-badge-warning-border: var(--vp-c-yellow);--vp-badge-warning-text: var(--vp-c-yellow);--vp-badge-warning-bg: var(--vp-c-yellow-dimm-1);--vp-badge-danger-border: var(--vp-c-red);--vp-badge-danger-text: var(--vp-c-red);--vp-badge-danger-bg: var(--vp-c-red-dimm-1)}:root{--vp-carbon-ads-text-color: var(--vp-c-text-1);--vp-carbon-ads-poweredby-color: var(--vp-c-text-2);--vp-carbon-ads-bg-color: var(--vp-c-bg-soft);--vp-carbon-ads-hover-text-color: var(--vp-c-brand);--vp-carbon-ads-hover-poweredby-color: var(--vp-c-text-1)}:root{--vp-local-search-bg: var(--vp-c-bg);--vp-local-search-result-bg: var(--vp-c-bg);--vp-local-search-result-border: var(--vp-c-divider);--vp-local-search-result-selected-bg: var(--vp-c-bg);--vp-local-search-result-selected-border: var(--vp-c-brand);--vp-local-search-highlight-bg: var(--vp-c-green-lighter);--vp-local-search-highlight-text: 
var(--vp-c-black)}*,:before,:after{box-sizing:border-box}html{line-height:1.4;font-size:16px;-webkit-text-size-adjust:100%}html.dark{color-scheme:dark}body{margin:0;width:100%;min-width:320px;min-height:100vh;line-height:24px;font-family:var(--vp-font-family-base);font-size:16px;font-weight:400;color:var(--vp-c-text-1);background-color:var(--vp-c-bg);direction:ltr;font-synthesis:style;text-rendering:optimizeLegibility;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}main{display:block}h1,h2,h3,h4,h5,h6{margin:0;line-height:24px;font-size:16px;font-weight:400}p{margin:0}strong,b{font-weight:600}a,area,button,[role=button],input,label,select,summary,textarea{touch-action:manipulation}a{color:inherit;text-decoration:inherit}ol,ul{list-style:none;margin:0;padding:0}blockquote{margin:0}pre,code,kbd,samp{font-family:var(--vp-font-family-mono)}img,svg,video,canvas,audio,iframe,embed,object{display:block}figure{margin:0}img,video{max-width:100%;height:auto}button,input,optgroup,select,textarea{border:0;padding:0;line-height:inherit;color:inherit}button{padding:0;font-family:inherit;background-color:transparent;background-image:none}button:enabled,[role=button]:enabled{cursor:pointer}button:focus,button:focus-visible{outline:1px dotted;outline:4px auto 
-webkit-focus-ring-color}button:focus:not(:focus-visible){outline:none!important}input:focus,textarea:focus,select:focus{outline:none}table{border-collapse:collapse}input{background-color:transparent}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:var(--vp-c-text-3)}input::-ms-input-placeholder,textarea::-ms-input-placeholder{color:var(--vp-c-text-3)}input::placeholder,textarea::placeholder{color:var(--vp-c-text-3)}input::-webkit-outer-spin-button,input::-webkit-inner-spin-button{-webkit-appearance:none;margin:0}input[type=number]{-moz-appearance:textfield}textarea{resize:vertical}select{-webkit-appearance:none}fieldset{margin:0;padding:0}h1,h2,h3,h4,h5,h6,li,p{overflow-wrap:break-word}vite-error-overlay{z-index:9999}.visually-hidden{position:absolute;width:1px;height:1px;white-space:nowrap;clip:rect(0 0 0 0);clip-path:inset(50%);overflow:hidden}.custom-block{border:1px solid transparent;border-radius:8px;padding:16px 16px 8px;line-height:24px;font-size:var(--vp-custom-block-font-size);color:var(--vp-c-text-2)}.custom-block.info{border-color:var(--vp-custom-block-info-border);color:var(--vp-custom-block-info-text);background-color:var(--vp-custom-block-info-bg)}.custom-block.custom-block th,.custom-block.custom-block blockquote>p{font-size:var(--vp-custom-block-font-size);color:inherit}.custom-block.info code{background-color:var(--vp-custom-block-info-code-bg)}.custom-block.tip{border-color:var(--vp-custom-block-tip-border);color:var(--vp-custom-block-tip-text);background-color:var(--vp-custom-block-tip-bg)}.custom-block.tip code{background-color:var(--vp-custom-block-tip-code-bg)}.custom-block.warning{border-color:var(--vp-custom-block-warning-border);color:var(--vp-custom-block-warning-text);background-color:var(--vp-custom-block-warning-bg)}.custom-block.warning 
code{background-color:var(--vp-custom-block-warning-code-bg)}.custom-block.danger{border-color:var(--vp-custom-block-danger-border);color:var(--vp-custom-block-danger-text);background-color:var(--vp-custom-block-danger-bg)}.custom-block.danger code{background-color:var(--vp-custom-block-danger-code-bg)}.custom-block.details{border-color:var(--vp-custom-block-details-border);color:var(--vp-custom-block-details-text);background-color:var(--vp-custom-block-details-bg)}.custom-block.details code{background-color:var(--vp-custom-block-details-code-bg)}.custom-block-title{font-weight:600}.custom-block p+p{margin:8px 0}.custom-block.details summary{margin:0 0 8px;font-weight:700;cursor:pointer}.custom-block.details summary+p{margin:8px 0}.custom-block a{color:inherit;font-weight:600;text-decoration:underline dotted;text-underline-offset:2px;transition:opacity .25s}.custom-block a:hover{opacity:.75}.custom-block code{font-size:var(--vp-custom-block-code-font-size)}.dark .vp-code-light{display:none}html:not(.dark) .vp-code-dark{display:none}.vp-code-group{margin-top:16px}.vp-code-group .tabs{position:relative;display:flex;margin-right:-24px;margin-left:-24px;padding:0 12px;background-color:var(--vp-code-tab-bg);overflow-x:auto;overflow-y:hidden;box-shadow:inset 0 -1px var(--vp-code-tab-divider)}@media (min-width: 640px){.vp-code-group .tabs{margin-right:0;margin-left:0;border-radius:8px 8px 0 0}}.vp-code-group .tabs input{position:fixed;opacity:0;pointer-events:none}.vp-code-group .tabs label{position:relative;display:inline-block;border-bottom:1px solid transparent;padding:0 12px;line-height:48px;font-size:14px;font-weight:500;color:var(--vp-code-tab-text-color);white-space:nowrap;cursor:pointer;transition:color .25s}.vp-code-group .tabs label:after{position:absolute;right:8px;bottom:-1px;left:8px;z-index:1;height:1px;content:"";background-color:transparent;transition:background-color .25s}.vp-code-group label:hover{color:var(--vp-code-tab-hover-text-color)}.vp-code-group 
input:checked+label{color:var(--vp-code-tab-active-text-color)}.vp-code-group input:checked+label:after{background-color:var(--vp-code-tab-active-bar-color)}.vp-code-group div[class*=language-],.vp-block{display:none;margin-top:0!important;border-top-left-radius:0!important;border-top-right-radius:0!important}.vp-code-group div[class*=language-].active,.vp-block.active{display:block}.vp-block{padding:20px 24px}.vp-doc h1,.vp-doc h2,.vp-doc h3,.vp-doc h4,.vp-doc h5,.vp-doc h6{position:relative;font-weight:600;outline:none}.vp-doc h1{letter-spacing:-.02em;line-height:40px;font-size:28px}.vp-doc h2{margin:48px 0 16px;border-top:1px solid var(--vp-c-divider);padding-top:24px;letter-spacing:-.02em;line-height:32px;font-size:24px}.vp-doc h3{margin:32px 0 0;letter-spacing:-.01em;line-height:28px;font-size:20px}.vp-doc .header-anchor{position:absolute;top:0;left:0;margin-left:-.87em;font-weight:500;-webkit-user-select:none;user-select:none;opacity:0;transition:color .25s,opacity .25s}.vp-doc .header-anchor:before{content:var(--vp-header-anchor-symbol)}.vp-doc h1:hover .header-anchor,.vp-doc h1 .header-anchor:focus,.vp-doc h2:hover .header-anchor,.vp-doc h2 .header-anchor:focus,.vp-doc h3:hover .header-anchor,.vp-doc h3 .header-anchor:focus,.vp-doc h4:hover .header-anchor,.vp-doc h4 .header-anchor:focus,.vp-doc h5:hover .header-anchor,.vp-doc h5 .header-anchor:focus,.vp-doc h6:hover .header-anchor,.vp-doc h6 .header-anchor:focus{opacity:1}@media (min-width: 768px){.vp-doc h1{letter-spacing:-.02em;line-height:40px;font-size:32px}}.vp-doc h2 .header-anchor{top:24px}.vp-doc p,.vp-doc summary{margin:16px 0}.vp-doc p{line-height:28px}.vp-doc blockquote{margin:16px 0;border-left:2px solid var(--vp-c-divider);padding-left:16px;transition:border-color .5s}.vp-doc blockquote>p{margin:0;font-size:16px;color:var(--vp-c-text-2);transition:color .5s}.vp-doc a{font-weight:500;color:var(--vp-c-brand)}.vp-doc a:hover{text-decoration:underline dotted;text-underline-offset:2px}.vp-doc 
strong{font-weight:600}.vp-doc ul,.vp-doc ol{padding-left:1.25rem;margin:16px 0}.vp-doc ul{list-style:disc}.vp-doc ol{list-style:decimal}.vp-doc li+li{margin-top:8px}.vp-doc li>ol,.vp-doc li>ul{margin:8px 0 0}.vp-doc table{display:block;border-collapse:collapse;margin:20px 0;overflow-x:auto}.vp-doc tr{border-top:1px solid var(--vp-c-divider);transition:background-color .5s}.vp-doc tr:nth-child(2n){background-color:var(--vp-c-bg-soft)}.vp-doc th,.vp-doc td{border:1px solid var(--vp-c-divider);padding:8px 16px}.vp-doc th{text-align:left;font-size:14px;font-weight:600;color:var(--vp-c-text-2);background-color:var(--vp-c-bg-soft)}.vp-doc td{font-size:14px}.vp-doc hr{margin:16px 0;border:none;border-top:1px solid var(--vp-c-divider)}.vp-doc .custom-block{margin:16px 0}.vp-doc .custom-block p{margin:8px 0;line-height:24px}.vp-doc .custom-block p:first-child{margin:0}.vp-doc .custom-block a{color:inherit;font-weight:600}.vp-doc .custom-block code{font-size:var(--vp-custom-block-code-font-size);font-weight:700;color:inherit}.vp-doc .custom-block div[class*=language-]{margin:8px 0;border-radius:8px}.vp-doc .custom-block div[class*=language-] code{font-weight:400;background-color:transparent}.vp-doc .custom-block .vp-code-group .tabs{margin:0;border-radius:8px 8px 0 0}.vp-doc :not(pre,h1,h2,h3,h4,h5,h6)>code{font-size:var(--vp-code-font-size)}.vp-doc :not(pre)>code{border-radius:4px;padding:3px 6px;color:var(--vp-c-text-code);background-color:var(--vp-c-mute);transition:color .5s,background-color .5s}.vp-doc h1>code,.vp-doc h2>code,.vp-doc h3>code{font-size:.9em}.vp-doc a>code{color:var(--vp-c-brand)}.vp-doc div[class*=language-],.vp-block{position:relative;margin:16px -24px;background-color:var(--vp-code-block-bg);overflow-x:auto;transition:background-color .5s}@media (min-width: 640px){.vp-doc div[class*=language-],.vp-block{border-radius:8px;margin:16px 0}}@media (max-width: 639px){.vp-doc li div[class*=language-]{border-radius:8px 0 0 8px}}.vp-doc 
div[class*=language-]+div[class*=language-],.vp-doc div[class$=-api]+div[class*=language-],.vp-doc div[class*=language-]+div[class$=-api]>div[class*=language-]{margin-top:-8px}.vp-doc [class*=language-] pre,.vp-doc [class*=language-] code{direction:ltr;text-align:left;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-moz-hyphens:none;-ms-hyphens:none;hyphens:none}.vp-doc [class*=language-] pre{position:relative;z-index:1;margin:0;padding:20px 0;background:transparent;overflow-x:auto}.vp-doc [class*=language-] code{display:block;padding:0 24px;width:fit-content;min-width:100%;line-height:var(--vp-code-line-height);font-size:var(--vp-code-font-size);color:var(--vp-code-block-color);transition:color .5s}.vp-doc [class*=language-] code .highlighted{background-color:var(--vp-code-line-highlight-color);transition:background-color .5s;margin:0 -24px;padding:0 24px;width:calc(100% + 48px);display:inline-block}.vp-doc [class*=language-] code .highlighted.error{background-color:var(--vp-code-line-error-color)}.vp-doc [class*=language-] code .highlighted.warning{background-color:var(--vp-code-line-warning-color)}.vp-doc [class*=language-] code .diff{transition:background-color .5s;margin:0 -24px;padding:0 24px;width:calc(100% + 48px);display:inline-block}.vp-doc [class*=language-] code .diff:before{position:absolute;left:10px}.vp-doc [class*=language-] .has-focused-lines .line:not(.has-focus){filter:blur(.095rem);opacity:.4;transition:filter .35s,opacity .35s}.vp-doc [class*=language-] .has-focused-lines .line:not(.has-focus){opacity:.7;transition:filter .35s,opacity .35s}.vp-doc [class*=language-]:hover .has-focused-lines .line:not(.has-focus){filter:blur(0);opacity:1}.vp-doc [class*=language-] code .diff.remove{background-color:var(--vp-code-line-diff-remove-color);opacity:.7}.vp-doc [class*=language-] code 
.diff.remove:before{content:"-";color:var(--vp-code-line-diff-remove-symbol-color)}.vp-doc [class*=language-] code .diff.add{background-color:var(--vp-code-line-diff-add-color)}.vp-doc [class*=language-] code .diff.add:before{content:"+";color:var(--vp-code-line-diff-add-symbol-color)}.vp-doc div[class*=language-].line-numbers-mode{padding-left:32px}.vp-doc .line-numbers-wrapper{position:absolute;top:0;bottom:0;left:0;z-index:3;border-right:1px solid var(--vp-code-block-divider-color);padding-top:20px;width:32px;text-align:center;font-family:var(--vp-font-family-mono);line-height:var(--vp-code-line-height);font-size:var(--vp-code-font-size);color:var(--vp-code-line-number-color);transition:border-color .5s,color .5s}.vp-doc [class*=language-]>button.copy{direction:ltr;position:absolute;top:12px;right:12px;z-index:3;border:1px solid var(--vp-code-copy-code-border-color);border-radius:4px;width:40px;height:40px;background-color:var(--vp-code-copy-code-bg);opacity:0;cursor:pointer;background-image:var(--vp-icon-copy);background-position:50%;background-size:20px;background-repeat:no-repeat;transition:border-color .25s,background-color .25s,opacity .25s}.vp-doc [class*=language-]:hover>button.copy,.vp-doc [class*=language-]>button.copy:focus{opacity:1}.vp-doc [class*=language-]>button.copy:hover,.vp-doc [class*=language-]>button.copy.copied{border-color:var(--vp-code-copy-code-hover-border-color);background-color:var(--vp-code-copy-code-hover-bg)}.vp-doc [class*=language-]>button.copy.copied,.vp-doc [class*=language-]>button.copy:hover.copied{border-radius:0 4px 4px 0;background-color:var(--vp-code-copy-code-hover-bg);background-image:var(--vp-icon-copied)}.vp-doc [class*=language-]>button.copy.copied:before,.vp-doc [class*=language-]>button.copy:hover.copied:before{position:relative;top:-1px;left:-65px;display:flex;justify-content:center;align-items:center;border:1px solid var(--vp-code-copy-code-hover-border-color);border-right:0;border-radius:4px 0 0 
4px;width:64px;height:40px;text-align:center;font-size:12px;font-weight:500;color:var(--vp-code-copy-code-active-text);background-color:var(--vp-code-copy-code-hover-bg);white-space:nowrap;content:"Copied"}.vp-doc [class*=language-]>span.lang{position:absolute;top:2px;right:8px;z-index:2;font-size:12px;font-weight:500;color:var(--vp-c-code-dimm);transition:color .4s,opacity .4s}.vp-doc [class*=language-]:hover>button.copy+span.lang,.vp-doc [class*=language-]>button.copy:focus+span.lang{opacity:0}.vp-doc .VPTeamMembers{margin-top:24px}.vp-doc .VPTeamMembers.small.count-1 .container{margin:0!important;max-width:calc((100% - 24px)/2)!important}.vp-doc .VPTeamMembers.small.count-2 .container,.vp-doc .VPTeamMembers.small.count-3 .container{max-width:100%!important}.vp-doc .VPTeamMembers.medium.count-1 .container{margin:0!important;max-width:calc((100% - 24px)/2)!important}:is(.vp-external-link-icon,.vp-doc a[href*="://"],.vp-doc a[target=_blank]):not(.no-icon):after{display:inline-block;margin-top:-1px;margin-left:4px;width:11px;height:11px;background:currentColor;color:var(--vp-c-text-3);flex-shrink:0;--icon: url("data:image/svg+xml, %3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' %3E%3Cpath d='M0 0h24v24H0V0z' fill='none' /%3E%3Cpath d='M9 5v2h6.59L4 18.59 5.41 20 17 8.41V15h2V5H9z' /%3E%3C/svg%3E");-webkit-mask-image:var(--icon);mask-image:var(--icon)}.vp-external-link-icon:after{content:""}.vp-sponsor{border-radius:16px;overflow:hidden}.vp-sponsor.aside{border-radius:12px}.vp-sponsor-section+.vp-sponsor-section{margin-top:4px}.vp-sponsor-tier{margin-bottom:4px;text-align:center;letter-spacing:1px;line-height:24px;width:100%;font-weight:600;color:var(--vp-c-text-2);background-color:var(--vp-c-bg-soft)}.vp-sponsor.normal .vp-sponsor-tier{padding:13px 0 11px;font-size:14px}.vp-sponsor.aside .vp-sponsor-tier{padding:9px 0 
7px;font-size:12px}.vp-sponsor-grid+.vp-sponsor-tier{margin-top:4px}.vp-sponsor-grid{display:flex;flex-wrap:wrap;gap:4px}.vp-sponsor-grid.xmini .vp-sponsor-grid-link{height:64px}.vp-sponsor-grid.xmini .vp-sponsor-grid-image{max-width:64px;max-height:22px}.vp-sponsor-grid.mini .vp-sponsor-grid-link{height:72px}.vp-sponsor-grid.mini .vp-sponsor-grid-image{max-width:96px;max-height:24px}.vp-sponsor-grid.small .vp-sponsor-grid-link{height:96px}.vp-sponsor-grid.small .vp-sponsor-grid-image{max-width:96px;max-height:24px}.vp-sponsor-grid.medium .vp-sponsor-grid-link{height:112px}.vp-sponsor-grid.medium .vp-sponsor-grid-image{max-width:120px;max-height:36px}.vp-sponsor-grid.big .vp-sponsor-grid-link{height:184px}.vp-sponsor-grid.big .vp-sponsor-grid-image{max-width:192px;max-height:56px}.vp-sponsor-grid[data-vp-grid="2"] .vp-sponsor-grid-item{width:calc((100% - 4px)/2)}.vp-sponsor-grid[data-vp-grid="3"] .vp-sponsor-grid-item{width:calc((100% - 4px * 2) / 3)}.vp-sponsor-grid[data-vp-grid="4"] .vp-sponsor-grid-item{width:calc((100% - 12px)/4)}.vp-sponsor-grid[data-vp-grid="5"] .vp-sponsor-grid-item{width:calc((100% - 16px)/5)}.vp-sponsor-grid[data-vp-grid="6"] .vp-sponsor-grid-item{width:calc((100% - 4px * 5) / 6)}.vp-sponsor-grid-item{flex-shrink:0;width:100%;background-color:var(--vp-c-bg-soft);transition:background-color .25s}.vp-sponsor-grid-item:hover{background-color:var(--vp-c-bg-soft-down)}.vp-sponsor-grid-item:hover .vp-sponsor-grid-image{filter:grayscale(0) invert(0)}.vp-sponsor-grid-item.empty:hover{background-color:var(--vp-c-bg-soft)}.dark .vp-sponsor-grid-item:hover{background-color:var(--vp-c-white)}.dark .vp-sponsor-grid-item.empty:hover{background-color:var(--vp-c-bg-soft)}.vp-sponsor-grid-link{display:flex}.vp-sponsor-grid-box{display:flex;justify-content:center;align-items:center;width:100%}.vp-sponsor-grid-image{max-width:100%;filter:grayscale(1);transition:filter .25s}.dark .vp-sponsor-grid-image{filter:grayscale(1) 
invert(1)}.VPBadge[data-v-02919808]{display:inline-block;margin-left:2px;border:1px solid transparent;border-radius:12px;padding:0 10px;line-height:22px;font-size:12px;font-weight:600;transform:translateY(-2px)}.vp-doc h1>.VPBadge[data-v-02919808]{margin-top:4px;vertical-align:top}.vp-doc h2>.VPBadge[data-v-02919808]{margin-top:3px;line-height:20px;padding:0 8px;vertical-align:top}.vp-doc h3>.VPBadge[data-v-02919808]{line-height:20px;vertical-align:middle}.vp-doc h4>.VPBadge[data-v-02919808],.vp-doc h5>.VPBadge[data-v-02919808],.vp-doc h6>.VPBadge[data-v-02919808]{vertical-align:middle;line-height:18px}.VPBadge.info[data-v-02919808]{border-color:var(--vp-badge-info-border);color:var(--vp-badge-info-text);background-color:var(--vp-badge-info-bg)}.VPBadge.tip[data-v-02919808]{border-color:var(--vp-badge-tip-border);color:var(--vp-badge-tip-text);background-color:var(--vp-badge-tip-bg)}.VPBadge.warning[data-v-02919808]{border-color:var(--vp-badge-warning-border);color:var(--vp-badge-warning-text);background-color:var(--vp-badge-warning-bg)}.VPBadge.danger[data-v-02919808]{border-color:var(--vp-badge-danger-border);color:var(--vp-badge-danger-text);background-color:var(--vp-badge-danger-bg)}.VPBackdrop[data-v-c79a1216]{position:fixed;top:0;right:0;bottom:0;left:0;z-index:var(--vp-z-index-backdrop);background:var(--vp-backdrop-bg-color);transition:opacity .5s}.VPBackdrop.fade-enter-from[data-v-c79a1216],.VPBackdrop.fade-leave-to[data-v-c79a1216]{opacity:0}.VPBackdrop.fade-leave-active[data-v-c79a1216]{transition-duration:.25s}@media (min-width: 1280px){.VPBackdrop[data-v-c79a1216]{display:none}}.NotFound[data-v-a172abb3]{padding:64px 24px 96px;text-align:center}@media (min-width: 768px){.NotFound[data-v-a172abb3]{padding:96px 32px 168px}}.code[data-v-a172abb3]{line-height:64px;font-size:64px;font-weight:600}.title[data-v-a172abb3]{padding-top:12px;letter-spacing:2px;line-height:20px;font-size:20px;font-weight:700}.divider[data-v-a172abb3]{margin:24px auto 
18px;width:64px;height:1px;background-color:var(--vp-c-divider)}.quote[data-v-a172abb3]{margin:0 auto;max-width:256px;font-size:14px;font-weight:500;color:var(--vp-c-text-2)}.action[data-v-a172abb3]{padding-top:20px}.link[data-v-a172abb3]{display:inline-block;border:1px solid var(--vp-c-brand);border-radius:16px;padding:3px 16px;font-size:14px;font-weight:500;color:var(--vp-c-brand);transition:border-color .25s,color .25s}.link[data-v-a172abb3]:hover{border-color:var(--vp-c-brand-dark);color:var(--vp-c-brand-dark)}.root[data-v-d0ee3533]{position:relative;z-index:1}.nested[data-v-d0ee3533]{padding-left:16px}.outline-link[data-v-d0ee3533]{display:block;line-height:28px;color:var(--vp-c-text-2);white-space:nowrap;overflow:hidden;text-overflow:ellipsis;transition:color .5s;font-weight:400}.outline-link[data-v-d0ee3533]:hover,.outline-link.active[data-v-d0ee3533]{color:var(--vp-c-text-1);transition:color .25s}.outline-link.nested[data-v-d0ee3533]{padding-left:13px}.VPDocAsideOutline[data-v-ff0f39c8]{display:none}.VPDocAsideOutline.has-outline[data-v-ff0f39c8]{display:block}.content[data-v-ff0f39c8]{position:relative;border-left:1px solid var(--vp-c-divider);padding-left:16px;font-size:13px;font-weight:500}.outline-marker[data-v-ff0f39c8]{position:absolute;top:32px;left:-1px;z-index:0;opacity:0;width:1px;height:18px;background-color:var(--vp-c-brand);transition:top .25s cubic-bezier(0,1,.5,1),background-color .5s,opacity .25s}.outline-title[data-v-ff0f39c8]{letter-spacing:.4px;line-height:28px;font-size:13px;font-weight:600}.VPDocAside[data-v-3f215769]{display:flex;flex-direction:column;flex-grow:1}.spacer[data-v-3f215769]{flex-grow:1}.VPDocAside[data-v-3f215769] .spacer+.VPDocAsideSponsors,.VPDocAside[data-v-3f215769] .spacer+.VPDocAsideCarbonAds{margin-top:24px}.VPDocAside[data-v-3f215769] .VPDocAsideSponsors+.VPDocAsideCarbonAds{margin-top:16px}.VPLastUpdated[data-v-149a99df]{line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-2)}@media (min-width: 
640px){.VPLastUpdated[data-v-149a99df]{line-height:32px;font-size:14px;font-weight:500}}.VPDocFooter[data-v-37656e44]{margin-top:64px}.edit-info[data-v-37656e44]{padding-bottom:18px}@media (min-width: 640px){.edit-info[data-v-37656e44]{display:flex;justify-content:space-between;align-items:center;padding-bottom:14px}}.edit-link-button[data-v-37656e44]{display:flex;align-items:center;border:0;line-height:32px;font-size:14px;font-weight:500;color:var(--vp-c-brand);transition:color .25s}.edit-link-button[data-v-37656e44]:hover{color:var(--vp-c-brand-dark)}.edit-link-icon[data-v-37656e44]{margin-right:8px;width:14px;height:14px;fill:currentColor}.prev-next[data-v-37656e44]{border-top:1px solid var(--vp-c-divider);padding-top:24px;display:grid;grid-row-gap:8px}@media (min-width: 640px){.prev-next[data-v-37656e44]{grid-template-columns:repeat(2,1fr);grid-column-gap:16px}}.pager-link[data-v-37656e44]{display:block;border:1px solid var(--vp-c-divider);border-radius:8px;padding:11px 16px 13px;width:100%;height:100%;transition:border-color .25s}.pager-link[data-v-37656e44]:hover{border-color:var(--vp-c-brand)}.pager-link.next[data-v-37656e44]{margin-left:auto;text-align:right}.desc[data-v-37656e44]{display:block;line-height:20px;font-size:12px;font-weight:500;color:var(--vp-c-text-2)}.title[data-v-37656e44]{display:block;line-height:20px;font-size:14px;font-weight:500;color:var(--vp-c-brand);transition:color .25s}.VPDocOutlineDropdown[data-v-0c1fc463]{margin-bottom:48px}.VPDocOutlineDropdown button[data-v-0c1fc463]{display:block;font-size:14px;font-weight:500;line-height:24px;border:1px solid var(--vp-c-border);padding:4px 12px;color:var(--vp-c-text-2);background-color:var(--vp-c-mute);border-radius:8px;transition:color .5s}.VPDocOutlineDropdown button[data-v-0c1fc463]:hover{color:var(--vp-c-text-1);transition:color .25s}.VPDocOutlineDropdown 
button.open[data-v-0c1fc463]{color:var(--vp-c-text-1)}.icon[data-v-0c1fc463]{display:inline-block;vertical-align:middle;width:16px;height:16px;fill:currentColor}[data-v-0c1fc463] .outline-link{font-size:14px;font-weight:400}.open>.icon[data-v-0c1fc463]{transform:rotate(90deg)}.items[data-v-0c1fc463]{margin-top:12px;border-left:1px solid var(--vp-c-divider)}.VPDoc[data-v-6b87e69f]{padding:32px 24px 96px;width:100%}.VPDoc .VPDocOutlineDropdown[data-v-6b87e69f]{display:none}@media (min-width: 960px) and (max-width: 1279px){.VPDoc .VPDocOutlineDropdown[data-v-6b87e69f]{display:block}}@media (min-width: 768px){.VPDoc[data-v-6b87e69f]{padding:48px 32px 128px}}@media (min-width: 960px){.VPDoc[data-v-6b87e69f]{padding:32px 32px 0}.VPDoc:not(.has-sidebar) .container[data-v-6b87e69f]{display:flex;justify-content:center;max-width:992px}.VPDoc:not(.has-sidebar) .content[data-v-6b87e69f]{max-width:752px}}@media (min-width: 1280px){.VPDoc .container[data-v-6b87e69f]{display:flex;justify-content:center}.VPDoc .aside[data-v-6b87e69f]{display:block}}@media (min-width: 1440px){.VPDoc:not(.has-sidebar) .content[data-v-6b87e69f]{max-width:784px}.VPDoc:not(.has-sidebar) .container[data-v-6b87e69f]{max-width:1104px}}.container[data-v-6b87e69f]{margin:0 auto;width:100%}.aside[data-v-6b87e69f]{position:relative;display:none;order:2;flex-grow:1;padding-left:32px;width:100%;max-width:256px}.left-aside[data-v-6b87e69f]{order:1;padding-left:unset;padding-right:32px}.aside-container[data-v-6b87e69f]{position:fixed;top:0;padding-top:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + var(--vp-doc-top-height, 0px) + 32px);width:224px;height:100vh;overflow-x:hidden;overflow-y:auto;scrollbar-width:none}.aside-container[data-v-6b87e69f]::-webkit-scrollbar{display:none}.aside-curtain[data-v-6b87e69f]{position:fixed;bottom:0;z-index:10;width:224px;height:32px;background:linear-gradient(transparent,var(--vp-c-bg) 
70%)}.aside-content[data-v-6b87e69f]{display:flex;flex-direction:column;min-height:calc(100vh - (var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 32px));padding-bottom:32px}.content[data-v-6b87e69f]{position:relative;margin:0 auto;width:100%}@media (min-width: 960px){.content[data-v-6b87e69f]{padding:0 32px 128px}}@media (min-width: 1280px){.content[data-v-6b87e69f]{order:1;margin:0;min-width:640px}}.content-container[data-v-6b87e69f]{margin:0 auto}.VPDoc.has-aside .content-container[data-v-6b87e69f]{max-width:688px}.external-link-icon-enabled[data-v-6b87e69f] :is(.vp-doc a[href*="://"],.vp-doc a[target=_blank]):after{content:"";color:currentColor}.VPButton[data-v-567ba664]{display:inline-block;border:1px solid transparent;text-align:center;font-weight:600;white-space:nowrap;transition:color .25s,border-color .25s,background-color .25s}.VPButton[data-v-567ba664]:active{transition:color .1s,border-color .1s,background-color .1s}.VPButton.medium[data-v-567ba664]{border-radius:20px;padding:0 20px;line-height:38px;font-size:14px}.VPButton.big[data-v-567ba664]{border-radius:24px;padding:0 
24px;line-height:46px;font-size:16px}.VPButton.brand[data-v-567ba664]{border-color:var(--vp-button-brand-border);color:var(--vp-button-brand-text);background-color:var(--vp-button-brand-bg)}.VPButton.brand[data-v-567ba664]:hover{border-color:var(--vp-button-brand-hover-border);color:var(--vp-button-brand-hover-text);background-color:var(--vp-button-brand-hover-bg)}.VPButton.brand[data-v-567ba664]:active{border-color:var(--vp-button-brand-active-border);color:var(--vp-button-brand-active-text);background-color:var(--vp-button-brand-active-bg)}.VPButton.alt[data-v-567ba664]{border-color:var(--vp-button-alt-border);color:var(--vp-button-alt-text);background-color:var(--vp-button-alt-bg)}.VPButton.alt[data-v-567ba664]:hover{border-color:var(--vp-button-alt-hover-border);color:var(--vp-button-alt-hover-text);background-color:var(--vp-button-alt-hover-bg)}.VPButton.alt[data-v-567ba664]:active{border-color:var(--vp-button-alt-active-border);color:var(--vp-button-alt-active-text);background-color:var(--vp-button-alt-active-bg)}.VPButton.sponsor[data-v-567ba664]{border-color:var(--vp-button-sponsor-border);color:var(--vp-button-sponsor-text);background-color:var(--vp-button-sponsor-bg)}.VPButton.sponsor[data-v-567ba664]:hover{border-color:var(--vp-button-sponsor-hover-border);color:var(--vp-button-sponsor-hover-text);background-color:var(--vp-button-sponsor-hover-bg)}.VPButton.sponsor[data-v-567ba664]:active{border-color:var(--vp-button-sponsor-active-border);color:var(--vp-button-sponsor-active-text);background-color:var(--vp-button-sponsor-active-bg)}html:not(.dark) .VPImage.dark[data-v-8426fc1a]{display:none}.dark .VPImage.light[data-v-8426fc1a]{display:none}.VPHero[data-v-da5d1713]{margin-top:calc((var(--vp-nav-height) + var(--vp-layout-top-height, 0px)) * -1);padding:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 48px) 24px 48px}@media (min-width: 640px){.VPHero[data-v-da5d1713]{padding:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 
80px) 48px 64px}}@media (min-width: 960px){.VPHero[data-v-da5d1713]{padding:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 80px) 64px 64px}}.container[data-v-da5d1713]{display:flex;flex-direction:column;margin:0 auto;max-width:1152px}@media (min-width: 960px){.container[data-v-da5d1713]{flex-direction:row}}.main[data-v-da5d1713]{position:relative;z-index:10;order:2;flex-grow:1;flex-shrink:0}.VPHero.has-image .container[data-v-da5d1713]{text-align:center}@media (min-width: 960px){.VPHero.has-image .container[data-v-da5d1713]{text-align:left}}@media (min-width: 960px){.main[data-v-da5d1713]{order:1;width:calc((100% / 3) * 2)}.VPHero.has-image .main[data-v-da5d1713]{max-width:592px}}.name[data-v-da5d1713],.text[data-v-da5d1713]{max-width:392px;letter-spacing:-.4px;line-height:40px;font-size:32px;font-weight:700;white-space:pre-wrap}.VPHero.has-image .name[data-v-da5d1713],.VPHero.has-image .text[data-v-da5d1713]{margin:0 auto}.name[data-v-da5d1713]{color:var(--vp-home-hero-name-color)}.clip[data-v-da5d1713]{background:var(--vp-home-hero-name-background);-webkit-background-clip:text;background-clip:text;-webkit-text-fill-color:var(--vp-home-hero-name-color)}@media (min-width: 640px){.name[data-v-da5d1713],.text[data-v-da5d1713]{max-width:576px;line-height:56px;font-size:48px}}@media (min-width: 960px){.name[data-v-da5d1713],.text[data-v-da5d1713]{line-height:64px;font-size:56px}.VPHero.has-image .name[data-v-da5d1713],.VPHero.has-image .text[data-v-da5d1713]{margin:0}}.tagline[data-v-da5d1713]{padding-top:8px;max-width:392px;line-height:28px;font-size:18px;font-weight:500;white-space:pre-wrap;color:var(--vp-c-text-2)}.VPHero.has-image .tagline[data-v-da5d1713]{margin:0 auto}@media (min-width: 640px){.tagline[data-v-da5d1713]{padding-top:12px;max-width:576px;line-height:32px;font-size:20px}}@media (min-width: 960px){.tagline[data-v-da5d1713]{line-height:36px;font-size:24px}.VPHero.has-image 
.tagline[data-v-da5d1713]{margin:0}}.actions[data-v-da5d1713]{display:flex;flex-wrap:wrap;margin:-6px;padding-top:24px}.VPHero.has-image .actions[data-v-da5d1713]{justify-content:center}@media (min-width: 640px){.actions[data-v-da5d1713]{padding-top:32px}}@media (min-width: 960px){.VPHero.has-image .actions[data-v-da5d1713]{justify-content:flex-start}}.action[data-v-da5d1713]{flex-shrink:0;padding:6px}.image[data-v-da5d1713]{order:1;margin:-76px -24px -48px}@media (min-width: 640px){.image[data-v-da5d1713]{margin:-108px -24px -48px}}@media (min-width: 960px){.image[data-v-da5d1713]{flex-grow:1;order:2;margin:0;min-height:100%}}.image-container[data-v-da5d1713]{position:relative;margin:0 auto;width:320px;height:320px}@media (min-width: 640px){.image-container[data-v-da5d1713]{width:392px;height:392px}}@media (min-width: 960px){.image-container[data-v-da5d1713]{display:flex;justify-content:center;align-items:center;width:100%;height:100%;transform:translate(-32px,-32px)}}.image-bg[data-v-da5d1713]{position:absolute;top:50%;left:50%;border-radius:50%;width:192px;height:192px;background-image:var(--vp-home-hero-image-background-image);filter:var(--vp-home-hero-image-filter);transform:translate(-50%,-50%)}@media (min-width: 640px){.image-bg[data-v-da5d1713]{width:256px;height:256px}}@media (min-width: 960px){.image-bg[data-v-da5d1713]{width:320px;height:320px}}[data-v-da5d1713] .image-src{position:absolute;top:50%;left:50%;max-width:192px;max-height:192px;transform:translate(-50%,-50%)}@media (min-width: 640px){[data-v-da5d1713] .image-src{max-width:256px;max-height:256px}}@media (min-width: 960px){[data-v-da5d1713] .image-src{max-width:320px;max-height:320px}}.VPFeature[data-v-33086751]{display:block;border:1px solid var(--vp-c-bg-soft);border-radius:12px;height:100%;background-color:var(--vp-c-bg-soft);transition:border-color .25s,background-color 
.25s}.VPFeature.link[data-v-33086751]:hover{border-color:var(--vp-c-brand);background-color:var(--vp-c-bg-soft-up)}.box[data-v-33086751]{display:flex;flex-direction:column;padding:24px;height:100%}.VPFeature[data-v-33086751] .VPImage{margin-bottom:20px}.icon[data-v-33086751]{display:flex;justify-content:center;align-items:center;margin-bottom:20px;border-radius:6px;background-color:var(--vp-c-bg-soft-down);width:48px;height:48px;font-size:24px;transition:background-color .25s}.title[data-v-33086751]{line-height:24px;font-size:16px;font-weight:600}.details[data-v-33086751]{flex-grow:1;padding-top:8px;line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-2)}.link-text[data-v-33086751]{padding-top:8px}.link-text-value[data-v-33086751]{display:flex;align-items:center;font-size:14px;font-weight:500;color:var(--vp-c-brand)}.link-text-icon[data-v-33086751]{display:inline-block;margin-left:6px;width:14px;height:14px;fill:currentColor}.VPFeatures[data-v-39646fad]{position:relative;padding:0 24px}@media (min-width: 640px){.VPFeatures[data-v-39646fad]{padding:0 48px}}@media (min-width: 960px){.VPFeatures[data-v-39646fad]{padding:0 64px}}.container[data-v-39646fad]{margin:0 auto;max-width:1152px}.items[data-v-39646fad]{display:flex;flex-wrap:wrap;margin:-8px}.item[data-v-39646fad]{padding:8px;width:100%}@media (min-width: 640px){.item.grid-2[data-v-39646fad],.item.grid-4[data-v-39646fad],.item.grid-6[data-v-39646fad]{width:50%}}@media (min-width: 768px){.item.grid-2[data-v-39646fad],.item.grid-4[data-v-39646fad]{width:50%}.item.grid-3[data-v-39646fad],.item.grid-6[data-v-39646fad]{width:calc(100% / 3)}}@media (min-width: 960px){.item.grid-4[data-v-39646fad]{width:25%}}.VPHome[data-v-d82743a8]{padding-bottom:96px}.VPHome[data-v-d82743a8] .VPHomeSponsors{margin-top:112px;margin-bottom:-128px}@media (min-width: 768px){.VPHome[data-v-d82743a8]{padding-bottom:128px}}.VPContent[data-v-669faec9]{flex-grow:1;flex-shrink:0;margin:var(--vp-layout-top-height, 0px) auto 
0;width:100%}.VPContent.is-home[data-v-669faec9]{width:100%;max-width:100%}.VPContent.has-sidebar[data-v-669faec9]{margin:0}@media (min-width: 960px){.VPContent[data-v-669faec9]{padding-top:var(--vp-nav-height)}.VPContent.has-sidebar[data-v-669faec9]{margin:var(--vp-layout-top-height, 0px) 0 0;padding-left:var(--vp-sidebar-width)}}@media (min-width: 1440px){.VPContent.has-sidebar[data-v-669faec9]{padding-right:calc((100vw - var(--vp-layout-max-width)) / 2);padding-left:calc((100vw - var(--vp-layout-max-width)) / 2 + var(--vp-sidebar-width))}}.VPFooter[data-v-e03eb2e1]{position:relative;z-index:var(--vp-z-index-footer);border-top:1px solid var(--vp-c-gutter);padding:32px 24px;background-color:var(--vp-c-bg)}.VPFooter.has-sidebar[data-v-e03eb2e1]{display:none}@media (min-width: 768px){.VPFooter[data-v-e03eb2e1]{padding:32px}}.container[data-v-e03eb2e1]{margin:0 auto;max-width:var(--vp-layout-max-width);text-align:center}.message[data-v-e03eb2e1],.copyright[data-v-e03eb2e1]{line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-2)}.VPLocalNavOutlineDropdown[data-v-18201f51]{padding:12px 20px 11px}.VPLocalNavOutlineDropdown button[data-v-18201f51]{display:block;font-size:12px;font-weight:500;line-height:24px;color:var(--vp-c-text-2);transition:color .5s;position:relative}.VPLocalNavOutlineDropdown button[data-v-18201f51]:hover{color:var(--vp-c-text-1);transition:color .25s}.VPLocalNavOutlineDropdown button.open[data-v-18201f51]{color:var(--vp-c-text-1)}.icon[data-v-18201f51]{display:inline-block;vertical-align:middle;margin-left:2px;width:14px;height:14px;fill:currentColor}[data-v-18201f51] .outline-link{font-size:14px;padding:2px 0}.open>.icon[data-v-18201f51]{transform:rotate(90deg)}.items[data-v-18201f51]{position:absolute;top:64px;right:16px;left:16px;display:grid;gap:1px;border:1px solid var(--vp-c-border);border-radius:8px;background-color:var(--vp-c-gutter);max-height:calc(var(--vp-vh, 100vh) - 86px);overflow:hidden 
auto;box-shadow:var(--vp-shadow-3)}.header[data-v-18201f51]{background-color:var(--vp-c-bg-soft)}.top-link[data-v-18201f51]{display:block;padding:0 16px;line-height:48px;font-size:14px;font-weight:500;color:var(--vp-c-brand)}.outline[data-v-18201f51]{padding:8px 0;background-color:var(--vp-c-bg-soft)}.flyout-enter-active[data-v-18201f51]{transition:all .2s ease-out}.flyout-leave-active[data-v-18201f51]{transition:all .15s ease-in}.flyout-enter-from[data-v-18201f51],.flyout-leave-to[data-v-18201f51]{opacity:0;transform:translateY(-16px)}.VPLocalNav[data-v-5cfd5582]{position:sticky;top:0;left:0;z-index:var(--vp-z-index-local-nav);display:flex;justify-content:space-between;align-items:center;border-top:1px solid var(--vp-c-gutter);border-bottom:1px solid var(--vp-c-gutter);padding-top:var(--vp-layout-top-height, 0px);width:100%;background-color:var(--vp-local-nav-bg-color)}.VPLocalNav.fixed[data-v-5cfd5582]{position:fixed}.VPLocalNav.reached-top[data-v-5cfd5582]{border-top-color:transparent}@media (min-width: 960px){.VPLocalNav[data-v-5cfd5582]{display:none}}.menu[data-v-5cfd5582]{display:flex;align-items:center;padding:12px 24px 11px;line-height:24px;font-size:12px;font-weight:500;color:var(--vp-c-text-2);transition:color .5s}.menu[data-v-5cfd5582]:hover{color:var(--vp-c-text-1);transition:color .25s}@media (min-width: 768px){.menu[data-v-5cfd5582]{padding:0 32px}}.menu-icon[data-v-5cfd5582]{margin-right:8px;width:16px;height:16px;fill:currentColor}.VPOutlineDropdown[data-v-5cfd5582]{padding:12px 24px 11px}@media (min-width: 768px){.VPOutlineDropdown[data-v-5cfd5582]{padding:12px 32px 11px}}.VPSwitch[data-v-f3c41672]{position:relative;border-radius:11px;display:block;width:40px;height:22px;flex-shrink:0;border:1px solid var(--vp-input-border-color);background-color:var(--vp-input-switch-bg-color);transition:border-color 
.25s}.VPSwitch[data-v-f3c41672]:hover{border-color:var(--vp-input-hover-border-color)}.check[data-v-f3c41672]{position:absolute;top:1px;left:1px;width:18px;height:18px;border-radius:50%;background-color:var(--vp-c-neutral-inverse);box-shadow:var(--vp-shadow-1);transition:transform .25s}.icon[data-v-f3c41672]{position:relative;display:block;width:18px;height:18px;border-radius:50%;overflow:hidden}.icon[data-v-f3c41672] svg{position:absolute;top:3px;left:3px;width:12px;height:12px;fill:var(--vp-c-text-2)}.dark .icon[data-v-f3c41672] svg{fill:var(--vp-c-text-1);transition:opacity .25s}.sun[data-v-82b282f1]{opacity:1}.moon[data-v-82b282f1],.dark .sun[data-v-82b282f1]{opacity:0}.dark .moon[data-v-82b282f1]{opacity:1}.dark .VPSwitchAppearance[data-v-82b282f1] .check{transform:translate(18px)}.VPNavBarAppearance[data-v-f6a63727]{display:none}@media (min-width: 1280px){.VPNavBarAppearance[data-v-f6a63727]{display:flex;align-items:center}}.VPMenuGroup+.VPMenuLink[data-v-2f2cfafc]{margin:12px -12px 0;border-top:1px solid var(--vp-c-divider);padding:12px 12px 0}.link[data-v-2f2cfafc]{display:block;border-radius:6px;padding:0 12px;line-height:32px;font-size:14px;font-weight:500;color:var(--vp-c-text-1);white-space:nowrap;transition:background-color .25s,color .25s}.link[data-v-2f2cfafc]:hover{color:var(--vp-c-brand);background-color:var(--vp-c-bg-elv-mute)}.link.active[data-v-2f2cfafc]{color:var(--vp-c-brand)}.VPMenuGroup[data-v-69e747b5]{margin:12px -12px 0;border-top:1px solid var(--vp-c-divider);padding:12px 12px 0}.VPMenuGroup[data-v-69e747b5]:first-child{margin-top:0;border-top:0;padding-top:0}.VPMenuGroup+.VPMenuGroup[data-v-69e747b5]{margin-top:12px;border-top:1px solid var(--vp-c-divider)}.title[data-v-69e747b5]{padding:0 12px;line-height:32px;font-size:14px;font-weight:600;color:var(--vp-c-text-2);white-space:nowrap;transition:color .25s}.VPMenu[data-v-e7ea1737]{border-radius:12px;padding:12px;min-width:128px;border:1px solid 
var(--vp-c-divider);background-color:var(--vp-c-bg-elv);box-shadow:var(--vp-shadow-3);transition:background-color .5s;max-height:calc(100vh - var(--vp-nav-height));overflow-y:auto}.VPMenu[data-v-e7ea1737] .group{margin:0 -12px;padding:0 12px 12px}.VPMenu[data-v-e7ea1737] .group+.group{border-top:1px solid var(--vp-c-divider);padding:11px 12px 12px}.VPMenu[data-v-e7ea1737] .group:last-child{padding-bottom:0}.VPMenu[data-v-e7ea1737] .group+.item{border-top:1px solid var(--vp-c-divider);padding:11px 16px 0}.VPMenu[data-v-e7ea1737] .item{padding:0 16px;white-space:nowrap}.VPMenu[data-v-e7ea1737] .label{flex-grow:1;line-height:28px;font-size:12px;font-weight:500;color:var(--vp-c-text-2);transition:color .5s}.VPMenu[data-v-e7ea1737] .action{padding-left:24px}.VPFlyout[data-v-a7b5672a]{position:relative}.VPFlyout[data-v-a7b5672a]:hover{color:var(--vp-c-brand);transition:color .25s}.VPFlyout:hover .text[data-v-a7b5672a]{color:var(--vp-c-text-2)}.VPFlyout:hover .icon[data-v-a7b5672a]{fill:var(--vp-c-text-2)}.VPFlyout.active .text[data-v-a7b5672a]{color:var(--vp-c-brand)}.VPFlyout.active:hover .text[data-v-a7b5672a]{color:var(--vp-c-brand-dark)}.VPFlyout:hover .menu[data-v-a7b5672a],.button[aria-expanded=true]+.menu[data-v-a7b5672a]{opacity:1;visibility:visible;transform:translateY(0)}.button[aria-expanded=false]+.menu[data-v-a7b5672a]{opacity:0;visibility:hidden;transform:translateY(0)}.button[data-v-a7b5672a]{display:flex;align-items:center;padding:0 12px;height:var(--vp-nav-height);color:var(--vp-c-text-1);transition:color .5s}.text[data-v-a7b5672a]{display:flex;align-items:center;line-height:var(--vp-nav-height);font-size:14px;font-weight:500;color:var(--vp-c-text-1);transition:color .25s}.option-icon[data-v-a7b5672a]{margin-right:0;width:16px;height:16px;fill:currentColor}.text-icon[data-v-a7b5672a]{margin-left:4px;width:14px;height:14px;fill:currentColor}.icon[data-v-a7b5672a]{width:20px;height:20px;fill:currentColor;transition:fill 
.25s}.menu[data-v-a7b5672a]{position:absolute;top:calc(var(--vp-nav-height) / 2 + 20px);right:0;opacity:0;visibility:hidden;transition:opacity .25s,visibility .25s,transform .25s}.VPSocialLink[data-v-f80f8133]{display:flex;justify-content:center;align-items:center;width:36px;height:36px;color:var(--vp-c-text-2);transition:color .5s}.VPSocialLink[data-v-f80f8133]:hover{color:var(--vp-c-text-1);transition:color .25s}.VPSocialLink[data-v-f80f8133]>svg{width:20px;height:20px;fill:currentColor}.VPSocialLinks[data-v-7bc22406]{display:flex;justify-content:center}.VPNavBarExtra[data-v-40855f84]{display:none;margin-right:-12px}@media (min-width: 768px){.VPNavBarExtra[data-v-40855f84]{display:block}}@media (min-width: 1280px){.VPNavBarExtra[data-v-40855f84]{display:none}}.trans-title[data-v-40855f84]{padding:0 24px 0 12px;line-height:32px;font-size:14px;font-weight:700;color:var(--vp-c-text-1)}.item.appearance[data-v-40855f84],.item.social-links[data-v-40855f84]{display:flex;align-items:center;padding:0 12px}.item.appearance[data-v-40855f84]{min-width:176px}.appearance-action[data-v-40855f84]{margin-right:-2px}.social-links-list[data-v-40855f84]{margin:-4px -8px}.VPNavBarHamburger[data-v-e5dd9c1c]{display:flex;justify-content:center;align-items:center;width:48px;height:var(--vp-nav-height)}@media (min-width: 768px){.VPNavBarHamburger[data-v-e5dd9c1c]{display:none}}.container[data-v-e5dd9c1c]{position:relative;width:16px;height:14px;overflow:hidden}.VPNavBarHamburger:hover .top[data-v-e5dd9c1c]{top:0;left:0;transform:translate(4px)}.VPNavBarHamburger:hover .middle[data-v-e5dd9c1c]{top:6px;left:0;transform:translate(0)}.VPNavBarHamburger:hover .bottom[data-v-e5dd9c1c]{top:12px;left:0;transform:translate(8px)}.VPNavBarHamburger.active .top[data-v-e5dd9c1c]{top:6px;transform:translate(0) rotate(225deg)}.VPNavBarHamburger.active .middle[data-v-e5dd9c1c]{top:6px;transform:translate(16px)}.VPNavBarHamburger.active .bottom[data-v-e5dd9c1c]{top:6px;transform:translate(0) 
rotate(135deg)}.VPNavBarHamburger.active:hover .top[data-v-e5dd9c1c],.VPNavBarHamburger.active:hover .middle[data-v-e5dd9c1c],.VPNavBarHamburger.active:hover .bottom[data-v-e5dd9c1c]{background-color:var(--vp-c-text-2);transition:top .25s,background-color .25s,transform .25s}.top[data-v-e5dd9c1c],.middle[data-v-e5dd9c1c],.bottom[data-v-e5dd9c1c]{position:absolute;width:16px;height:2px;background-color:var(--vp-c-text-1);transition:top .25s,background-color .5s,transform .25s}.top[data-v-e5dd9c1c]{top:0;left:0;transform:translate(0)}.middle[data-v-e5dd9c1c]{top:6px;left:0;transform:translate(8px)}.bottom[data-v-e5dd9c1c]{top:12px;left:0;transform:translate(4px)}.VPNavBarMenuLink[data-v-0b525393]{display:flex;align-items:center;padding:0 12px;line-height:var(--vp-nav-height);font-size:14px;font-weight:500;color:var(--vp-c-text-1);transition:color .25s}.VPNavBarMenuLink.active[data-v-0b525393],.VPNavBarMenuLink[data-v-0b525393]:hover{color:var(--vp-c-brand)}.VPNavBarMenu[data-v-7f418b0f]{display:none}@media (min-width: 768px){.VPNavBarMenu[data-v-7f418b0f]{display:flex}}/*! @docsearch/css 3.5.2 | MIT License | © Algolia, Inc. 
and contributors | https://docsearch.algolia.com */:root{--docsearch-primary-color:#5468ff;--docsearch-text-color:#1c1e21;--docsearch-spacing:12px;--docsearch-icon-stroke-width:1.4;--docsearch-highlight-color:var(--docsearch-primary-color);--docsearch-muted-color:#969faf;--docsearch-container-background:rgba(101,108,133,.8);--docsearch-logo-color:#5468ff;--docsearch-modal-width:560px;--docsearch-modal-height:600px;--docsearch-modal-background:#f5f6f7;--docsearch-modal-shadow:inset 1px 1px 0 0 hsla(0,0%,100%,.5),0 3px 8px 0 #555a64;--docsearch-searchbox-height:56px;--docsearch-searchbox-background:#ebedf0;--docsearch-searchbox-focus-background:#fff;--docsearch-searchbox-shadow:inset 0 0 0 2px var(--docsearch-primary-color);--docsearch-hit-height:56px;--docsearch-hit-color:#444950;--docsearch-hit-active-color:#fff;--docsearch-hit-background:#fff;--docsearch-hit-shadow:0 1px 3px 0 #d4d9e1;--docsearch-key-gradient:linear-gradient(-225deg,#d5dbe4,#f8f8f8);--docsearch-key-shadow:inset 0 -2px 0 0 #cdcde6,inset 0 0 1px 1px #fff,0 1px 2px 1px rgba(30,35,90,.4);--docsearch-footer-height:44px;--docsearch-footer-background:#fff;--docsearch-footer-shadow:0 -1px 0 0 #e0e3e8,0 -3px 6px 0 rgba(69,98,155,.12)}html[data-theme=dark]{--docsearch-text-color:#f5f6f7;--docsearch-container-background:rgba(9,10,17,.8);--docsearch-modal-background:#15172a;--docsearch-modal-shadow:inset 1px 1px 0 0 #2c2e40,0 3px 8px 0 #000309;--docsearch-searchbox-background:#090a11;--docsearch-searchbox-focus-background:#000;--docsearch-hit-color:#bec3c9;--docsearch-hit-shadow:none;--docsearch-hit-background:#090a11;--docsearch-key-gradient:linear-gradient(-26.5deg,#565872,#31355b);--docsearch-key-shadow:inset 0 -2px 0 0 #282d55,inset 0 0 1px 1px #51577d,0 2px 2px 0 rgba(3,4,9,.3);--docsearch-footer-background:#1e2136;--docsearch-footer-shadow:inset 0 1px 0 0 rgba(73,76,106,.5),0 -4px 8px 0 
rgba(0,0,0,.2);--docsearch-logo-color:#fff;--docsearch-muted-color:#7f8497}.DocSearch-Button{align-items:center;background:var(--docsearch-searchbox-background);border:0;border-radius:40px;color:var(--docsearch-muted-color);cursor:pointer;display:flex;font-weight:500;height:36px;justify-content:space-between;margin:0 0 0 16px;padding:0 8px;-webkit-user-select:none;user-select:none}.DocSearch-Button:active,.DocSearch-Button:focus,.DocSearch-Button:hover{background:var(--docsearch-searchbox-focus-background);box-shadow:var(--docsearch-searchbox-shadow);color:var(--docsearch-text-color);outline:none}.DocSearch-Button-Container{align-items:center;display:flex}.DocSearch-Search-Icon{stroke-width:1.6}.DocSearch-Button .DocSearch-Search-Icon{color:var(--docsearch-text-color)}.DocSearch-Button-Placeholder{font-size:1rem;padding:0 12px 0 6px}.DocSearch-Button-Keys{display:flex;min-width:calc(40px + .8em)}.DocSearch-Button-Key{align-items:center;background:var(--docsearch-key-gradient);border-radius:3px;box-shadow:var(--docsearch-key-shadow);color:var(--docsearch-muted-color);display:flex;height:18px;justify-content:center;margin-right:.4em;position:relative;padding:0 0 2px;border:0;top:-1px;width:20px}@media (max-width:768px){.DocSearch-Button-Keys,.DocSearch-Button-Placeholder{display:none}}.DocSearch--active{overflow:hidden!important}.DocSearch-Container,.DocSearch-Container *{box-sizing:border-box}.DocSearch-Container{background-color:var(--docsearch-container-background);height:100vh;left:0;position:fixed;top:0;width:100vw;z-index:200}.DocSearch-Container a{text-decoration:none}.DocSearch-Link{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;color:var(--docsearch-highlight-color);cursor:pointer;font:inherit;margin:0;padding:0}.DocSearch-Modal{background:var(--docsearch-modal-background);border-radius:6px;box-shadow:var(--docsearch-modal-shadow);flex-direction:column;margin:60px auto 
auto;max-width:var(--docsearch-modal-width);position:relative}.DocSearch-SearchBar{display:flex;padding:var(--docsearch-spacing) var(--docsearch-spacing) 0}.DocSearch-Form{align-items:center;background:var(--docsearch-searchbox-focus-background);border-radius:4px;box-shadow:var(--docsearch-searchbox-shadow);display:flex;height:var(--docsearch-searchbox-height);margin:0;padding:0 var(--docsearch-spacing);position:relative;width:100%}.DocSearch-Input{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:transparent;border:0;color:var(--docsearch-text-color);flex:1;font:inherit;font-size:1.2em;height:100%;outline:none;padding:0 0 0 8px;width:80%}.DocSearch-Input::placeholder{color:var(--docsearch-muted-color);opacity:1}.DocSearch-Input::-webkit-search-cancel-button,.DocSearch-Input::-webkit-search-decoration,.DocSearch-Input::-webkit-search-results-button,.DocSearch-Input::-webkit-search-results-decoration{display:none}.DocSearch-LoadingIndicator,.DocSearch-MagnifierLabel,.DocSearch-Reset{margin:0;padding:0}.DocSearch-MagnifierLabel,.DocSearch-Reset{align-items:center;color:var(--docsearch-highlight-color);display:flex;justify-content:center}.DocSearch-Container--Stalled .DocSearch-MagnifierLabel,.DocSearch-LoadingIndicator{display:none}.DocSearch-Container--Stalled .DocSearch-LoadingIndicator{align-items:center;color:var(--docsearch-highlight-color);display:flex;justify-content:center}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Reset{animation:none;-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;border-radius:50%;color:var(--docsearch-icon-color);cursor:pointer;right:0;stroke-width:var(--docsearch-icon-stroke-width)}}.DocSearch-Reset{animation:fade-in .1s ease-in 
forwards;-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;border-radius:50%;color:var(--docsearch-icon-color);cursor:pointer;padding:2px;right:0;stroke-width:var(--docsearch-icon-stroke-width)}.DocSearch-Reset[hidden]{display:none}.DocSearch-Reset:hover{color:var(--docsearch-highlight-color)}.DocSearch-LoadingIndicator svg,.DocSearch-MagnifierLabel svg{height:24px;width:24px}.DocSearch-Cancel{display:none}.DocSearch-Dropdown{max-height:calc(var(--docsearch-modal-height) - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - var(--docsearch-footer-height));min-height:var(--docsearch-spacing);overflow-y:auto;overflow-y:overlay;padding:0 var(--docsearch-spacing);scrollbar-color:var(--docsearch-muted-color) var(--docsearch-modal-background);scrollbar-width:thin}.DocSearch-Dropdown::-webkit-scrollbar{width:12px}.DocSearch-Dropdown::-webkit-scrollbar-track{background:transparent}.DocSearch-Dropdown::-webkit-scrollbar-thumb{background-color:var(--docsearch-muted-color);border:3px solid var(--docsearch-modal-background);border-radius:20px}.DocSearch-Dropdown ul{list-style:none;margin:0;padding:0}.DocSearch-Label{font-size:.75em;line-height:1.6em}.DocSearch-Help,.DocSearch-Label{color:var(--docsearch-muted-color)}.DocSearch-Help{font-size:.9em;margin:0;-webkit-user-select:none;user-select:none}.DocSearch-Title{font-size:1.2em}.DocSearch-Logo a{display:flex}.DocSearch-Logo svg{color:var(--docsearch-logo-color);margin-left:8px}.DocSearch-Hits:last-of-type{margin-bottom:24px}.DocSearch-Hits mark{background:none;color:var(--docsearch-highlight-color)}.DocSearch-HitsFooter{color:var(--docsearch-muted-color);display:flex;font-size:.85em;justify-content:center;margin-bottom:var(--docsearch-spacing);padding:var(--docsearch-spacing)}.DocSearch-HitsFooter a{border-bottom:1px solid;color:inherit}.DocSearch-Hit{border-radius:4px;display:flex;padding-bottom:4px;position:relative}@media screen and 
(prefers-reduced-motion:reduce){.DocSearch-Hit--deleting{transition:none}}.DocSearch-Hit--deleting{opacity:0;transition:all .25s linear}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Hit--favoriting{transition:none}}.DocSearch-Hit--favoriting{transform:scale(0);transform-origin:top center;transition:all .25s linear;transition-delay:.25s}.DocSearch-Hit a{background:var(--docsearch-hit-background);border-radius:4px;box-shadow:var(--docsearch-hit-shadow);display:block;padding-left:var(--docsearch-spacing);width:100%}.DocSearch-Hit-source{background:var(--docsearch-modal-background);color:var(--docsearch-highlight-color);font-size:.85em;font-weight:600;line-height:32px;margin:0 -4px;padding:8px 4px 0;position:sticky;top:0;z-index:10}.DocSearch-Hit-Tree{color:var(--docsearch-muted-color);height:var(--docsearch-hit-height);opacity:.5;stroke-width:var(--docsearch-icon-stroke-width);width:24px}.DocSearch-Hit[aria-selected=true] a{background-color:var(--docsearch-highlight-color)}.DocSearch-Hit[aria-selected=true] mark{text-decoration:underline}.DocSearch-Hit-Container{align-items:center;color:var(--docsearch-hit-color);display:flex;flex-direction:row;height:var(--docsearch-hit-height);padding:0 var(--docsearch-spacing) 0 0}.DocSearch-Hit-icon{height:20px;width:20px}.DocSearch-Hit-action,.DocSearch-Hit-icon{color:var(--docsearch-muted-color);stroke-width:var(--docsearch-icon-stroke-width)}.DocSearch-Hit-action{align-items:center;display:flex;height:22px;width:22px}.DocSearch-Hit-action svg{display:block;height:18px;width:18px}.DocSearch-Hit-action+.DocSearch-Hit-action{margin-left:6px}.DocSearch-Hit-action-button{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;border-radius:50%;color:inherit;cursor:pointer;padding:2px}svg.DocSearch-Hit-Select-Icon{display:none}.DocSearch-Hit[aria-selected=true] 
.DocSearch-Hit-Select-Icon{display:block}.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{background:rgba(0,0,0,.2);transition:background-color .1s ease-in}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{transition:none}}.DocSearch-Hit-action-button:focus path,.DocSearch-Hit-action-button:hover path{fill:#fff}.DocSearch-Hit-content-wrapper{display:flex;flex:1 1 auto;flex-direction:column;font-weight:500;justify-content:center;line-height:1.2em;margin:0 8px;overflow-x:hidden;position:relative;text-overflow:ellipsis;white-space:nowrap;width:80%}.DocSearch-Hit-title{font-size:.9em}.DocSearch-Hit-path{color:var(--docsearch-muted-color);font-size:.75em}.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-action,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-icon,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-path,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-text,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-title,.DocSearch-Hit[aria-selected=true] .DocSearch-Hit-Tree,.DocSearch-Hit[aria-selected=true] mark{color:var(--docsearch-hit-active-color)!important}@media screen and (prefers-reduced-motion:reduce){.DocSearch-Hit-action-button:focus,.DocSearch-Hit-action-button:hover{background:rgba(0,0,0,.2);transition:none}}.DocSearch-ErrorScreen,.DocSearch-NoResults,.DocSearch-StartScreen{font-size:.9em;margin:0 auto;padding:36px 0;text-align:center;width:80%}.DocSearch-Screen-Icon{color:var(--docsearch-muted-color);padding-bottom:12px}.DocSearch-NoResults-Prefill-List{display:inline-block;padding-bottom:24px;text-align:left}.DocSearch-NoResults-Prefill-List ul{display:inline-block;padding:8px 0 0}.DocSearch-NoResults-Prefill-List li{list-style-position:inside;list-style-type:"» 
"}.DocSearch-Prefill{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;border-radius:1em;color:var(--docsearch-highlight-color);cursor:pointer;display:inline-block;font-size:1em;font-weight:700;padding:0}.DocSearch-Prefill:focus,.DocSearch-Prefill:hover{outline:none;text-decoration:underline}.DocSearch-Footer{align-items:center;background:var(--docsearch-footer-background);border-radius:0 0 8px 8px;box-shadow:var(--docsearch-footer-shadow);display:flex;flex-direction:row-reverse;flex-shrink:0;height:var(--docsearch-footer-height);justify-content:space-between;padding:0 var(--docsearch-spacing);position:relative;-webkit-user-select:none;user-select:none;width:100%;z-index:300}.DocSearch-Commands{color:var(--docsearch-muted-color);display:flex;list-style:none;margin:0;padding:0}.DocSearch-Commands li{align-items:center;display:flex}.DocSearch-Commands li:not(:last-of-type){margin-right:.8em}.DocSearch-Commands-Key{align-items:center;background:var(--docsearch-key-gradient);border-radius:2px;box-shadow:var(--docsearch-key-shadow);display:flex;height:18px;justify-content:center;margin-right:.4em;padding:0 0 1px;color:var(--docsearch-muted-color);border:0;width:20px}@media (max-width:768px){:root{--docsearch-spacing:10px;--docsearch-footer-height:40px}.DocSearch-Dropdown{height:100%}.DocSearch-Container{height:100vh;height:-webkit-fill-available;height:calc(var(--docsearch-vh, 1vh)*100);position:absolute}.DocSearch-Footer{border-radius:0;bottom:0;position:absolute}.DocSearch-Hit-content-wrapper{display:flex;position:relative;width:80%}.DocSearch-Modal{border-radius:0;box-shadow:none;height:100vh;height:-webkit-fill-available;height:calc(var(--docsearch-vh, 1vh)*100);margin:0;max-width:100%;width:100%}.DocSearch-Dropdown{max-height:calc(var(--docsearch-vh, 1vh)*100 - var(--docsearch-searchbox-height) - var(--docsearch-spacing) - 
var(--docsearch-footer-height))}.DocSearch-Cancel{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:none;border:0;color:var(--docsearch-highlight-color);cursor:pointer;display:inline-block;flex:none;font:inherit;font-size:1em;font-weight:500;margin-left:var(--docsearch-spacing);outline:none;overflow:hidden;padding:0;-webkit-user-select:none;user-select:none;white-space:nowrap}.DocSearch-Commands,.DocSearch-Hit-Tree{display:none}}@keyframes fade-in{0%{opacity:0}to{opacity:1}}.DocSearch{--docsearch-primary-color: var(--vp-c-brand);--docsearch-highlight-color: var(--docsearch-primary-color);--docsearch-text-color: var(--vp-c-text-1);--docsearch-muted-color: var(--vp-c-text-2);--docsearch-searchbox-shadow: none;--docsearch-searchbox-background: transparent;--docsearch-searchbox-focus-background: transparent;--docsearch-key-gradient: transparent;--docsearch-key-shadow: none;--docsearch-modal-background: var(--vp-c-bg-soft);--docsearch-footer-background: var(--vp-c-bg)}.dark .DocSearch{--docsearch-modal-shadow: none;--docsearch-footer-shadow: none;--docsearch-logo-color: var(--vp-c-text-2);--docsearch-hit-background: var(--vp-c-bg-soft-mute);--docsearch-hit-color: var(--vp-c-text-2);--docsearch-hit-shadow: none}.DocSearch-Button{display:flex;justify-content:center;align-items:center;margin:0;padding:0;width:48px;height:55px;background:transparent;transition:border-color .25s}.DocSearch-Button:hover{background:transparent}.DocSearch-Button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}.DocSearch-Button:focus:not(:focus-visible){outline:none!important}@media (min-width: 768px){.DocSearch-Button{justify-content:flex-start;border:1px solid transparent;border-radius:8px;padding:0 10px 0 12px;width:100%;height:40px;background-color:var(--vp-c-bg-alt)}.DocSearch-Button:hover{border-color:var(--vp-c-brand);background:var(--vp-c-bg-alt)}}.DocSearch-Button .DocSearch-Button-Container{display:flex;align-items:center}.DocSearch-Button 
.DocSearch-Search-Icon{position:relative;width:16px;height:16px;color:var(--vp-c-text-1);fill:currentColor;transition:color .5s}.DocSearch-Button:hover .DocSearch-Search-Icon{color:var(--vp-c-text-1)}@media (min-width: 768px){.DocSearch-Button .DocSearch-Search-Icon{top:1px;margin-right:8px;width:14px;height:14px;color:var(--vp-c-text-2)}}.DocSearch-Button .DocSearch-Button-Placeholder{display:none;margin-top:2px;padding:0 16px 0 0;font-size:13px;font-weight:500;color:var(--vp-c-text-2);transition:color .5s}.DocSearch-Button:hover .DocSearch-Button-Placeholder{color:var(--vp-c-text-1)}@media (min-width: 768px){.DocSearch-Button .DocSearch-Button-Placeholder{display:inline-block}}.DocSearch-Button .DocSearch-Button-Keys{direction:ltr;display:none;min-width:auto}@media (min-width: 768px){.DocSearch-Button .DocSearch-Button-Keys{display:flex;align-items:center}}.DocSearch-Button .DocSearch-Button-Key{display:block;margin:2px 0 0;border:1px solid var(--vp-c-divider);border-right:none;border-radius:4px 0 0 4px;padding-left:6px;min-width:0;width:auto;height:22px;line-height:22px;font-family:var(--vp-font-family-base);font-size:12px;font-weight:500;transition:color .5s,border-color .5s}.DocSearch-Button .DocSearch-Button-Key+.DocSearch-Button-Key{border-right:1px solid var(--vp-c-divider);border-left:none;border-radius:0 4px 4px 0;padding-left:2px;padding-right:6px}.DocSearch-Button .DocSearch-Button-Key:first-child{font-size:1px;letter-spacing:-12px;color:transparent}.DocSearch-Button .DocSearch-Button-Key:first-child:after{content:var(--vp-meta-key);font-size:12px;letter-spacing:normal;color:var(--docsearch-muted-color)}.DocSearch-Button .DocSearch-Button-Key:first-child>*{display:none}.VPNavBarSearch{display:flex;align-items:center}@media (min-width: 768px){.VPNavBarSearch{flex-grow:1;padding-left:24px}}@media (min-width: 960px){.VPNavBarSearch{padding-left:32px}}.dark .DocSearch-Footer{border-top:1px solid var(--vp-c-divider)}.DocSearch-Form{border:1px solid 
var(--vp-c-brand);background-color:var(--vp-c-white)}.dark .DocSearch-Form{background-color:var(--vp-c-bg-soft-mute)}.DocSearch-Screen-Icon>svg{margin:auto}.VPNavBarSocialLinks[data-v-0394ad82]{display:none}@media (min-width: 1280px){.VPNavBarSocialLinks[data-v-0394ad82]{display:flex;align-items:center}}.title[data-v-86d1bed8]{display:flex;align-items:center;border-bottom:1px solid transparent;width:100%;height:var(--vp-nav-height);font-size:16px;font-weight:600;color:var(--vp-c-text-1);transition:opacity .25s}@media (min-width: 960px){.title[data-v-86d1bed8]{flex-shrink:0}.VPNavBarTitle.has-sidebar .title[data-v-86d1bed8]{border-bottom-color:var(--vp-c-divider)}}[data-v-86d1bed8] .logo{margin-right:8px;height:var(--vp-nav-logo-height)}.VPNavBarTranslations[data-v-74abcbb9]{display:none}@media (min-width: 1280px){.VPNavBarTranslations[data-v-74abcbb9]{display:flex;align-items:center}}.title[data-v-74abcbb9]{padding:0 24px 0 12px;line-height:32px;font-size:14px;font-weight:700;color:var(--vp-c-text-1)}.VPNavBar[data-v-0937f67c]{position:relative;border-bottom:1px solid transparent;padding:0 8px 0 24px;height:var(--vp-nav-height);pointer-events:none;white-space:nowrap}@media (min-width: 768px){.VPNavBar[data-v-0937f67c]{padding:0 32px}}@media (min-width: 960px){.VPNavBar.has-sidebar[data-v-0937f67c]{padding:0}.VPNavBar[data-v-0937f67c]:not(.has-sidebar):not(.top){border-bottom-color:var(--vp-c-gutter);background-color:var(--vp-nav-bg-color)}}.container[data-v-0937f67c]{display:flex;justify-content:space-between;margin:0 auto;max-width:calc(var(--vp-layout-max-width) - 64px);height:var(--vp-nav-height);pointer-events:none}.container>.title[data-v-0937f67c],.container>.content[data-v-0937f67c]{pointer-events:none}.container[data-v-0937f67c] *{pointer-events:auto}@media (min-width: 960px){.VPNavBar.has-sidebar .container[data-v-0937f67c]{max-width:100%}}.title[data-v-0937f67c]{flex-shrink:0;height:calc(var(--vp-nav-height) - 1px);transition:background-color .5s}@media 
(min-width: 960px){.VPNavBar.has-sidebar .title[data-v-0937f67c]{position:absolute;top:0;left:0;z-index:2;padding:0 32px;width:var(--vp-sidebar-width);height:var(--vp-nav-height);background-color:transparent}}@media (min-width: 1440px){.VPNavBar.has-sidebar .title[data-v-0937f67c]{padding-left:max(32px,calc((100% - (var(--vp-layout-max-width) - 64px)) / 2));width:calc((100% - (var(--vp-layout-max-width) - 64px)) / 2 + var(--vp-sidebar-width) - 32px)}}.content[data-v-0937f67c]{flex-grow:1}@media (min-width: 960px){.VPNavBar.has-sidebar .content[data-v-0937f67c]{position:relative;z-index:1;padding-right:32px;padding-left:var(--vp-sidebar-width)}}@media (min-width: 1440px){.VPNavBar.has-sidebar .content[data-v-0937f67c]{padding-right:calc((100vw - var(--vp-layout-max-width)) / 2 + 32px);padding-left:calc((100vw - var(--vp-layout-max-width)) / 2 + var(--vp-sidebar-width))}}.content-body[data-v-0937f67c]{display:flex;justify-content:flex-end;align-items:center;height:calc(var(--vp-nav-height) - 1px);transition:background-color .5s}@media (min-width: 960px){.VPNavBar:not(.top) .content-body[data-v-0937f67c]{position:relative;background-color:var(--vp-nav-bg-color)}}@media (max-width: 767px){.content-body[data-v-0937f67c]{column-gap:.5rem}}.menu+.translations[data-v-0937f67c]:before,.menu+.appearance[data-v-0937f67c]:before,.menu+.social-links[data-v-0937f67c]:before,.translations+.appearance[data-v-0937f67c]:before,.appearance+.social-links[data-v-0937f67c]:before{margin-right:8px;margin-left:8px;width:1px;height:24px;background-color:var(--vp-c-divider);content:""}.menu+.appearance[data-v-0937f67c]:before,.translations+.appearance[data-v-0937f67c]:before{margin-right:16px}.appearance+.social-links[data-v-0937f67c]:before{margin-left:16px}.social-links[data-v-0937f67c]{margin-right:-8px}@media (min-width: 960px){.VPNavBar.has-sidebar .curtain[data-v-0937f67c]{position:absolute;right:0;bottom:-31px;width:calc(100% - 
var(--vp-sidebar-width));height:32px}.VPNavBar.has-sidebar .curtain[data-v-0937f67c]:before{display:block;width:100%;height:32px;background:linear-gradient(var(--vp-c-bg),transparent 70%);content:""}}@media (min-width: 1440px){.VPNavBar.has-sidebar .curtain[data-v-0937f67c]{width:calc(100% - ((100vw - var(--vp-layout-max-width)) / 2 + var(--vp-sidebar-width)))}}.VPNavScreenMenuLink[data-v-30be0acb]{display:block;border-bottom:1px solid var(--vp-c-divider);padding:12px 0 11px;line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-1);transition:border-color .25s,color .25s}.VPNavScreenMenuLink[data-v-30be0acb]:hover{color:var(--vp-c-brand)}.VPNavScreenMenuGroupLink[data-v-6656c42a]{display:block;margin-left:12px;line-height:32px;font-size:14px;font-weight:400;color:var(--vp-c-text-1);transition:color .25s}.VPNavScreenMenuGroupLink[data-v-6656c42a]:hover{color:var(--vp-c-brand)}.VPNavScreenMenuGroupSection[data-v-8133b170]{display:block}.title[data-v-8133b170]{line-height:32px;font-size:13px;font-weight:700;color:var(--vp-c-text-2);transition:color .25s}.VPNavScreenMenuGroup[data-v-338a1689]{border-bottom:1px solid var(--vp-c-divider);height:48px;overflow:hidden;transition:border-color .5s}.VPNavScreenMenuGroup .items[data-v-338a1689]{visibility:hidden}.VPNavScreenMenuGroup.open .items[data-v-338a1689]{visibility:visible}.VPNavScreenMenuGroup.open[data-v-338a1689]{padding-bottom:10px;height:auto}.VPNavScreenMenuGroup.open .button[data-v-338a1689]{padding-bottom:6px;color:var(--vp-c-brand)}.VPNavScreenMenuGroup.open .button-icon[data-v-338a1689]{transform:rotate(45deg)}.button[data-v-338a1689]{display:flex;justify-content:space-between;align-items:center;padding:12px 4px 11px 0;width:100%;line-height:24px;font-size:14px;font-weight:500;color:var(--vp-c-text-1);transition:color .25s}.button[data-v-338a1689]:hover{color:var(--vp-c-brand)}.button-icon[data-v-338a1689]{width:14px;height:14px;fill:var(--vp-c-text-2);transition:fill .5s,transform 
.25s}.group[data-v-338a1689]:first-child{padding-top:0}.group+.group[data-v-338a1689],.group+.item[data-v-338a1689]{padding-top:4px}.VPNavScreenAppearance[data-v-add8f686]{display:flex;justify-content:space-between;align-items:center;border-radius:8px;padding:12px 14px 12px 16px;background-color:var(--vp-c-bg-soft)}.text[data-v-add8f686]{line-height:24px;font-size:12px;font-weight:500;color:var(--vp-c-text-2)}.VPNavScreenTranslations[data-v-d72aa483]{height:24px;overflow:hidden}.VPNavScreenTranslations.open[data-v-d72aa483]{height:auto}.title[data-v-d72aa483]{display:flex;align-items:center;font-size:14px;font-weight:500;color:var(--vp-c-text-1)}.icon[data-v-d72aa483]{width:16px;height:16px;fill:currentColor}.icon.lang[data-v-d72aa483]{margin-right:8px}.icon.chevron[data-v-d72aa483]{margin-left:4px}.list[data-v-d72aa483]{padding:4px 0 0 24px}.link[data-v-d72aa483]{line-height:32px;font-size:13px;color:var(--vp-c-text-1)}.VPNavScreen[data-v-69fcc70f]{position:fixed;top:calc(var(--vp-nav-height) + var(--vp-layout-top-height, 0px) + 1px);right:0;bottom:0;left:0;padding:0 32px;width:100%;background-color:var(--vp-nav-screen-bg-color);overflow-y:auto;transition:background-color .5s;pointer-events:auto}.VPNavScreen.fade-enter-active[data-v-69fcc70f],.VPNavScreen.fade-leave-active[data-v-69fcc70f]{transition:opacity .25s}.VPNavScreen.fade-enter-active .container[data-v-69fcc70f],.VPNavScreen.fade-leave-active .container[data-v-69fcc70f]{transition:transform .25s ease}.VPNavScreen.fade-enter-from[data-v-69fcc70f],.VPNavScreen.fade-leave-to[data-v-69fcc70f]{opacity:0}.VPNavScreen.fade-enter-from .container[data-v-69fcc70f],.VPNavScreen.fade-leave-to .container[data-v-69fcc70f]{transform:translateY(-8px)}@media (min-width: 768px){.VPNavScreen[data-v-69fcc70f]{display:none}}.container[data-v-69fcc70f]{margin:0 auto;padding:24px 0 
96px;max-width:288px}.menu+.translations[data-v-69fcc70f],.menu+.appearance[data-v-69fcc70f],.translations+.appearance[data-v-69fcc70f]{margin-top:24px}.menu+.social-links[data-v-69fcc70f]{margin-top:16px}.appearance+.social-links[data-v-69fcc70f]{margin-top:16px}.VPNav[data-v-7e5bc4a5]{position:relative;top:var(--vp-layout-top-height, 0px);left:0;z-index:var(--vp-z-index-nav);width:100%;pointer-events:none;transition:background-color .5s}@media (min-width: 960px){.VPNav[data-v-7e5bc4a5]{position:fixed}}.VPSidebarItem.level-0[data-v-9b797284]{padding-bottom:24px}.VPSidebarItem.collapsed.level-0[data-v-9b797284]{padding-bottom:10px}.item[data-v-9b797284]{position:relative;display:flex;width:100%}.VPSidebarItem.collapsible>.item[data-v-9b797284]{cursor:pointer}.indicator[data-v-9b797284]{position:absolute;top:6px;bottom:6px;left:-17px;width:1px;transition:background-color .25s}.VPSidebarItem.level-2.is-active>.item>.indicator[data-v-9b797284],.VPSidebarItem.level-3.is-active>.item>.indicator[data-v-9b797284],.VPSidebarItem.level-4.is-active>.item>.indicator[data-v-9b797284],.VPSidebarItem.level-5.is-active>.item>.indicator[data-v-9b797284]{background-color:var(--vp-c-brand)}.link[data-v-9b797284]{display:flex;align-items:center;flex-grow:1}.text[data-v-9b797284]{flex-grow:1;padding:4px 0;line-height:24px;font-size:14px;transition:color .25s}.VPSidebarItem.level-0 .text[data-v-9b797284]{font-weight:700;color:var(--vp-c-text-1)}.VPSidebarItem.level-1 .text[data-v-9b797284],.VPSidebarItem.level-2 .text[data-v-9b797284],.VPSidebarItem.level-3 .text[data-v-9b797284],.VPSidebarItem.level-4 .text[data-v-9b797284],.VPSidebarItem.level-5 .text[data-v-9b797284]{font-weight:500;color:var(--vp-c-text-2)}.VPSidebarItem.level-0.is-link>.item>.link:hover .text[data-v-9b797284],.VPSidebarItem.level-1.is-link>.item>.link:hover .text[data-v-9b797284],.VPSidebarItem.level-2.is-link>.item>.link:hover .text[data-v-9b797284],.VPSidebarItem.level-3.is-link>.item>.link:hover 
.text[data-v-9b797284],.VPSidebarItem.level-4.is-link>.item>.link:hover .text[data-v-9b797284],.VPSidebarItem.level-5.is-link>.item>.link:hover .text[data-v-9b797284]{color:var(--vp-c-brand)}.VPSidebarItem.level-0.has-active>.item>.text[data-v-9b797284],.VPSidebarItem.level-1.has-active>.item>.text[data-v-9b797284],.VPSidebarItem.level-2.has-active>.item>.text[data-v-9b797284],.VPSidebarItem.level-3.has-active>.item>.text[data-v-9b797284],.VPSidebarItem.level-4.has-active>.item>.text[data-v-9b797284],.VPSidebarItem.level-5.has-active>.item>.text[data-v-9b797284],.VPSidebarItem.level-0.has-active>.item>.link>.text[data-v-9b797284],.VPSidebarItem.level-1.has-active>.item>.link>.text[data-v-9b797284],.VPSidebarItem.level-2.has-active>.item>.link>.text[data-v-9b797284],.VPSidebarItem.level-3.has-active>.item>.link>.text[data-v-9b797284],.VPSidebarItem.level-4.has-active>.item>.link>.text[data-v-9b797284],.VPSidebarItem.level-5.has-active>.item>.link>.text[data-v-9b797284]{color:var(--vp-c-text-1)}.VPSidebarItem.level-0.is-active>.item .link>.text[data-v-9b797284],.VPSidebarItem.level-1.is-active>.item .link>.text[data-v-9b797284],.VPSidebarItem.level-2.is-active>.item .link>.text[data-v-9b797284],.VPSidebarItem.level-3.is-active>.item .link>.text[data-v-9b797284],.VPSidebarItem.level-4.is-active>.item .link>.text[data-v-9b797284],.VPSidebarItem.level-5.is-active>.item .link>.text[data-v-9b797284]{color:var(--vp-c-brand)}.caret[data-v-9b797284]{display:flex;justify-content:center;align-items:center;margin-right:-7px;width:32px;height:32px;color:var(--vp-c-text-3);cursor:pointer;transition:color .25s;flex-shrink:0}.item:hover .caret[data-v-9b797284]{color:var(--vp-c-text-2)}.item:hover .caret[data-v-9b797284]:hover{color:var(--vp-c-text-1)}.caret-icon[data-v-9b797284]{width:18px;height:18px;fill:currentColor;transform:rotate(90deg);transition:transform .25s}.VPSidebarItem.collapsed .caret-icon[data-v-9b797284]{transform:rotate(0)}.VPSidebarItem.level-1 
.items[data-v-9b797284],.VPSidebarItem.level-2 .items[data-v-9b797284],.VPSidebarItem.level-3 .items[data-v-9b797284],.VPSidebarItem.level-4 .items[data-v-9b797284],.VPSidebarItem.level-5 .items[data-v-9b797284]{border-left:1px solid var(--vp-c-divider);padding-left:16px}.VPSidebarItem.collapsed .items[data-v-9b797284]{display:none}.VPSidebar[data-v-845b8fc6]{position:fixed;top:var(--vp-layout-top-height, 0px);bottom:0;left:0;z-index:var(--vp-z-index-sidebar);padding:32px 32px 96px;width:calc(100vw - 64px);max-width:320px;background-color:var(--vp-sidebar-bg-color);opacity:0;box-shadow:var(--vp-c-shadow-3);overflow-x:hidden;overflow-y:auto;transform:translate(-100%);transition:opacity .5s,transform .25s ease;overscroll-behavior:contain}.VPSidebar.open[data-v-845b8fc6]{opacity:1;visibility:visible;transform:translate(0);transition:opacity .25s,transform .5s cubic-bezier(.19,1,.22,1)}.dark .VPSidebar[data-v-845b8fc6]{box-shadow:var(--vp-shadow-1)}@media (min-width: 960px){.VPSidebar[data-v-845b8fc6]{z-index:1;padding-top:var(--vp-nav-height);padding-bottom:128px;width:var(--vp-sidebar-width);max-width:100%;background-color:var(--vp-sidebar-bg-color);opacity:1;visibility:visible;box-shadow:none;transform:translate(0)}}@media (min-width: 1440px){.VPSidebar[data-v-845b8fc6]{padding-left:max(32px,calc((100% - (var(--vp-layout-max-width) - 64px)) / 2));width:calc((100% - (var(--vp-layout-max-width) - 64px)) / 2 + var(--vp-sidebar-width) - 32px)}}@media (min-width: 960px){.curtain[data-v-845b8fc6]{position:sticky;top:-64px;left:0;z-index:1;margin-top:calc(var(--vp-nav-height) * -1);margin-right:-32px;margin-left:-32px;height:var(--vp-nav-height);background-color:var(--vp-sidebar-bg-color)}}.nav[data-v-845b8fc6]{outline:0}.group+.group[data-v-845b8fc6]{border-top:1px solid var(--vp-c-divider);padding-top:10px}@media (min-width: 960px){.group[data-v-845b8fc6]{padding-top:10px;width:calc(var(--vp-sidebar-width) - 
64px)}}.VPSkipLink[data-v-ae3e3f51]{top:8px;left:8px;padding:8px 16px;z-index:999;border-radius:8px;font-size:12px;font-weight:700;text-decoration:none;color:var(--vp-c-brand);box-shadow:var(--vp-shadow-3);background-color:var(--vp-c-bg)}.VPSkipLink[data-v-ae3e3f51]:focus{height:auto;width:auto;clip:auto;clip-path:none}@media (min-width: 1280px){.VPSkipLink[data-v-ae3e3f51]{top:14px;left:16px}}.Layout[data-v-255ec12d]{display:flex;flex-direction:column;min-height:100vh}.VPHomeSponsors[data-v-3c6e61c2]{border-top:1px solid var(--vp-c-gutter);padding:88px 24px 96px;background-color:var(--vp-c-bg)}.container[data-v-3c6e61c2]{margin:0 auto;max-width:1152px}.love[data-v-3c6e61c2]{margin:0 auto;width:28px;height:28px;color:var(--vp-c-text-3)}.icon[data-v-3c6e61c2]{width:28px;height:28px;fill:currentColor}.message[data-v-3c6e61c2]{margin:0 auto;padding-top:10px;max-width:320px;text-align:center;line-height:24px;font-size:16px;font-weight:500;color:var(--vp-c-text-2)}.sponsors[data-v-3c6e61c2]{padding-top:32px}.action[data-v-3c6e61c2]{padding-top:40px;text-align:center}.VPTeamPage[data-v-10b00018]{padding-bottom:96px}@media (min-width: 768px){.VPTeamPage[data-v-10b00018]{padding-bottom:128px}}.VPTeamPageSection+.VPTeamPageSection[data-v-10b00018-s],.VPTeamMembers+.VPTeamPageSection[data-v-10b00018-s]{margin-top:64px}.VPTeamMembers+.VPTeamMembers[data-v-10b00018-s]{margin-top:24px}@media (min-width: 768px){.VPTeamPageTitle+.VPTeamPageSection[data-v-10b00018-s]{margin-top:16px}.VPTeamPageSection+.VPTeamPageSection[data-v-10b00018-s],.VPTeamMembers+.VPTeamPageSection[data-v-10b00018-s]{margin-top:96px}}.VPTeamMembers[data-v-10b00018-s]{padding:0 24px}@media (min-width: 768px){.VPTeamMembers[data-v-10b00018-s]{padding:0 48px}}@media (min-width: 960px){.VPTeamMembers[data-v-10b00018-s]{padding:0 64px}}.VPTeamPageTitle[data-v-bf2cbdac]{padding:48px 32px;text-align:center}@media (min-width: 768px){.VPTeamPageTitle[data-v-bf2cbdac]{padding:64px 48px 48px}}@media (min-width: 
960px){.VPTeamPageTitle[data-v-bf2cbdac]{padding:80px 64px 48px}}.title[data-v-bf2cbdac]{letter-spacing:0;line-height:44px;font-size:36px;font-weight:500}@media (min-width: 768px){.title[data-v-bf2cbdac]{letter-spacing:-.5px;line-height:56px;font-size:48px}}.lead[data-v-bf2cbdac]{margin:0 auto;max-width:512px;padding-top:12px;line-height:24px;font-size:16px;font-weight:500;color:var(--vp-c-text-2)}@media (min-width: 768px){.lead[data-v-bf2cbdac]{max-width:592px;letter-spacing:.15px;line-height:28px;font-size:20px}}.VPTeamPageSection[data-v-b1a88750]{padding:0 32px}@media (min-width: 768px){.VPTeamPageSection[data-v-b1a88750]{padding:0 48px}}@media (min-width: 960px){.VPTeamPageSection[data-v-b1a88750]{padding:0 64px}}.title[data-v-b1a88750]{position:relative;margin:0 auto;max-width:1152px;text-align:center;color:var(--vp-c-text-2)}.title-line[data-v-b1a88750]{position:absolute;top:16px;left:0;width:100%;height:1px;background-color:var(--vp-c-divider)}.title-text[data-v-b1a88750]{position:relative;display:inline-block;padding:0 24px;letter-spacing:0;line-height:32px;font-size:20px;font-weight:500;background-color:var(--vp-c-bg)}.lead[data-v-b1a88750]{margin:0 auto;max-width:480px;padding-top:12px;text-align:center;line-height:24px;font-size:16px;font-weight:500;color:var(--vp-c-text-2)}.members[data-v-b1a88750]{padding-top:40px}.VPTeamMembersItem[data-v-a3462077]{display:flex;flex-direction:column;gap:2px;border-radius:12px;width:100%;height:100%;overflow:hidden}.VPTeamMembersItem.small .profile[data-v-a3462077]{padding:32px}.VPTeamMembersItem.small .data[data-v-a3462077]{padding-top:20px}.VPTeamMembersItem.small .avatar[data-v-a3462077]{width:64px;height:64px}.VPTeamMembersItem.small .name[data-v-a3462077]{line-height:24px;font-size:16px}.VPTeamMembersItem.small .affiliation[data-v-a3462077]{padding-top:4px;line-height:20px;font-size:14px}.VPTeamMembersItem.small .desc[data-v-a3462077]{padding-top:12px;line-height:20px;font-size:14px}.VPTeamMembersItem.small 
.links[data-v-a3462077]{margin:0 -16px -20px;padding:10px 0 0}.VPTeamMembersItem.medium .profile[data-v-a3462077]{padding:48px 32px}.VPTeamMembersItem.medium .data[data-v-a3462077]{padding-top:24px;text-align:center}.VPTeamMembersItem.medium .avatar[data-v-a3462077]{width:96px;height:96px}.VPTeamMembersItem.medium .name[data-v-a3462077]{letter-spacing:.15px;line-height:28px;font-size:20px}.VPTeamMembersItem.medium .affiliation[data-v-a3462077]{padding-top:4px;font-size:16px}.VPTeamMembersItem.medium .desc[data-v-a3462077]{padding-top:16px;max-width:288px;font-size:16px}.VPTeamMembersItem.medium .links[data-v-a3462077]{margin:0 -16px -12px;padding:16px 12px 0}.profile[data-v-a3462077]{flex-grow:1;background-color:var(--vp-c-bg-soft)}.data[data-v-a3462077]{text-align:center}.avatar[data-v-a3462077]{position:relative;flex-shrink:0;margin:0 auto;border-radius:50%;box-shadow:var(--vp-shadow-3)}.avatar-img[data-v-a3462077]{position:absolute;top:0;right:0;bottom:0;left:0;border-radius:50%;object-fit:cover}.name[data-v-a3462077]{margin:0;font-weight:600}.affiliation[data-v-a3462077]{margin:0;font-weight:500;color:var(--vp-c-text-2)}.org.link[data-v-a3462077]{color:var(--vp-c-text-2);transition:color .25s}.org.link[data-v-a3462077]:hover{color:var(--vp-c-brand)}.desc[data-v-a3462077]{margin:0 auto}.desc[data-v-a3462077] a{font-weight:500;color:var(--vp-c-brand);text-decoration-style:dotted;transition:color .25s}.links[data-v-a3462077]{display:flex;justify-content:center;height:56px}.sp-link[data-v-a3462077]{display:flex;justify-content:center;align-items:center;text-align:center;padding:16px;font-size:14px;font-weight:500;color:var(--vp-c-sponsor);background-color:var(--vp-c-bg-soft);transition:color .25s,background-color .25s}.sp .sp-link.link[data-v-a3462077]:hover,.sp 
.sp-link.link[data-v-a3462077]:focus{outline:none;color:var(--vp-c-white);background-color:var(--vp-c-sponsor)}.sp-icon[data-v-a3462077]{margin-right:8px;width:16px;height:16px;fill:currentColor}.VPTeamMembers.small .container[data-v-04685dce]{grid-template-columns:repeat(auto-fit,minmax(224px,1fr))}.VPTeamMembers.small.count-1 .container[data-v-04685dce]{max-width:276px}.VPTeamMembers.small.count-2 .container[data-v-04685dce]{max-width:576px}.VPTeamMembers.small.count-3 .container[data-v-04685dce]{max-width:876px}.VPTeamMembers.medium .container[data-v-04685dce]{grid-template-columns:repeat(auto-fit,minmax(256px,1fr))}@media (min-width: 375px){.VPTeamMembers.medium .container[data-v-04685dce]{grid-template-columns:repeat(auto-fit,minmax(288px,1fr))}}.VPTeamMembers.medium.count-1 .container[data-v-04685dce]{max-width:368px}.VPTeamMembers.medium.count-2 .container[data-v-04685dce]{max-width:760px}.container[data-v-04685dce]{display:grid;gap:24px;margin:0 auto;max-width:1152px}.github-link{margin-top:2rem}.note{background-color:#eaecef66;border-radius:6px;-webkit-border-radius:6px;-moz-border-radius:6px;padding:.5rem 1.5rem;margin:1rem 0rem}.warning,.warn{background-color:#f63;color:#333;border-radius:6px;-webkit-border-radius:6px;-moz-border-radius:6px;padding:.5rem 1.5rem}.loading{color:gray}@media screen and (max-width: 719px){.note{border-radius:0;-webkit-border-radius:0px;-moz-border-radius:0px;padding:.5rem 1.5rem;margin:0rem -1.5rem}}canvas{background-color:#000}.webgpu_example_button{height:33px;font-size:14px;padding:0 8px;margin-top:16px;border:1px solid rgba(60,60,60,.15);border-radius:8px}.webgpu_example_button:hover{border-color:#059669}.auto-github-link{margin-top:1rem;line-height:40px}#wasm-example canvas{background-color:#000}#wasm-example button{height:33px;font-size:14px;padding:0 8px;border:1px solid rgba(60,60,60,.15);border-radius:8px}#wasm-example 
button:hover{border-color:#059669}body{margin:0}.a{color:#5a6}#simuverse_container{position:fixed;top:64px;left:0;right:0;bottom:0;background-color:#353535;min-width:450px;min-height:500px}#alert,#loading{text-align:center;justify-content:center;align-items:center;color:#fff;font-size:20px;margin-top:64px} diff --git a/assets/template.3e66407c.png b/assets/template.ZaQEvDur.png similarity index 100% rename from assets/template.3e66407c.png rename to assets/template.ZaQEvDur.png diff --git a/assets/tools.c24f4092.png b/assets/tools.Eg3x9gRT.png similarity index 100% rename from assets/tools.c24f4092.png rename to assets/tools.Eg3x9gRT.png diff --git a/assets/trace.b6c48e7e.png b/assets/trace.PJbClu6C.png similarity index 100% rename from assets/trace.b6c48e7e.png rename to assets/trace.PJbClu6C.png diff --git a/assets/triangle.59fc4a55.png b/assets/triangle.o8T8tm7l.png similarity index 100% rename from assets/triangle.59fc4a55.png rename to assets/triangle.o8T8tm7l.png diff --git a/assets/triangle.a62d2783.png b/assets/triangle.swPHx-oi.png similarity index 100% rename from assets/triangle.a62d2783.png rename to assets/triangle.swPHx-oi.png diff --git a/assets/tutorial3-pipeline-triangle.d560b6f2.png b/assets/tutorial3-pipeline-triangle.OIWoZQoT.png similarity index 100% rename from assets/tutorial3-pipeline-triangle.d560b6f2.png rename to assets/tutorial3-pipeline-triangle.OIWoZQoT.png diff --git a/assets/tutorial3-pipeline-vertices.eaa25f33.png b/assets/tutorial3-pipeline-vertices.potCe9z0.png similarity index 100% rename from assets/tutorial3-pipeline-vertices.eaa25f33.png rename to assets/tutorial3-pipeline-vertices.potCe9z0.png diff --git a/assets/upside-down.3b0ae8b3.png b/assets/upside-down.sY39m84B.png similarity index 100% rename from assets/upside-down.3b0ae8b3.png rename to assets/upside-down.sY39m84B.png diff --git a/assets/vb_desc.4c4c981a.png b/assets/vb_desc.PyYTSl9s.png similarity index 100% rename from assets/vb_desc.4c4c981a.png rename to 
assets/vb_desc.PyYTSl9s.png diff --git a/assets/wasm/compute-pipeline.js b/assets/wasm/compute-pipeline.js index 932897f94..c51671089 100644 --- a/assets/wasm/compute-pipeline.js +++ b/assets/wasm/compute-pipeline.js @@ -214,23 +214,23 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_26(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h300369ce7e710f4e(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h2213d552877e232c(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_29(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h0c747d32a9ab072a(arg0, arg1); +function __wbg_adapter_43(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hbebeb9ac6216930c(arg0, arg1); } function __wbg_adapter_46(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, arg2) { - wasm.wasm_bindgen__convert__closures__invoke1_mut__h49796bab97d4a65f(arg0, arg1, addHeapObject(arg2)); + wasm.wasm_bindgen__convert__closures__invoke1_mut__h05e4f7a2fc1c0dc9(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_54(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + 
wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1305,56 +1305,56 @@ function __wbg_get_imports() { const ret = wasm.memory; return addHeapObject(ret); }; + imports.wbg.__wbindgen_closure_wrapper492 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_26); + return addHeapObject(ret); + }; imports.wbg.__wbindgen_closure_wrapper494 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper496 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, __wbg_adapter_29); + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper498 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper500 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper502 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper504 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper506 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, 
__wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper508 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, __wbg_adapter_26); - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_closure_wrapper510 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 136, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_43); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper698 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 214, __wbg_adapter_46); + imports.wbg.__wbindgen_closure_wrapper699 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 215, __wbg_adapter_46); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1030 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 426, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper1031 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 427, __wbg_adapter_49); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1032 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 426, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper1033 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 427, __wbg_adapter_49); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1049 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 433, __wbg_adapter_54); + imports.wbg.__wbindgen_closure_wrapper1050 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 434, __wbg_adapter_54); return addHeapObject(ret); }; diff --git a/assets/wasm/compute-pipeline_bg.wasm b/assets/wasm/compute-pipeline_bg.wasm index 2186fa81e..26a6f81d0 100644 Binary files a/assets/wasm/compute-pipeline_bg.wasm and b/assets/wasm/compute-pipeline_bg.wasm differ diff 
--git a/assets/wasm/hilbert-curve.js b/assets/wasm/hilbert-curve.js index dea0414b9..2750fadce 100644 --- a/assets/wasm/hilbert-curve.js +++ b/assets/wasm/hilbert-curve.js @@ -214,23 +214,23 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_26(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h06a85094f1e211aa(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h0fee9440d57d55f4(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_31(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h340a3003d4b60592(arg0, arg1); +function __wbg_adapter_43(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h0f44ce51fcd9cc45(arg0, arg1); } function __wbg_adapter_46(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, arg2) { - wasm.wasm_bindgen__convert__closures__invoke1_mut__h49796bab97d4a65f(arg0, arg1, addHeapObject(arg2)); + wasm.wasm_bindgen__convert__closures__invoke1_mut__h05e4f7a2fc1c0dc9(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_54(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, 
addHeapObject(arg2)); } function handleError(f, args) { @@ -1314,7 +1314,7 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper386 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_31); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper388 = function(arg0, arg1, arg2) { @@ -1338,23 +1338,23 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper398 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_43); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper576 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 165, __wbg_adapter_46); + imports.wbg.__wbindgen_closure_wrapper577 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 166, __wbg_adapter_46); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper898 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 377, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper899 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 378, __wbg_adapter_49); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper900 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 377, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper901 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 378, __wbg_adapter_49); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper917 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 384, __wbg_adapter_54); + imports.wbg.__wbindgen_closure_wrapper918 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 385, __wbg_adapter_54); return addHeapObject(ret); }; 
diff --git a/assets/wasm/hilbert-curve_bg.wasm b/assets/wasm/hilbert-curve_bg.wasm index e19f34127..b3d5260e2 100644 Binary files a/assets/wasm/hilbert-curve_bg.wasm and b/assets/wasm/hilbert-curve_bg.wasm differ diff --git a/assets/wasm/tutorial1-window.js b/assets/wasm/tutorial1-window.js index 0a4b81814..d78c6477d 100644 --- a/assets/wasm/tutorial1-window.js +++ b/assets/wasm/tutorial1-window.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_18(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h07c0c76d19be22af(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h0af5f2df7fad9b7f(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_31(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hd0cb2462e88df5fc(arg0, arg1); +function __wbg_adapter_35(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h8f3c68c767ea0c15(arg0, arg1); } function __wbg_adapter_38(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_41(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } /** 
@@ -722,7 +722,7 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper205 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 56, __wbg_adapter_31); + const ret = makeMutClosure(arg0, arg1, 56, __wbg_adapter_18); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper207 = function(arg0, arg1, arg2) { @@ -730,15 +730,15 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper209 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 56, __wbg_adapter_18); + const ret = makeMutClosure(arg0, arg1, 56, __wbg_adapter_35); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper332 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 134, __wbg_adapter_38); + imports.wbg.__wbindgen_closure_wrapper331 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 133, __wbg_adapter_38); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper401 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 164, __wbg_adapter_41); + imports.wbg.__wbindgen_closure_wrapper400 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 163, __wbg_adapter_41); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial1-window_bg.wasm b/assets/wasm/tutorial1-window_bg.wasm index 7ec75a5c8..906dd93a5 100644 Binary files a/assets/wasm/tutorial1-window_bg.wasm and b/assets/wasm/tutorial1-window_bg.wasm differ diff --git a/assets/wasm/tutorial10-lighting.js b/assets/wasm/tutorial10-lighting.js index d3a31865f..8b9b266b1 100644 --- a/assets/wasm/tutorial10-lighting.js +++ b/assets/wasm/tutorial10-lighting.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_32(arg0, arg1, arg2) { - wasm.wasm_bindgen__convert__closures__invoke1_mut__h0bfdfd0684d286ea(arg0, arg1, addHeapObject(arg2)); + 
wasm.wasm_bindgen__convert__closures__invoke1_mut__h0b8fb5708fe0b24e(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_41(arg0, arg1) { - wasm.wasm_bindgen__convert__closures__invoke0_mut__h87ff0dd39bcfc6bc(arg0, arg1); +function __wbg_adapter_47(arg0, arg1) { + wasm.wasm_bindgen__convert__closures__invoke0_mut__hc7c05ca4a315887a(arg0, arg1); } function __wbg_adapter_52(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_55(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1771,7 +1771,7 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper628 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 102, __wbg_adapter_41); + const ret = makeMutClosure(arg0, arg1, 102, __wbg_adapter_32); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper630 = function(arg0, arg1, arg2) { @@ -1783,19 +1783,19 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper634 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 102, __wbg_adapter_32); + const ret = makeMutClosure(arg0, arg1, 102, __wbg_adapter_47); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper636 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 102, __wbg_adapter_32); return 
addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1148 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 365, __wbg_adapter_52); + imports.wbg.__wbindgen_closure_wrapper1149 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 366, __wbg_adapter_52); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper2715 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 975, __wbg_adapter_55); + imports.wbg.__wbindgen_closure_wrapper2718 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 976, __wbg_adapter_55); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial10-lighting_bg.wasm b/assets/wasm/tutorial10-lighting_bg.wasm index 6700a12ea..a530c001c 100644 Binary files a/assets/wasm/tutorial10-lighting_bg.wasm and b/assets/wasm/tutorial10-lighting_bg.wasm differ diff --git a/assets/wasm/tutorial11-normals.js b/assets/wasm/tutorial11-normals.js index a63e780ee..46e28c093 100644 --- a/assets/wasm/tutorial11-normals.js +++ b/assets/wasm/tutorial11-normals.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_32(arg0, arg1, arg2) { - wasm.wasm_bindgen__convert__closures__invoke1_mut__h11b46933a7e39a5e(arg0, arg1, addHeapObject(arg2)); + wasm.wasm_bindgen__convert__closures__invoke1_mut__h1639a88787a8f17f(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_35(arg0, arg1) { - wasm.wasm_bindgen__convert__closures__invoke0_mut__he579c1b5ca5e9216(arg0, arg1); +function __wbg_adapter_47(arg0, arg1) { + wasm.wasm_bindgen__convert__closures__invoke0_mut__h63606ec861b47a98(arg0, arg1); } function __wbg_adapter_52(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + 
wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_55(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1754,48 +1754,48 @@ function __wbg_get_imports() { const ret = wasm.memory; return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper578 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper577 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper580 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_35); + imports.wbg.__wbindgen_closure_wrapper579 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper582 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper581 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper584 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper583 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper586 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper585 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); return addHeapObject(ret); }; - 
imports.wbg.__wbindgen_closure_wrapper588 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper587 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper590 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper589 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper592 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper591 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_47); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper594 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper593 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 92, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1153 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 366, __wbg_adapter_52); + imports.wbg.__wbindgen_closure_wrapper1152 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 365, __wbg_adapter_52); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper2720 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 976, __wbg_adapter_55); + imports.wbg.__wbindgen_closure_wrapper2721 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 975, __wbg_adapter_55); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial11-normals_bg.wasm b/assets/wasm/tutorial11-normals_bg.wasm index 568acf5a5..afebe3595 100644 Binary files a/assets/wasm/tutorial11-normals_bg.wasm and b/assets/wasm/tutorial11-normals_bg.wasm differ diff --git a/assets/wasm/tutorial12-camera.js b/assets/wasm/tutorial12-camera.js index 3536a59a6..68ca3ea7a 
100644 --- a/assets/wasm/tutorial12-camera.js +++ b/assets/wasm/tutorial12-camera.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_32(arg0, arg1, arg2) { - wasm.wasm_bindgen__convert__closures__invoke1_mut__h07dfeb7a4055b111(arg0, arg1, addHeapObject(arg2)); + wasm.wasm_bindgen__convert__closures__invoke1_mut__h47808bd57bbe371f(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_41(arg0, arg1) { - wasm.wasm_bindgen__convert__closures__invoke0_mut__h49ce77123c90135e(arg0, arg1); +function __wbg_adapter_37(arg0, arg1) { + wasm.wasm_bindgen__convert__closures__invoke0_mut__h7ebc7250a54a358c(arg0, arg1); } function __wbg_adapter_52(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_55(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1754,48 +1754,48 @@ function __wbg_get_imports() { const ret = wasm.memory; return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper772 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper773 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper774 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper775 = function(arg0, arg1, arg2) { const ret = 
makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper776 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper777 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_37); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper778 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper779 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper780 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_41); + imports.wbg.__wbindgen_closure_wrapper781 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper782 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper783 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper784 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper785 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper786 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper787 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper788 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper789 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 216, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1159 = function(arg0, arg1, arg2) { - const 
ret = makeMutClosure(arg0, arg1, 370, __wbg_adapter_52); + imports.wbg.__wbindgen_closure_wrapper1158 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 369, __wbg_adapter_52); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper2726 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 980, __wbg_adapter_55); + imports.wbg.__wbindgen_closure_wrapper2727 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 979, __wbg_adapter_55); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial12-camera_bg.wasm b/assets/wasm/tutorial12-camera_bg.wasm index 92e56f466..e0c98f696 100644 Binary files a/assets/wasm/tutorial12-camera_bg.wasm and b/assets/wasm/tutorial12-camera_bg.wasm differ diff --git a/assets/wasm/tutorial2-surface.js b/assets/wasm/tutorial2-surface.js index 9cebfd8a6..06922b714 100644 --- a/assets/wasm/tutorial2-surface.js +++ b/assets/wasm/tutorial2-surface.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_26(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h770d3cfa19c2d7a4(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h0a82c96b5aa38731(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_37(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h611c26adb7826813(arg0, arg1); + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hb3c8af13390b6bf9(arg0, arg1); } function __wbg_adapter_46(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + 
wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } /** @@ -1618,12 +1618,12 @@ function __wbg_get_imports() { const ret = makeMutClosure(arg0, arg1, 88, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper619 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 159, __wbg_adapter_46); + imports.wbg.__wbindgen_closure_wrapper618 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 158, __wbg_adapter_46); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper4182 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 1578, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper4186 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 1579, __wbg_adapter_49); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial2-surface_bg.wasm b/assets/wasm/tutorial2-surface_bg.wasm index 19353a498..13e2be403 100644 Binary files a/assets/wasm/tutorial2-surface_bg.wasm and b/assets/wasm/tutorial2-surface_bg.wasm differ diff --git a/assets/wasm/tutorial3-pipeline.js b/assets/wasm/tutorial3-pipeline.js index db0304702..08ea4d56a 100644 --- a/assets/wasm/tutorial3-pipeline.js +++ b/assets/wasm/tutorial3-pipeline.js @@ -213,20 +213,20 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } -function __wbg_adapter_26(arg0, arg1, arg2) { - 
wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h1065d27eb2e1fa1a(arg0, arg1, addHeapObject(arg2)); +function __wbg_adapter_26(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hdad0ea79fb5bf4b1(arg0, arg1); } -function __wbg_adapter_39(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h1eab21021fbe6734(arg0, arg1); +function __wbg_adapter_29(arg0, arg1, arg2) { + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h3d1b7823fa3a0f20(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_46(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1591,43 +1591,43 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper484 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_29); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper486 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); + 
const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_29); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper488 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_29); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper490 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_29); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper492 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_29); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper494 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_39); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_29); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper496 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_29); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper498 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_29); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper608 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 148, __wbg_adapter_46); + imports.wbg.__wbindgen_closure_wrapper609 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 149, __wbg_adapter_46); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper683 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 179, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper684 = function(arg0, arg1, 
arg2) { + const ret = makeMutClosure(arg0, arg1, 180, __wbg_adapter_49); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial3-pipeline_bg.wasm b/assets/wasm/tutorial3-pipeline_bg.wasm index 30ea8c7ae..946339fff 100644 Binary files a/assets/wasm/tutorial3-pipeline_bg.wasm and b/assets/wasm/tutorial3-pipeline_bg.wasm differ diff --git a/assets/wasm/tutorial4-buffer.js b/assets/wasm/tutorial4-buffer.js index 3bd4debfe..cad611f25 100644 --- a/assets/wasm/tutorial4-buffer.js +++ b/assets/wasm/tutorial4-buffer.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_26(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h19e750331e7f5829(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h0d688f6c922153bf(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_35(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h0c1c41b1d11b243f(arg0, arg1); + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h1b021f461a59207c(arg0, arg1); } function __wbg_adapter_46(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + 
wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1622,12 +1622,12 @@ function __wbg_get_imports() { const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper606 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 146, __wbg_adapter_46); + imports.wbg.__wbindgen_closure_wrapper605 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 145, __wbg_adapter_46); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper681 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 177, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper680 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 176, __wbg_adapter_49); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial4-buffer_bg.wasm b/assets/wasm/tutorial4-buffer_bg.wasm index 7f482fb9c..aab902976 100644 Binary files a/assets/wasm/tutorial4-buffer_bg.wasm and b/assets/wasm/tutorial4-buffer_bg.wasm differ diff --git a/assets/wasm/tutorial5-textures.js b/assets/wasm/tutorial5-textures.js index c8ce79e64..cffb2c99d 100644 --- a/assets/wasm/tutorial5-textures.js +++ b/assets/wasm/tutorial5-textures.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_26(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h1cc6ecc38eedc15c(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h1afc40b88819ca10(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_31(arg0, arg1) { - 
wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hde2555ba9b93338c(arg0, arg1); +function __wbg_adapter_35(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h2cf1bb9a9bb76b0a(arg0, arg1); } function __wbg_adapter_46(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1586,48 +1586,48 @@ function __wbg_get_imports() { const ret = wasm.memory; return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper593 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_26); + imports.wbg.__wbindgen_closure_wrapper594 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper595 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_26); + imports.wbg.__wbindgen_closure_wrapper596 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper597 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_31); + 
imports.wbg.__wbindgen_closure_wrapper598 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper599 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_26); + imports.wbg.__wbindgen_closure_wrapper600 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper601 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_26); + imports.wbg.__wbindgen_closure_wrapper602 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_35); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper603 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_26); + imports.wbg.__wbindgen_closure_wrapper604 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper605 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_26); + imports.wbg.__wbindgen_closure_wrapper606 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper607 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_26); + imports.wbg.__wbindgen_closure_wrapper608 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper609 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 153, __wbg_adapter_26); + imports.wbg.__wbindgen_closure_wrapper610 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 154, __wbg_adapter_26); 
return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper635 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 174, __wbg_adapter_46); + imports.wbg.__wbindgen_closure_wrapper636 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 175, __wbg_adapter_46); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper2182 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 776, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper2185 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 777, __wbg_adapter_49); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial5-textures_bg.wasm b/assets/wasm/tutorial5-textures_bg.wasm index 4ee1b9ebe..00f4cde8b 100644 Binary files a/assets/wasm/tutorial5-textures_bg.wasm and b/assets/wasm/tutorial5-textures_bg.wasm differ diff --git a/assets/wasm/tutorial6-uniforms.js b/assets/wasm/tutorial6-uniforms.js index 63ae43897..f706a3ef1 100644 --- a/assets/wasm/tutorial6-uniforms.js +++ b/assets/wasm/tutorial6-uniforms.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_26(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h488bd8efd3f3013b(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h2c2be46c9c53b868(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_33(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hbf226d2b02b3c666(arg0, arg1); +function __wbg_adapter_39(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h78ee740d6cee36d3(arg0, arg1); } function __wbg_adapter_46(arg0, arg1, arg2) { - 
wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1599,7 +1599,7 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper520 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 94, __wbg_adapter_33); + const ret = makeMutClosure(arg0, arg1, 94, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper522 = function(arg0, arg1, arg2) { @@ -1611,7 +1611,7 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper526 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 94, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 94, __wbg_adapter_39); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper528 = function(arg0, arg1, arg2) { @@ -1622,12 +1622,12 @@ function __wbg_get_imports() { const ret = makeMutClosure(arg0, arg1, 94, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper637 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 175, __wbg_adapter_46); + imports.wbg.__wbindgen_closure_wrapper636 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 174, __wbg_adapter_46); return addHeapObject(ret); }; - 
imports.wbg.__wbindgen_closure_wrapper2184 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 778, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper2185 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 777, __wbg_adapter_49); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial6-uniforms_bg.wasm b/assets/wasm/tutorial6-uniforms_bg.wasm index 60483cfc1..ce965a6c8 100644 Binary files a/assets/wasm/tutorial6-uniforms_bg.wasm and b/assets/wasm/tutorial6-uniforms_bg.wasm differ diff --git a/assets/wasm/tutorial7-instancing.js b/assets/wasm/tutorial7-instancing.js index 672ce2d55..c576ada06 100644 --- a/assets/wasm/tutorial7-instancing.js +++ b/assets/wasm/tutorial7-instancing.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_26(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h105cd232c70acf30(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h1915d66911f5ff2c(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_33(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h250f956d6d0b50ee(arg0, arg1); +function __wbg_adapter_29(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hb10e7d18c837666e(arg0, arg1); } function __wbg_adapter_46(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, 
arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1591,7 +1591,7 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper597 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 151, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 151, __wbg_adapter_29); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper599 = function(arg0, arg1, arg2) { @@ -1599,7 +1599,7 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper601 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 151, __wbg_adapter_33); + const ret = makeMutClosure(arg0, arg1, 151, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper603 = function(arg0, arg1, arg2) { @@ -1626,7 +1626,7 @@ function __wbg_get_imports() { const ret = makeMutClosure(arg0, arg1, 175, __wbg_adapter_46); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper2189 = function(arg0, arg1, arg2) { + imports.wbg.__wbindgen_closure_wrapper2191 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 777, __wbg_adapter_49); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial7-instancing_bg.wasm b/assets/wasm/tutorial7-instancing_bg.wasm index 9e7e6eb08..4f0589cf8 100644 Binary files a/assets/wasm/tutorial7-instancing_bg.wasm and b/assets/wasm/tutorial7-instancing_bg.wasm differ diff --git a/assets/wasm/tutorial8-depth.js b/assets/wasm/tutorial8-depth.js index 370e08628..986599b98 100644 --- a/assets/wasm/tutorial8-depth.js +++ b/assets/wasm/tutorial8-depth.js @@ -214,19 +214,19 @@ function 
makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_26(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h2aefc82b0b4e7b2a(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h25800e4456414ef7(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_35(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h43a1676f3cf7e9f2(arg0, arg1); +function __wbg_adapter_33(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h6c56f95415770d0a(arg0, arg1); } function __wbg_adapter_46(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_49(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1599,11 +1599,11 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper586 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 137, __wbg_adapter_26); + const ret = makeMutClosure(arg0, arg1, 137, __wbg_adapter_33); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper588 = function(arg0, arg1, arg2) { - const ret = 
makeMutClosure(arg0, arg1, 137, __wbg_adapter_35); + const ret = makeMutClosure(arg0, arg1, 137, __wbg_adapter_26); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper590 = function(arg0, arg1, arg2) { @@ -1622,12 +1622,12 @@ function __wbg_get_imports() { const ret = makeMutClosure(arg0, arg1, 137, __wbg_adapter_26); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper647 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 177, __wbg_adapter_46); + imports.wbg.__wbindgen_closure_wrapper646 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 176, __wbg_adapter_46); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper2194 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 779, __wbg_adapter_49); + imports.wbg.__wbindgen_closure_wrapper2195 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 778, __wbg_adapter_49); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial8-depth_bg.wasm b/assets/wasm/tutorial8-depth_bg.wasm index 1ef3fc257..90f061ef0 100644 Binary files a/assets/wasm/tutorial8-depth_bg.wasm and b/assets/wasm/tutorial8-depth_bg.wasm differ diff --git a/assets/wasm/tutorial9-models.js b/assets/wasm/tutorial9-models.js index 9916c0103..dbdcfd435 100644 --- a/assets/wasm/tutorial9-models.js +++ b/assets/wasm/tutorial9-models.js @@ -214,19 +214,19 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_32(arg0, arg1, arg2) { - wasm.wasm_bindgen__convert__closures__invoke1_mut__h0abba28a8063876a(arg0, arg1, addHeapObject(arg2)); + wasm.wasm_bindgen__convert__closures__invoke1_mut__h041e738bb75040f2(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_41(arg0, arg1) { - wasm.wasm_bindgen__convert__closures__invoke0_mut__h151af09169eabe9a(arg0, arg1); +function __wbg_adapter_43(arg0, arg1) { + wasm.wasm_bindgen__convert__closures__invoke0_mut__h3cf3e5a45fe4ea53(arg0, arg1); } 
function __wbg_adapter_52(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_55(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1754,48 +1754,48 @@ function __wbg_get_imports() { const ret = wasm.memory; return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper744 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper741 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper746 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper743 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper748 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper745 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper750 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_32); + 
imports.wbg.__wbindgen_closure_wrapper747 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper752 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_41); + imports.wbg.__wbindgen_closure_wrapper749 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper754 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper751 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_43); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper756 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper753 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper758 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper755 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper760 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 192, __wbg_adapter_32); + imports.wbg.__wbindgen_closure_wrapper757 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 189, __wbg_adapter_32); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1152 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 370, __wbg_adapter_52); + imports.wbg.__wbindgen_closure_wrapper1151 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 369, __wbg_adapter_52); 
return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper2718 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 980, __wbg_adapter_55); + imports.wbg.__wbindgen_closure_wrapper2719 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 979, __wbg_adapter_55); return addHeapObject(ret); }; diff --git a/assets/wasm/tutorial9-models_bg.wasm b/assets/wasm/tutorial9-models_bg.wasm index efe56c12d..faff2d9c7 100644 Binary files a/assets/wasm/tutorial9-models_bg.wasm and b/assets/wasm/tutorial9-models_bg.wasm differ diff --git a/assets/wasm/vertex-animation.js b/assets/wasm/vertex-animation.js index 393d778fb..bad09e527 100644 --- a/assets/wasm/vertex-animation.js +++ b/assets/wasm/vertex-animation.js @@ -214,23 +214,23 @@ function makeMutClosure(arg0, arg1, dtor, f) { return real; } function __wbg_adapter_30(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h12d83ff195b13ab5(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h0e5a27e039975e73(arg0, arg1, addHeapObject(arg2)); } -function __wbg_adapter_45(arg0, arg1) { - wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hc2c2eae2deb7494e(arg0, arg1); +function __wbg_adapter_43(arg0, arg1) { + wasm._dyn_core__ops__function__FnMut_____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hbffe33672dfeaada(arg0, arg1); } function __wbg_adapter_50(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h904146b8a6c6db5f(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h81cab1e04708970f(arg0, arg1, addHeapObject(arg2)); } function 
__wbg_adapter_53(arg0, arg1, arg2) { - wasm.wasm_bindgen__convert__closures__invoke1_mut__h49796bab97d4a65f(arg0, arg1, addHeapObject(arg2)); + wasm.wasm_bindgen__convert__closures__invoke1_mut__h05e4f7a2fc1c0dc9(arg0, arg1, addHeapObject(arg2)); } function __wbg_adapter_58(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h87d82818838775fc(arg0, arg1, addHeapObject(arg2)); + wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h94792571f3b4178c(arg0, arg1, addHeapObject(arg2)); } function handleError(f, args) { @@ -1380,31 +1380,31 @@ function __wbg_get_imports() { return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper411 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_30); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_43); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper413 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_45); + const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_30); return addHeapObject(ret); }; imports.wbg.__wbindgen_closure_wrapper415 = function(arg0, arg1, arg2) { const ret = makeMutClosure(arg0, arg1, 76, __wbg_adapter_30); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper774 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 227, __wbg_adapter_50); + imports.wbg.__wbindgen_closure_wrapper773 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 226, __wbg_adapter_50); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1113 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 439, __wbg_adapter_53); + imports.wbg.__wbindgen_closure_wrapper1112 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 438, __wbg_adapter_53); return addHeapObject(ret); }; - 
imports.wbg.__wbindgen_closure_wrapper1115 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 439, __wbg_adapter_53); + imports.wbg.__wbindgen_closure_wrapper1114 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 438, __wbg_adapter_53); return addHeapObject(ret); }; - imports.wbg.__wbindgen_closure_wrapper1143 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 451, __wbg_adapter_58); + imports.wbg.__wbindgen_closure_wrapper1142 = function(arg0, arg1, arg2) { + const ret = makeMutClosure(arg0, arg1, 450, __wbg_adapter_58); return addHeapObject(ret); }; diff --git a/assets/wasm/vertex-animation_bg.wasm b/assets/wasm/vertex-animation_bg.wasm index 494c90729..65c2c14ce 100644 Binary files a/assets/wasm/vertex-animation_bg.wasm and b/assets/wasm/vertex-animation_bg.wasm differ diff --git a/assets/workgroups.e0cdd028.png b/assets/workgroups.ZXZigntI.png similarity index 100% rename from assets/workgroups.e0cdd028.png rename to assets/workgroups.ZXZigntI.png diff --git a/beginner/tutorial1-window.html b/beginner/tutorial1-window.html index 1e81aeace..8063d5b96 100644 --- a/beginner/tutorial1-window.html +++ b/beginner/tutorial1-window.html @@ -5,135 +5,137 @@ 依赖与窗口 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

依赖与窗口

部分读者可能已经熟悉如何在 Rust 中打开窗口程序,且有自己偏好的窗口管理库。但本教程是为所有人设计的,所以不免要涉及这部分的内容。所幸你可以跳过这部分,但有一点值得了解,即无论使用什么样的窗口解决方案,都需要实现 raw-window-handle 里定义的 raw_window_handle()raw_display_handle() 两个抽象接口。如果有兴趣自己动手来为 wgpu 实现一个基础的窗口,可以参考 wgpu-in-app与 Android App 集成这一章节也有详情的介绍。

我们要使用哪些包?

我们将尽量保持基础部分的简单性。后续我们会逐渐添加依赖,先列出相关的 Cargo.toml 依赖项如下:

toml
[dependencies]
-winit = "0.28.7"
-env_logger = "0.10"
-log = "0.4"
-wgpu = "0.17"

使用 Rust 的新版解析器

自 0.10 版本起,wgpu 需要使用 cargo 的 新版特性解析器,这在 Rust 的 2021 edition(即任何基于 Rust 1.56.0 或更新版本的新项目)中是默认启用的。但如果你仍在使用 2018 edition,那么就需要在单项目 Cargo.toml[package] 配置中,或者在⼯作空间的根级 Cargo.toml[workspace] 配置中添加 resolver = "2" 项。

关于 env_logger

通过 env_logger::init() 来启用日志是非常重要的。当 wgpu 遇到各类错误时,它都会用一条通用性的消息抛出 panic,并通过日志来记录实际的错误信息。 也就是说,如果不添加 env_logger::init(),wgpu 将静默地退出,从而令你非常困惑!
(下面的代码中已经启用)

创建一个新项目

运行 cargo new xxx,xxx 是指你的项目名称。
(下面的例子中我使用了 tutorial1_window)

示例代码

这一部分没有什么特别之处,所以直接贴出完整的代码。只需将其粘贴到你的 main.rs 中即可:

rust
use winit::{
-    event::*,
-    event_loop::{ControlFlow, EventLoop},
-    window::WindowBuilder,
-};
+    
Skip to content

依赖与窗口

部分读者可能已经熟悉如何在 Rust 中打开窗口程序,且有自己偏好的窗口管理库。但本教程是为所有人设计的,所以不免要涉及这部分的内容。所幸你可以跳过这部分,但有一点值得了解,即无论使用什么样的窗口解决方案,都需要实现 raw-window-handle 里定义的 raw_window_handle()raw_display_handle() 两个抽象接口。如果有兴趣自己动手来为 wgpu 实现一个基础的窗口,可以参考 wgpu-in-app与 Android App 集成这一章节也有详情的介绍。

我们要使用哪些包?

我们将尽量保持基础部分的简单性。后续我们会逐渐添加依赖,先列出相关的 Cargo.toml 依赖项如下:

toml
[dependencies]
+winit = "0.28.7"
+env_logger = "0.10"
+log = "0.4"
+wgpu = "0.17"

使用 Rust 的新版解析器

自 0.10 版本起,wgpu 需要使用 cargo 的 新版特性解析器,这在 Rust 的 2021 edition(即任何基于 Rust 1.56.0 或更新版本的新项目)中是默认启用的。但如果你仍在使用 2018 edition,那么就需要在单项目 Cargo.toml[package] 配置中,或者在⼯作空间的根级 Cargo.toml[workspace] 配置中添加 resolver = "2" 项。

关于 env_logger

通过 env_logger::init() 来启用日志是非常重要的。当 wgpu 遇到各类错误时,它都会用一条通用性的消息抛出 panic,并通过日志来记录实际的错误信息。 也就是说,如果不添加 env_logger::init(),wgpu 将静默地退出,从而令你非常困惑!
(下面的代码中已经启用)

创建一个新项目

运行 cargo new xxx,xxx 是指你的项目名称。
(下面的例子中我使用了 tutorial1_window)

示例代码

这一部分没有什么特别之处,所以直接贴出完整的代码。只需将其粘贴到你的 main.rs 中即可:

rust
use winit::{
+    event::*,
+    event_loop::{ControlFlow, EventLoop},
+    window::WindowBuilder,
+};
 
-pub fn run() {
-    env_logger::init();
-    let event_loop = EventLoop::new();
-    let window = WindowBuilder::new().build(&event_loop).unwrap();
+pub fn run() {
+    env_logger::init();
+    let event_loop = EventLoop::new();
+    let window = WindowBuilder::new().build(&event_loop).unwrap();
 
-    event_loop.run(move |event, _, control_flow| match event {
-        Event::WindowEvent {
-            ref event,
-            window_id,
-        } if window_id == window.id() => match event {
-            WindowEvent::CloseRequested
-            | WindowEvent::KeyboardInput {
-                input:
-                    KeyboardInput {
-                        state: ElementState::Pressed,
-                        virtual_keycode: Some(VirtualKeyCode::Escape),
-                        ..
-                    },
-                ..
-            } => *control_flow = ControlFlow::Exit,
-            _ => {}
-        },
-        _ => {}
-    });
-}

上述代码所做的全部工作就是创建了一个窗口,并在用户关闭或按下 escape 键前使其保持打开。接下来,我们需要在入口函数中运行这些代码。很简单,只需在 main() 函数中调用 run(),然后运行!

rust
fn main() {
-    run();
-}

(其中 tutorial1_window 是你之前用 cargo 创建的项目的名称)

当你只打算支持桌面环境时,上边这些就是全部所要做的!在下一个教程中,我们将真正开始使用 wgpu!

添加对 web 的支持

如果讲完了这个关于 WebGPU 的教程,却不提如何在 web 上使用它,那么这个教程就是不完整的。幸运的是,让一个 wgpu 程序在浏览器中运行并不难。

让我们从修改 Cargo.toml 开始:

toml
[lib]
-crate-type = ["cdylib", "rlib"]

这几行告诉 cargo 允许项目构建(build)一个本地的 Rust 静态库(rlib)和一个 C/C++ 兼容库(cdylib)。 我们需要 rlib 来在桌面环境中运行 wgpu,需要 cdylib 来构建在浏览器中运行的 Web Assembly。

仅在需要将项目做为其他 Rust 项目的(crate)提供时,[lib] 项的配置才是必须的。所以我们的示例程序可以省略上面这一步。

添加上述 [lib] 内容依赖于像原作者那样将主要代码写入一个 lib.rs 文件,而如果想要通过下文的 wasm-pack 方法构建,则需要进行上述步骤。

Web Assembly

Web Assembly 即 WASM,是大多数现代浏览器支持的二进制格式,它令 Rust 等底层语言能在网页上运行。这允许我们用 Rust 编写应用程序,并使用几行 Javascript 来加载它到 Web 浏览器中运行。

现在,我们仅需添加一些专门用于在 WASM 中运行的依赖项:

toml
[dependencies]
-cfg-if = "1"
-# 其他常规依赖...
+    event_loop.run(move |event, _, control_flow| match event {
+        Event::WindowEvent {
+            ref event,
+            window_id,
+        } if window_id == window.id() => match event {
+            WindowEvent::CloseRequested
+            | WindowEvent::KeyboardInput {
+                input:
+                    KeyboardInput {
+                        state: ElementState::Pressed,
+                        virtual_keycode: Some(VirtualKeyCode::Escape),
+                        ..
+                    },
+                ..
+            } => *control_flow = ControlFlow::Exit,
+            _ => {}
+        },
+        _ => {}
+    });
+}

上述代码所做的全部工作就是创建了一个窗口,并在用户关闭或按下 escape 键前使其保持打开。接下来,我们需要在入口函数中运行这些代码。很简单,只需在 main() 函数中调用 run(),然后运行!

rust
fn main() {
+    run();
+}

(其中 tutorial1_window 是你之前用 cargo 创建的项目的名称)

当你只打算支持桌面环境时,上边这些就是全部所要做的!在下一个教程中,我们将真正开始使用 wgpu!

添加对 web 的支持

如果讲完了这个关于 WebGPU 的教程,却不提如何在 web 上使用它,那么这个教程就是不完整的。幸运的是,让一个 wgpu 程序在浏览器中运行并不难。

让我们从修改 Cargo.toml 开始:

toml
[lib]
+crate-type = ["cdylib", "rlib"]

这几行告诉 cargo 允许项目构建(build)一个本地的 Rust 静态库(rlib)和一个 C/C++ 兼容库(cdylib)。 我们需要 rlib 来在桌面环境中运行 wgpu,需要 cdylib 来构建在浏览器中运行的 Web Assembly。

仅在需要将项目做为其他 Rust 项目的(crate)提供时,[lib] 项的配置才是必须的。所以我们的示例程序可以省略上面这一步。

添加上述 [lib] 内容依赖于像原作者那样将主要代码写入一个 lib.rs 文件,而如果想要通过下文的 wasm-pack 方法构建,则需要进行上述步骤。

Web Assembly

Web Assembly 即 WASM,是大多数现代浏览器支持的二进制格式,它令 Rust 等底层语言能在网页上运行。这允许我们用 Rust 编写应用程序,并使用几行 Javascript 来加载它到 Web 浏览器中运行。

现在,我们仅需添加一些专门用于在 WASM 中运行的依赖项:

toml
[dependencies]
+cfg-if = "1"
+# 其他常规依赖...
 
-[target.'cfg(target_arch = "wasm32")'.dependencies]
-console_error_panic_hook = "0.1.7"
-console_log = "1.0"
-wasm-bindgen = "0.2.87"
-wasm-bindgen-futures = "0.4.34"
-web-sys = { version = "0.3.64", features = [
-    "Document",
-    "Window",
-    "Element",
-]}

cfg-if提供了一个宏,使得更加容易管理特定平台的代码。

[target.'cfg(target_arch = "wasm32")'.dependencies] 行告诉 cargo,如果我们的目标是 wasm32 架构,则只包括这些依赖项。接下来的几个依赖项只是让我们与 javascript 的交互更容易。

  • console_error_panic_hook 配置 panic! 宏以将错误发送到 javascript 控制台。如果没有这个,当遇到程序崩溃时,你就会对导致崩溃的原因一无所知。
  • console_log 实现了 log API。它将所有日志发送到 javascript 控制台。它还可以配置为仅发送特定级别的日志,这非常适合用于调试。
  • 当我们想在大多数当前浏览器上运行时,就需要在 wgpu 上启用 WebGL 功能。因为目前只在 Firefox Nightly、Chrome/Edge 113+、 Chrome/Edge Canary 才支持直接使用 WebGPU API。
    为了简单起见,教程大部分代码的编译会使用 WebGL 功能,直到 WebGPU API 达到一个更稳定的状态。
    如果你想了解更多详细信息,请查看 wgpu 源码仓库 上的 web 编译指南
  • wasm-bindgen 是此列表中最重要的依赖项。它负责生成样板代码,并告诉浏览器如何使用我们的项目。它还允许我们在 Rust 中公开可在 Javascript 中使用的函数,反之亦然。
    我不会详细介绍 wasm-bindgen,所以如果你需要入门(或者是复习),请查看这里
  • web-sys 是一个包含了许多在 javascript 程序中可用的函数结构体的工具箱,如:get_element_by_idappend_childfeatures = [...] 数组里列出的是我们目前最低限度需要的功能。

更多示例代码

首先, 我们需要在 main.rs 内引入 wasm-bindgen :

rust
#[cfg(target_arch="wasm32")]
-use wasm_bindgen::prelude::*;

接下来,需要告诉 wasm-bindgen 在 WASM 被加载后执行我们的 run() 函数。

rust
#[cfg_attr(target_arch="wasm32", wasm_bindgen(start))]
-pub async fn run() {
-    // 省略的代码...
-}

然后需要根据是否在 WASM 环境来切换我们正在使用的日志。在 run() 函数内添加以下代码替换 env_logger::init() 行。

rust
cfg_if::cfg_if! {
-    if #[cfg(target_arch = "wasm32")] {
-        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
-        console_log::init_with_level(log::Level::Warn).expect("无法初始化日志库");
-    } else {
-        env_logger::init();
-    }
-}

上边的代码判断了构建目标,在 web 构建中设置 console_logconsole_error_panic_hook。这很重要,因为 env_logger 目前不支持 Web Assembly。

另一种实现

在第 3~8 章,run() 函数及遍历 event_loop 的代码被统一封装到了 framework.rs 中, 还定义了 Action trait 来抽象每一章中不同的 State 。 然后通过调用 wasm_bindgen_futures 包的 spawn_local 函数来创建 State 实例并处理 JS 异常。

第 1 ~ 2 章的代码通过 cargo run-wasm --bin xxx 运行时,在浏览器的控制台中会看到的 ...Using exceptions for control flow, don't mind me. This isn't actually an error! 错误现在被消除了:

rust
#[cfg(target_arch = "wasm32")]
-pub fn run<A: Action + 'static>() {
-    // ...
-    wasm_bindgen_futures::spawn_local(async move {
-        let (event_loop, instance) = create_action_instance::<A>().await;
-        let run_closure = Closure::once_into_js(move || start_event_loop::<A>(event_loop, instance));
+[target.'cfg(target_arch = "wasm32")'.dependencies]
+console_error_panic_hook = "0.1.7"
+console_log = "1.0"
+wasm-bindgen = "0.2.87"
+wasm-bindgen-futures = "0.4.34"
+web-sys = { version = "0.3.64", features = [
+    "Document",
+    "Window",
+    "Element",
+]}

cfg-if提供了一个宏,使得更加容易管理特定平台的代码。

[target.'cfg(target_arch = "wasm32")'.dependencies] 行告诉 cargo,如果我们的目标是 wasm32 架构,则只包括这些依赖项。接下来的几个依赖项只是让我们与 javascript 的交互更容易。

  • console_error_panic_hook 配置 panic! 宏以将错误发送到 javascript 控制台。如果没有这个,当遇到程序崩溃时,你就会对导致崩溃的原因一无所知。
  • console_log 实现了 log API。它将所有日志发送到 javascript 控制台。它还可以配置为仅发送特定级别的日志,这非常适合用于调试。
  • 当我们想在大多数当前浏览器上运行时,就需要在 wgpu 上启用 WebGL 功能。因为目前只在 Firefox Nightly、Chrome/Edge 113+、 Chrome/Edge Canary 才支持直接使用 WebGPU API。
    为了简单起见,教程大部分代码的编译会使用 WebGL 功能,直到 WebGPU API 达到一个更稳定的状态。
    如果你想了解更多详细信息,请查看 wgpu 源码仓库 上的 web 编译指南
  • wasm-bindgen 是此列表中最重要的依赖项。它负责生成样板代码,并告诉浏览器如何使用我们的项目。它还允许我们在 Rust 中公开可在 Javascript 中使用的函数,反之亦然。
    我不会详细介绍 wasm-bindgen,所以如果你需要入门(或者是复习),请查看这里
  • web-sys 是一个包含了许多在 javascript 程序中可用的函数结构体的工具箱,如:get_element_by_idappend_childfeatures = [...] 数组里列出的是我们目前最低限度需要的功能。

更多示例代码

首先, 我们需要在 main.rs 内引入 wasm-bindgen :

rust
#[cfg(target_arch="wasm32")]
+use wasm_bindgen::prelude::*;

接下来,需要告诉 wasm-bindgen 在 WASM 被加载后执行我们的 run() 函数。

rust
#[cfg_attr(target_arch="wasm32", wasm_bindgen(start))]
+pub async fn run() {
+    // 省略的代码...
+}

然后需要根据是否在 WASM 环境来切换我们正在使用的日志。在 run() 函数内添加以下代码替换 env_logger::init() 行。

rust
cfg_if::cfg_if! {
+    if #[cfg(target_arch = "wasm32")] {
+        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
+        console_log::init_with_level(log::Level::Warn).expect("无法初始化日志库");
+    } else {
+        env_logger::init();
+    }
+}

上边的代码判断了构建目标,在 web 构建中设置 console_logconsole_error_panic_hook。这很重要,因为 env_logger 目前不支持 Web Assembly。

另一种实现

在第 3~8 章,run() 函数及遍历 event_loop 的代码被统一封装到了 framework.rs 中, 还定义了 Action trait 来抽象每一章中不同的 State 。 然后通过调用 wasm_bindgen_futures 包的 spawn_local 函数来创建 State 实例并处理 JS 异常。

第 1 ~ 2 章的代码通过 cargo run-wasm --bin xxx 运行时,在浏览器的控制台中会看到的 ...Using exceptions for control flow, don't mind me. This isn't actually an error! 错误现在被消除了:

rust
#[cfg(target_arch = "wasm32")]
+pub fn run<A: Action + 'static>() {
+    // ...
+    wasm_bindgen_futures::spawn_local(async move {
+        let (event_loop, instance) = create_action_instance::<A>().await;
+        let run_closure = Closure::once_into_js(move || start_event_loop::<A>(event_loop, instance));
 
-        // 处理运行过程中抛出的 JS 异常。
-        // 否则 wasm_bindgen_futures 队列将中断,且不再处理任何任务。
-        if let Err(error) = call_catch(&run_closure) {
-            // ...
-        }
-    }
-}

接下来,在创建了事件循环与窗口之后,我们需要在应用程序所在的 HTML 网页中添加一个画布(canvas):

rust
#[cfg(target_arch = "wasm32")]
-{
-    // Winit 不允许用 CSS 调整大小,所以在 web 环境里我们必须手动设置大小。
-    use winit::dpi::PhysicalSize;
-    window.set_inner_size(PhysicalSize::new(450, 400));
+        // 处理运行过程中抛出的 JS 异常。
+        // 否则 wasm_bindgen_futures 队列将中断,且不再处理任何任务。
+        if let Err(error) = call_catch(&run_closure) {
+            // ...
+        }
+    }
+}

接下来,在创建了事件循环与窗口之后,我们需要在应用程序所在的 HTML 网页中添加一个画布(canvas):

rust
#[cfg(target_arch = "wasm32")]
+{
+    // Winit 不允许用 CSS 调整大小,所以在 web 环境里我们必须手动设置大小。
+    use winit::dpi::PhysicalSize;
+    window.set_inner_size(PhysicalSize::new(450, 400));
 
-    use winit::platform::web::WindowExtWebSys;
-    web_sys::window()
-        .and_then(|win| win.document())
-        .and_then(|doc| {
-            let dst = doc.get_element_by_id("wasm-example")?;
-            let canvas = web_sys::Element::from(window.canvas());
-            dst.append_child(&canvas).ok()?;
-            Some(())
-        })
-        .expect("无法将画布添加到网页上");
-}

"wasm-example" 这个 ID 是针对我的项目(也就是本教程)的。你可以你在 HTML 中使用任何 ID 来代替,或者,你也可以直接将画布添加到 <body> 中,就像 wgpu 源码仓库中所做的那样,这部分最终由你决定。

上边这些就是我们现在需要的所有 web 专用代码。接下来要做的就是构建 Web Assembly 本身。

译者注:以下关于 wasm-pack 的内容来自原文。但是由于它和 WebGPU 接口都尚未稳定,译者暂时不推荐用它构建此教程中的项目。参考本教程和原作者的仓库,这里给出一个使用 cargo build 的简易构建过程,如有疏漏请 PR 指正。

  1. 如果要支持 WebGL,那么在 Cargo.toml 中加入以下描述来启用 cargo 的 --features 参数,参考 wgpu 的运行指南
toml
[features]
-default = []
-webgl = ["wgpu/webgl"]
  1. 运行 cargo build --target wasm32-unknown-unknown --features webgl
  2. 安装 wasm-bindgen 并运行:
shell
cargo install -f wasm-bindgen-cli --version 0.2.84
-wasm-bindgen --no-typescript --out-dir {你的输出目录,例如 ./tutorial1_window_output} --web {wasm 所在的目录,例如 .\target\wasm32-unknown-unknown\release\tutorial1_window.wasm}
  1. 此时会得到一个包含 .wasm 和 .js 文件的文件夹。可以用下文的 html 引入该 .js 文件。如果直接在浏览器打开该 html 文件,可能遇到 CORS 问题;如果正常运行,则可能出现一个警告 Using exceptions for control flow, don't mind me. This isn't actually an error!,忽略即可。

Wasm Pack

你可以只用 wasm-bindgen 来构建一个 wgpu 应用程序,但我在这样做的时候遇到了一些问题。首先,你需要在电脑上安装 wasm-bindgen,并将其作为一个依赖项。作为依赖关系的版本需要与你安装的版本完全一致,否则构建将会失败。

为了克服这个缺点,并使阅读这篇教程的人更容易上手,我选择在组合中加入 wasm-pack。wasm-pack 可以为你安装正确的 wasm-bindgen 版本,而且它还支持为不同类型的 web 目标进行构建:浏览器、NodeJS 和 webpack 等打包工具。

使用 wasm-pack 前,你需要先安装

完成安装后,就可以用它来构建我们的项目了。当你的项目是一个独立的(crate)时,可以直接使用 wasm-pack build。如果是工作区(workspace),就必须指定你要构建的包。想象一下是一个名为 game 的目录,你就会使用:

bash
wasm-pack build game

译者注wasm-pack build 需要如之前所说的那样加入 [lib] 等来构建静态库。

一旦 wasm-pack 完成构建,在你的目录下就会有一个 pkg 目录,运行 WASM 代码所需的所有 javascript 代码都在这里。然后在 javascript 中导入 WASM 模块:

js
const init = await import("./pkg/game.js");
-init().then(() => console.log("WASM Loaded"));

这个网站使用了 VitePress,并且是在 Vue 组件中加载 WASM。如果想看看我是怎么做的,可以查看这里

如果打算在一个普通的 HTML 网站中使用你的 WASM 模块,只需告诉 wasm-pack 以 web 为构建目标:

bash
wasm-pack build --target web

然后就可以在一个 ES6 模块中运行 WASM 代码:

html
<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="UTF-8" />
-    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <title>Pong with WASM</title>
-  </head>
+    use winit::platform::web::WindowExtWebSys;
+    web_sys::window()
+        .and_then(|win| win.document())
+        .and_then(|doc| {
+            let dst = doc.get_element_by_id("wasm-example")?;
+            let canvas = web_sys::Element::from(window.canvas());
+            dst.append_child(&canvas).ok()?;
+            Some(())
+        })
+        .expect("无法将画布添加到网页上");
+}

"wasm-example" 这个 ID 是针对我的项目(也就是本教程)的。你可以你在 HTML 中使用任何 ID 来代替,或者,你也可以直接将画布添加到 <body> 中,就像 wgpu 源码仓库中所做的那样,这部分最终由你决定。

上边这些就是我们现在需要的所有 web 专用代码。接下来要做的就是构建 Web Assembly 本身。

译者注:以下关于 wasm-pack 的内容来自原文。但是由于它和 WebGPU 接口都尚未稳定,译者暂时不推荐用它构建此教程中的项目。参考本教程和原作者的仓库,这里给出一个使用 cargo build 的简易构建过程,如有疏漏请 PR 指正。

  1. 如果要支持 WebGL,那么在 Cargo.toml 中加入以下描述来启用 cargo 的 --features 参数,参考 wgpu 的运行指南
toml
[features]
+default = []
+webgl = ["wgpu/webgl"]
  1. 运行 cargo build --target wasm32-unknown-unknown --features webgl
  2. 安装 wasm-bindgen 并运行:
shell
cargo install -f wasm-bindgen-cli --version 0.2.84
+wasm-bindgen --no-typescript --out-dir {你的输出目录,例如 ./tutorial1_window_output} --web {wasm 所在的目录,例如 .\target\wasm32-unknown-unknown\release\tutorial1_window.wasm}
  1. 此时会得到一个包含 .wasm 和 .js 文件的文件夹。可以用下文的 html 引入该 .js 文件。如果直接在浏览器打开该 html 文件,可能遇到 CORS 问题;如果正常运行,则可能出现一个警告 Using exceptions for control flow, don't mind me. This isn't actually an error!,忽略即可。

Wasm Pack

你可以只用 wasm-bindgen 来构建一个 wgpu 应用程序,但我在这样做的时候遇到了一些问题。首先,你需要在电脑上安装 wasm-bindgen,并将其作为一个依赖项。作为依赖关系的版本需要与你安装的版本完全一致,否则构建将会失败。

为了克服这个缺点,并使阅读这篇教程的人更容易上手,我选择在组合中加入 wasm-pack。wasm-pack 可以为你安装正确的 wasm-bindgen 版本,而且它还支持为不同类型的 web 目标进行构建:浏览器、NodeJS 和 webpack 等打包工具。

使用 wasm-pack 前,你需要先安装

完成安装后,就可以用它来构建我们的项目了。当你的项目是一个独立的(crate)时,可以直接使用 wasm-pack build。如果是工作区(workspace),就必须指定你要构建的包。想象一下是一个名为 game 的目录,你就会使用:

bash
wasm-pack build game

译者注wasm-pack build 需要如之前所说的那样加入 [lib] 等来构建静态库。

一旦 wasm-pack 完成构建,在你的目录下就会有一个 pkg 目录,运行 WASM 代码所需的所有 javascript 代码都在这里。然后在 javascript 中导入 WASM 模块:

js
const init = await import("./pkg/game.js");
+init().then(() => console.log("WASM Loaded"));

这个网站使用了 VitePress,并且是在 Vue 组件中加载 WASM。如果想看看我是怎么做的,可以查看这里

如果打算在一个普通的 HTML 网站中使用你的 WASM 模块,只需告诉 wasm-pack 以 web 为构建目标:

bash
wasm-pack build --target web

然后就可以在一个 ES6 模块中运行 WASM 代码:

html
<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Pong with WASM</title>
+  </head>
 
-  <body id="wasm-example">
-    <script type="module">
-      import init from "./pkg/pong.js";
-      init().then(() => {
-        console.log("WASM Loaded");
-      });
-    </script>
-    <style>
-      canvas {
-        background-color: black;
-      }
-    </style>
-  </body>
-</html>

点击下面的按钮查看示例代码运行!

- + <body id="wasm-example"> + <script type="module"> + import init from "./pkg/pong.js"; + init().then(() => { + console.log("WASM Loaded"); + }); + </script> + <style> + canvas { + background-color: black; + } + </style> + </body> +</html>

点击下面的按钮查看示例代码运行!

+ \ No newline at end of file diff --git a/beginner/tutorial2-surface/index.html b/beginner/tutorial2-surface/index.html index 48195b1c4..fa40b18cc 100644 --- a/beginner/tutorial2-surface/index.html +++ b/beginner/tutorial2-surface/index.html @@ -5,271 +5,273 @@ 展示平面 (Surface) | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

展示平面 (Surface)

封装 State

为方便起见,我们将所有字段封装在一个结构体内,并在其上添加一些函数:

rust
// lib.rs
-use winit::window::Window;
+    
Skip to content

展示平面 (Surface)

封装 State

为方便起见,我们将所有字段封装在一个结构体内,并在其上添加一些函数:

rust
// lib.rs
+use winit::window::Window;
 
-struct State {
-    surface: wgpu::Surface,
-    device: wgpu::Device,
-    queue: wgpu::Queue,
-    config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
-}
+struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+}
 
-impl State {
-    // 创建某些 wgpu 类型需要使用异步代码
-    async fn new(window: &Window) -> Self {
-        todo!()
-    }
+impl State {
+    // 创建某些 wgpu 类型需要使用异步代码
+    async fn new(window: &Window) -> Self {
+        todo!()
+    }
 
-    fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-        todo!()
-    }
+    fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+        todo!()
+    }
 
-    fn input(&mut self, event: &WindowEvent) -> bool {
-        todo!()
-    }
+    fn input(&mut self, event: &WindowEvent) -> bool {
+        todo!()
+    }
 
-    fn update(&mut self) {
-        todo!()
-    }
+    fn update(&mut self) {
+        todo!()
+    }
 
-    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
-        todo!()
-    }
-}

此处省略了 State 的字段概述,在后续章节中解释这些函数背后的代码时,它们才会变得更有意义。

surfacedevicequeueconfig 等对象是每个 wgpu 程序都需要的,且它们的创建过程涉及到很多模板代码,所以,从第 3 章开始,我将它们统一封装到了 AppSurface 对象中。

State 中的这些函数在所有章节示例中都有用到,所以,在第 3 ~ 8 章,我将其抽象为了 Action trait:

rust
pub trait Action {
-    fn new(app: app_surface::AppSurface) -> Self;
-    fn get_adapter_info(&self) -> wgpu::AdapterInfo;
-    fn current_window_id(&self) -> WindowId;
-    fn resize(&mut self);
-    fn request_redraw(&mut self);
-    fn input(&mut self, _event: &WindowEvent) -> bool {
-        false
-    }
-    fn update(&mut self) {}
-    fn render(&mut self) -> Result<(), wgpu::SurfaceError>;
-}

实例化 State

这段代码很简单,但还是值得好好讲讲:

rust
impl State {
-    // ...
-    async fn new(window: &Window) -> Self {
-        let size = window.inner_size();
+    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
+        todo!()
+    }
+}

此处省略了 State 的字段概述,在后续章节中解释这些函数背后的代码时,它们才会变得更有意义。

surfacedevicequeueconfig 等对象是每个 wgpu 程序都需要的,且它们的创建过程涉及到很多模板代码,所以,从第 3 章开始,我将它们统一封装到了 AppSurface 对象中。

State 中的这些函数在所有章节示例中都有用到,所以,在第 3 ~ 8 章,我将其抽象为了 Action trait:

rust
pub trait Action {
+    fn new(app: app_surface::AppSurface) -> Self;
+    fn get_adapter_info(&self) -> wgpu::AdapterInfo;
+    fn current_window_id(&self) -> WindowId;
+    fn resize(&mut self);
+    fn request_redraw(&mut self);
+    fn input(&mut self, _event: &WindowEvent) -> bool {
+        false
+    }
+    fn update(&mut self) {}
+    fn render(&mut self) -> Result<(), wgpu::SurfaceError>;
+}

实例化 State

这段代码很简单,但还是值得好好讲讲:

rust
impl State {
+    // ...
+    async fn new(window: &Window) -> Self {
+        let size = window.inner_size();
 
-        // instance 变量是 GPU 实例
-        // Backends::all 对应 Vulkan、Metal、DX12、WebGL 等所有后端图形驱动
-        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
-            backends: wgpu::Backends::all(),
-            ..Default::default()
-        });
-        let surface = unsafe { instance.create_surface(window).unwrap() };
-        let adapter = instance
-            .request_adapter(&wgpu::RequestAdapterOptions {
-                compatible_surface: Some(&surface),
-                ..Default::default()
-            }).await.unwrap();

GPU 实例与适配器

GPU 实例(Instance)是使用 wgpu 时所需创建的第一个对象,其主要用途是创建适配器(Adapter)和展示平面(Surface)。

适配器(Adapter)是指向 WebGPU API 实现的实例,一个系统上往往存在多个 WebGPU API 实现实例。也就是说,适配器是固定在特定图形后端的。假如你使用的是 Windows 且有 2 个显卡(集成显卡 + 独立显卡),则至少有 4 个适配器可供使用,分别有 2 个固定在 Vulkan 和 DirectX 后端。我们可以用它获取关联显卡的信息,例如显卡名称与其所适配到的后端图形驱动等。稍后我们会用它来创建逻辑设备命令队列。现在先讨论一下 RequestAdapterOptions 所涉及的字段。

  • power_preference 枚举有两个可选项:LowPowerHighPerformanceLowPower 对应偏向于高电池续航的适配器(如集成显卡上的 WebGPU 实现实例),HighPerformance 对应高功耗高性能的适配器(如独立显卡上的 WebGPU 实现实例)。一旦不存在符合 HighPerformance 选项的适配器,wgpu 就会选择 LowPower
  • compatible_surface 字段告诉 wgpu 找到与所传入的展示平面兼容的适配器。
  • force_fallback_adapter 强制 wgpu 选择一个能在所有系统上工作的适配器,这通常意味着渲染后端将使用一个软渲染系统,而非 GPU 这样的硬件。需要注意的是:WebGPU 标准并没有要求所有系统上都必须实现 fallback adapter

此处传递给 request_adapter 的参数不能保证对所有设备都有效,但是应该对大多数设备都有效。当 wgpu 找不到符合要求的适配器,request_adapter 将返回 None。如果你想获取某个特定图形后端的所有适配器,可以使用 enumerate_adapters 函数,它会返回一个迭代器,你可以遍历检查其中是否有满足需求的适配器。

rust
let adapter = instance
-    .enumerate_adapters(wgpu::Backends::all())
-    .filter(|adapter| {
-        // 检查该适配器是否兼容我们的展示平面
-        adapter.is_surface_supported(&surface)
-    })
-    .next()
-    .unwrap();

更多可用于优化适配器搜索的函数,请查看文档

展示平面

展示平面(Surface)是我们绘制到窗口的部分,需要它来将绘制结果展示(或者说,呈现)到屏幕上。窗口程序需要实现 raw-window-handle HasRawWindowHandle trait 来创建展示平面。所幸 winit 的 Window 符合这个要求。我们还需要展示平面来请求适配器

逻辑设备与命令队列

让我们使用适配器来创建逻辑设备 (Device) 和命令队列 (Queue)。

rust
let (device, queue) = adapter.request_device(
-    &wgpu::DeviceDescriptor {
-        features: wgpu::Features::empty(),
-        // WebGL 后端并不支持 wgpu 的所有功能,
-        // 所以如果要以 web 为构建目标,就必须禁用一些功能。
-        limits: if cfg!(target_arch = "wasm32") {
-            wgpu::Limits::downlevel_webgl2_defaults()
-        } else {
-            wgpu::Limits::default()
-        },
-        label: None,
-    },
-    None, // 追踪 API 调用路径
-).await.unwrap();

DeviceDescriptor上的 features 字段允许我们指定想要的扩展功能。对于这个简单的例子,我决定不使用任何额外的功能。

显卡会限制可用的扩展功能,所以如果想使用某些功能,你可能需要限制支持的设备或提供变通函数。

可以使用 adapter.features()device.features() 获取设备支持的扩展功能列表。

如果有需要,请查看完整的扩展功能列表

limits 字段描述了创建某些类型的资源的限制。我们在本教程中使用默认值,所以可以支持大多数设备。你可以在这里查看限制列表。

rust
let caps = surface.get_capabilities(&adapter);
-let config = wgpu::SurfaceConfiguration {
-    usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
-    format: caps.formats[0],
-    width: size.width,
-    height: size.height,
-    present_mode: wgpu::PresentMode::Fifo,
-    alpha_mode: caps.alpha_modes[0],
-    view_formats: vec![],
-};
-surface.configure(&device, &config);

这里我们为展示平面定义了一个配置。它将定义展示平面如何创建其底层的 SurfaceTexture。讲 render 函数时我们再具体讨论 SurfaceTexture,现在先谈谈此配置的字段。

usage 字段描述了 SurfaceTexture 如何被使用。RENDER_ATTACHMENT 指定将被用来渲染到屏幕的纹理(我们将在后面讨论更多的 TextureUsages 枚举值)。

format 定义了 SurfaceTexture 在 GPU 内存上如何被存储。不同的显示设备偏好不同的纹理格式。我们使用surface.get_capabilities(&adapter).formats 来获取当前显示设备的最佳格式。

widthheight 指定 SurfaceTexture 的宽度和高度(物理像素,等于逻辑像素乘以屏幕缩放因子)。这通常就是窗口的宽和高。

需要确保 SurfaceTexture 的宽高不能为 0,这会导致你的应用程序崩溃。

present_mode 指定的 wgpu::PresentMode 枚举值决定了展示平面如何与显示设备同步。我们选择的PresentMode::Fifo 指定了显示设备的刷新率做为渲染的帧速率,这本质上就是垂直同步(VSync),所有平台都得支持这种呈现模式(PresentMode)。你可以在文档中查看所有的模式。

当你想让用户来选择他们使用的呈现模式时,可以使用 surface.get_capabilities(&adapter) 获取展示平面支持的所有呈现模式的列表:

rust
let modes = surface.get_capabilities(&adapter).present_modes;

PresentMode::Fifo 模式无论如何都是被支持的,PresentMode::AutoVsyncPresentMode::AutoNoVsync 支持回退,因此也能工作在所有平台上。

现在已经正确地配置了展示平面,我们在函数的末尾添加上这些新字段:

rust
Self {
-            surface,
-            device,
-            queue,
-            config,
-            size,
-        }
-    }
-    // ...
-}

由于 State::new() 函数是异步的,因此需要把 run() 也改成异步的,以便可以在函数调用处等待它。

rust
pub async fn run() {
-    // 窗口设置...
+        // instance 变量是 GPU 实例
+        // Backends::all 对应 Vulkan、Metal、DX12、WebGL 等所有后端图形驱动
+        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
+            backends: wgpu::Backends::all(),
+            ..Default::default()
+        });
+        let surface = unsafe { instance.create_surface(window).unwrap() };
+        let adapter = instance
+            .request_adapter(&wgpu::RequestAdapterOptions {
+                compatible_surface: Some(&surface),
+                ..Default::default()
+            }).await.unwrap();

GPU 实例与适配器

GPU 实例(Instance)是使用 wgpu 时所需创建的第一个对象,其主要用途是创建适配器(Adapter)和展示平面(Surface)。

适配器(Adapter)是指向 WebGPU API 实现的实例,一个系统上往往存在多个 WebGPU API 实现实例。也就是说,适配器是固定在特定图形后端的。假如你使用的是 Windows 且有 2 个显卡(集成显卡 + 独立显卡),则至少有 4 个适配器可供使用,分别有 2 个固定在 Vulkan 和 DirectX 后端。我们可以用它获取关联显卡的信息,例如显卡名称与其所适配到的后端图形驱动等。稍后我们会用它来创建逻辑设备命令队列。现在先讨论一下 RequestAdapterOptions 所涉及的字段。

  • power_preference 枚举有两个可选项:LowPowerHighPerformanceLowPower 对应偏向于高电池续航的适配器(如集成显卡上的 WebGPU 实现实例),HighPerformance 对应高功耗高性能的适配器(如独立显卡上的 WebGPU 实现实例)。一旦不存在符合 HighPerformance 选项的适配器,wgpu 就会选择 LowPower
  • compatible_surface 字段告诉 wgpu 找到与所传入的展示平面兼容的适配器。
  • force_fallback_adapter 强制 wgpu 选择一个能在所有系统上工作的适配器,这通常意味着渲染后端将使用一个软渲染系统,而非 GPU 这样的硬件。需要注意的是:WebGPU 标准并没有要求所有系统上都必须实现 fallback adapter

此处传递给 request_adapter 的参数不能保证对所有设备都有效,但是应该对大多数设备都有效。当 wgpu 找不到符合要求的适配器,request_adapter 将返回 None。如果你想获取某个特定图形后端的所有适配器,可以使用 enumerate_adapters 函数,它会返回一个迭代器,你可以遍历检查其中是否有满足需求的适配器。

rust
let adapter = instance
+    .enumerate_adapters(wgpu::Backends::all())
+    .filter(|adapter| {
+        // 检查该适配器是否兼容我们的展示平面
+        adapter.is_surface_supported(&surface)
+    })
+    .next()
+    .unwrap();

更多可用于优化适配器搜索的函数,请查看文档

展示平面

展示平面(Surface)是我们绘制到窗口的部分,需要它来将绘制结果展示(或者说,呈现)到屏幕上。窗口程序需要实现 raw-window-handle HasRawWindowHandle trait 来创建展示平面。所幸 winit 的 Window 符合这个要求。我们还需要展示平面来请求适配器

逻辑设备与命令队列

让我们使用适配器来创建逻辑设备 (Device) 和命令队列 (Queue)。

rust
let (device, queue) = adapter.request_device(
+    &wgpu::DeviceDescriptor {
+        features: wgpu::Features::empty(),
+        // WebGL 后端并不支持 wgpu 的所有功能,
+        // 所以如果要以 web 为构建目标,就必须禁用一些功能。
+        limits: if cfg!(target_arch = "wasm32") {
+            wgpu::Limits::downlevel_webgl2_defaults()
+        } else {
+            wgpu::Limits::default()
+        },
+        label: None,
+    },
+    None, // 追踪 API 调用路径
+).await.unwrap();

DeviceDescriptor上的 features 字段允许我们指定想要的扩展功能。对于这个简单的例子,我决定不使用任何额外的功能。

显卡会限制可用的扩展功能,所以如果想使用某些功能,你可能需要限制支持的设备或提供变通函数。

可以使用 adapter.features()device.features() 获取设备支持的扩展功能列表。

如果有需要,请查看完整的扩展功能列表

limits 字段描述了创建某些类型的资源的限制。我们在本教程中使用默认值,所以可以支持大多数设备。你可以在这里查看限制列表。

rust
let caps = surface.get_capabilities(&adapter);
+let config = wgpu::SurfaceConfiguration {
+    usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
+    format: caps.formats[0],
+    width: size.width,
+    height: size.height,
+    present_mode: wgpu::PresentMode::Fifo,
+    alpha_mode: caps.alpha_modes[0],
+    view_formats: vec![],
+};
+surface.configure(&device, &config);

这里我们为展示平面定义了一个配置。它将定义展示平面如何创建其底层的 SurfaceTexture。讲 render 函数时我们再具体讨论 SurfaceTexture,现在先谈谈此配置的字段。

usage 字段描述了 SurfaceTexture 如何被使用。RENDER_ATTACHMENT 指定将被用来渲染到屏幕的纹理(我们将在后面讨论更多的 TextureUsages 枚举值)。

format 定义了 SurfaceTexture 在 GPU 内存上如何被存储。不同的显示设备偏好不同的纹理格式。我们使用surface.get_capabilities(&adapter).formats 来获取当前显示设备的最佳格式。

widthheight 指定 SurfaceTexture 的宽度和高度(物理像素,等于逻辑像素乘以屏幕缩放因子)。这通常就是窗口的宽和高。

需要确保 SurfaceTexture 的宽高不能为 0,这会导致你的应用程序崩溃。

present_mode 指定的 wgpu::PresentMode 枚举值决定了展示平面如何与显示设备同步。我们选择的PresentMode::Fifo 指定了显示设备的刷新率做为渲染的帧速率,这本质上就是垂直同步(VSync),所有平台都得支持这种呈现模式(PresentMode)。你可以在文档中查看所有的模式。

当你想让用户来选择他们使用的呈现模式时,可以使用 surface.get_capabilities(&adapter) 获取展示平面支持的所有呈现模式的列表:

rust
let modes = surface.get_capabilities(&adapter).present_modes;

PresentMode::Fifo 模式无论如何都是被支持的,PresentMode::AutoVsyncPresentMode::AutoNoVsync 支持回退,因此也能工作在所有平台上。

现在已经正确地配置了展示平面,我们在函数的末尾添加上这些新字段:

rust
        Self {
+            surface,
+            device,
+            queue,
+            config,
+            size,
+        }
+    }
+    // ...
+}

由于 State::new() 函数是异步的,因此需要把 run() 也改成异步的,以便可以在函数调用处等待它。

rust
pub async fn run() {
+    // 窗口设置...
 
-    let mut state = State::new(&window).await;
+    let mut state = State::new(&window).await;
 
-    // 事件遍历...
-}

现在 run() 是异步的了,main() 需要某种方式来等待它执行完成。我们可以使用 tokioasync-std 等异步,但我打算使用更轻量级的 pollster。在 "Cargo.toml" 中添加以下依赖:

toml
[dependencies]
-# 其他依赖...
-pollster = "0.3"

然后我们使用 pollster 提供的 block_on 函数来等待异步任务执行完成:

rust
fn main() {
-    pollster::block_on(run());
-}

WASM 环境中不能在异步函数里使用 block_onFuture(异步函数的返回对象)必须使用浏览器的执行器来运行。如果你试图使用自己的执行器,一旦遇到没有立即执行的 Future 时代码就会崩溃。

如果现在尝试构建 WASM 将会失败,因为 wasm-bindgen 不支持使用异步函数作为“开始”函数。你可以改成在 javascript 中手动调用 run,但为了简单起见,我们将把 wasm-bindgen-futures 添加到 WASM 依赖项中,因为这不需要改变任何代码。你的依赖项应该是这样的:

toml
[dependencies]
-cfg-if = "1"
-winit = "0.28.7"
-env_logger = "0.10"
-log = "0.4"
-wgpu = "0.17"
-pollster = "0.3"
+    // 事件遍历...
+}

现在 run() 是异步的了,main() 需要某种方式来等待它执行完成。我们可以使用 tokioasync-std 等异步,但我打算使用更轻量级的 pollster。在 "Cargo.toml" 中添加以下依赖:

toml
[dependencies]
+# 其他依赖...
+pollster = "0.3"

然后我们使用 pollster 提供的 block_on 函数来等待异步任务执行完成:

rust
fn main() {
+    pollster::block_on(run());
+}

WASM 环境中不能在异步函数里使用 block_onFuture(异步函数的返回对象)必须使用浏览器的执行器来运行。如果你试图使用自己的执行器,一旦遇到没有立即执行的 Future 时代码就会崩溃。

如果现在尝试构建 WASM 将会失败,因为 wasm-bindgen 不支持使用异步函数作为“开始”函数。你可以改成在 javascript 中手动调用 run,但为了简单起见,我们将把 wasm-bindgen-futures 添加到 WASM 依赖项中,因为这不需要改变任何代码。你的依赖项应该是这样的:

toml
[dependencies]
+cfg-if = "1"
+winit = "0.28.7"
+env_logger = "0.10"
+log = "0.4"
+wgpu = "0.17"
+pollster = "0.3"
 
-[target.'cfg(target_arch = "wasm32")'.dependencies]
-console_error_panic_hook = "0.1.7"
-console_log = "1.0"
-wasm-bindgen = "0.2.87"
-wasm-bindgen-futures = "0.4.34"
-web-sys = { version = "0.3.64", features = [
-    "Document",
-    "Window",
-    "Element",
-]}

调整展示平面的宽高

如果要在应用程序中支持调整展示平面的宽高,将需要在每次窗口的大小改变时重新配置 surface。这就是我们存储物理 size 和用于配置 surfaceconfig 的原因。有了这些,实现 resize 函数就非常简单了。

rust
// impl State
-pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-    if new_size.width > 0 && new_size.height > 0 {
-        self.size = new_size;
-        self.config.width = new_size.width;
-        self.config.height = new_size.height;
-        self.surface.configure(&self.device, &self.config);
-    }
-}

这里和最初的 surface 配置没什么不同,所以就不再赘述。

run() 函数的事件循环中,我们在以下事件中调用 resize() 函数。

rust
match event {
-    // ...
+[target.'cfg(target_arch = "wasm32")'.dependencies]
+console_error_panic_hook = "0.1.7"
+console_log = "1.0"
+wasm-bindgen = "0.2.87"
+wasm-bindgen-futures = "0.4.34"
+web-sys = { version = "0.3.64", features = [
+    "Document",
+    "Window",
+    "Element",
+]}

调整展示平面的宽高

如果要在应用程序中支持调整展示平面的宽高,将需要在每次窗口的大小改变时重新配置 surface。这就是我们存储物理 size 和用于配置 surfaceconfig 的原因。有了这些,实现 resize 函数就非常简单了。

rust
// impl State
+pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+    if new_size.width > 0 && new_size.height > 0 {
+        self.size = new_size;
+        self.config.width = new_size.width;
+        self.config.height = new_size.height;
+        self.surface.configure(&self.device, &self.config);
+    }
+}

这里和最初的 surface 配置没什么不同,所以就不再赘述。

run() 函数的事件循环中,我们在以下事件中调用 resize() 函数。

rust
match event {
+    // ...
 
-    } if window_id == window.id() => if !state.input(event) {
-        match event {
-            // ...
+    } if window_id == window.id() => if !state.input(event) {
+        match event {
+            // ...
 
-            WindowEvent::Resized(physical_size) => {
-                state.resize(*physical_size);
-            }
-            WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
-                // new_inner_size 是 &&mut 类型,因此需要解引用两次
-                state.resize(**new_inner_size);
-            }
-            // ...
-}

事件输入

input() 函数返回一个 bool(布尔值),表示一个事件是否已经被处理。如果该函数返回 true,主循环就不再继续处理该事件。

我们现在没有任何想要捕获的事件,只需要返回 false。

rust
// impl State
-fn input(&mut self, event: &WindowEvent) -> bool {
-    false
-}

还需要在事件循环中多做一点工作,我们希望 Staterun() 函数内的事件处理中拥有第一优先级。修改后(加上之前的修改)的代码看起来像是这样的:

rust
// run()
-event_loop.run(move |event, _, control_flow| {
-    match event {
-        Event::WindowEvent {
-            ref event,
-            window_id,
-        } if window_id == window.id() => if !state.input(event) { // 更新!
-            match event {
-                WindowEvent::CloseRequested
-                | WindowEvent::KeyboardInput {
-                    input:
-                        KeyboardInput {
-                            state: ElementState::Pressed,
-                            virtual_keycode: Some(VirtualKeyCode::Escape),
-                            ..
-                        },
-                    ..
-                } => *control_flow = ControlFlow::Exit,
-                WindowEvent::Resized(physical_size) => {
-                    state.resize(*physical_size);
-                }
-                WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
-                    state.resize(**new_inner_size);
-                }
-                _ => {}
-            }
-        }
-        _ => {}
-    }
-});

更新

目前还没有任何东西需要更新,所以令这个函数为空。

rust
fn update(&mut self) {
-    // remove `todo!()`
-}

我们稍后将在这里添加一些代码,以便让绘制对象动起来。

渲染

这里就是奇迹发生的地方。首先,我们需要获取一个帧(Frame)对象以供渲染:

rust
// impl State
+            WindowEvent::Resized(physical_size) => {
+                state.resize(*physical_size);
+            }
+            WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
+                // new_inner_size 是 &&mut 类型,因此需要解引用两次
+                state.resize(**new_inner_size);
+            }
+            // ...
+}

事件输入

input() 函数返回一个 bool(布尔值),表示一个事件是否已经被处理。如果该函数返回 true,主循环就不再继续处理该事件。

我们现在没有任何想要捕获的事件,只需要返回 false。

rust
// impl State
+fn input(&mut self, event: &WindowEvent) -> bool {
+    false
+}

还需要在事件循环中多做一点工作,我们希望 State 在 run() 函数内的事件处理中拥有第一优先级。修改后(加上之前的修改)的代码看起来像是这样的:

rust
// run()
+event_loop.run(move |event, _, control_flow| {
+    match event {
+        Event::WindowEvent {
+            ref event,
+            window_id,
+        } if window_id == window.id() => if !state.input(event) { // 更新!
+            match event {
+                WindowEvent::CloseRequested
+                | WindowEvent::KeyboardInput {
+                    input:
+                        KeyboardInput {
+                            state: ElementState::Pressed,
+                            virtual_keycode: Some(VirtualKeyCode::Escape),
+                            ..
+                        },
+                    ..
+                } => *control_flow = ControlFlow::Exit,
+                WindowEvent::Resized(physical_size) => {
+                    state.resize(*physical_size);
+                }
+                WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
+                    state.resize(**new_inner_size);
+                }
+                _ => {}
+            }
+        }
+        _ => {}
+    }
+});

更新

目前还没有任何东西需要更新,所以令这个函数为空。

rust
fn update(&mut self) {
+    // remove `todo!()`
+}

我们稍后将在这里添加一些代码,以便让绘制对象动起来。

渲染

这里就是奇迹发生的地方。首先,我们需要获取一个帧(Frame)对象以供渲染:

rust
// impl State
 
-fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
-    let output = self.surface.get_current_texture()?;

get_current_texture 函数会等待 surface 提供一个新的 SurfaceTexture。我们将它存储在 output 变量中以便后续使用。

rust
let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());

这一行创建了一个默认设置的纹理视图(TextureView),渲染代码需要利用纹理视图来与纹理交互。

我们还需要创建一个命令编码器(CommandEncoder)来记录实际的命令发送给 GPU。大多数现代图形框架希望命令在被发送到 GPU 之前存储在一个命令缓冲区中。命令编码器创建了一个命令缓冲区,然后我们可以将其发送给 GPU。

rust
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-    label: Some("Render Encoder"),
-});

现在可以开始执行期盼已久的清屏(用统一的颜色填充指定渲染区域)了。我们需要使用 encoder 来创建渲染通道RenderPass)。渲染通道编码所有实际绘制的命令。创建渲染通道的代码嵌套层级有点深,所以在谈论它之前,我先把代码全部复制到这里:

rust
{
-        let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-            label: Some("Render Pass"),
-            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
-                view: &view,
-                resolve_target: None,
-                ops: wgpu::Operations {
-                    load: wgpu::LoadOp::Clear(wgpu::Color {
-                        r: 0.1,
-                        g: 0.2,
-                        b: 0.3,
-                        a: 1.0,
-                    }),
-                    store: wgpu::StoreOp::Store
-                },
-            })],
-            ..Default::default()
-        });
-    }
+fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
+    let output = self.surface.get_current_texture()?;

get_current_texture 函数会等待 surface 提供一个新的 SurfaceTexture。我们将它存储在 output 变量中以便后续使用。

rust
let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());

这一行创建了一个默认设置的纹理视图(TextureView),渲染代码需要利用纹理视图来与纹理交互。

我们还需要创建一个命令编码器(CommandEncoder)来记录实际的命令发送给 GPU。大多数现代图形框架希望命令在被发送到 GPU 之前存储在一个命令缓冲区中。命令编码器创建了一个命令缓冲区,然后我们可以将其发送给 GPU。

rust
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+    label: Some("Render Encoder"),
+});

现在可以开始执行期盼已久的清屏(用统一的颜色填充指定渲染区域)了。我们需要使用 encoder 来创建渲染通道RenderPass)。渲染通道编码所有实际绘制的命令。创建渲染通道的代码嵌套层级有点深,所以在谈论它之前,我先把代码全部复制到这里:

rust
    {
+        let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+            label: Some("Render Pass"),
+            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
+                view: &view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(wgpu::Color {
+                        r: 0.1,
+                        g: 0.2,
+                        b: 0.3,
+                        a: 1.0,
+                    }),
+                    store: wgpu::StoreOp::Store
+                },
+            })],
+            ..Default::default()
+        });
+    }
 
-    // submit 命令能接受任何实现了 IntoIter trait 的参数
-    self.queue.submit(std::iter::once(encoder.finish()));
-    output.present();
+    // submit 命令能接受任何实现了 IntoIter trait 的参数
+    self.queue.submit(std::iter::once(encoder.finish()));
+    output.present();
 
-    Ok(())
-}

首先,我们来谈谈 encoder.begin_render_pass(...) 周围用 {} 开辟出来的块空间。begin_render_pass() 以可变方式借用了encoder(又称 &mut self),在释放这个可变借用之前,我们不能调用 encoder.finish()。这个块空间告诉 rust,当代码离开这个范围时,丢弃其中的任何变量,从而释放 encoder 上的可变借用,并允许我们 finish() 它。如果你不喜欢 {},也可以使用 drop(render_pass) 来达到同样的效果。

代码的最后几行告诉 wgpu 完成命令缓冲区,并将其提交给 gpu 的渲染队列

我们需再次更新事件循环以调用 render() 函数,还会在它之前先调用 update()

rust
// run()
-event_loop.run(move |event, _, control_flow| {
-    match event {
-        // ...
-        Event::RedrawRequested(window_id) if window_id == window.id() => {
-            state.update();
-            match state.render() {
-                Ok(_) => {}
-                // 当展示平面的上下文丢失,就需重新配置
-                Err(wgpu::SurfaceError::Lost) => state.resize(state.size),
-                // 系统内存不足时,程序应该退出。
-                Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
-                // 所有其他错误(过期、超时等)应在下一帧解决
-                Err(e) => eprintln!("{:?}", e),
-            }
-        }
-        Event::MainEventsCleared => {
-            // 除非我们手动请求,RedrawRequested 将只会触发一次。
-            window.request_redraw();
-        }
-        // ...
-    }
-});

基于以上这些,你就能获得如下渲染效果:

蓝色背景的窗口

关于渲染通道描述符

部分读者可能光看代码就能理解,但如果我不把它介绍一遍,那就是失职。让我们再看一下代码:

rust
&wgpu::RenderPassDescriptor {
-    label: Some("Render Pass"),
-    color_attachments: &[
-        // ...
-    ],
-    ..Default::default()
-}

渲染通道描述符RenderPassDescriptor)只有三个字段: label, color_attachmentsdepth_stencil_attachmentcolor_attachments 描述了要将颜色绘制到哪里。我们使用之前创建的纹理视图来确保渲染到屏幕上。

color_attachments 字段是一个稀疏数组。这允许你使用有多个渲染目标的管线,并且最终只绘制到你所关心的某个渲染目标。

我们后面会使用到 depth_stencil_attachment,现在先将它设置为 None

rust
Some(wgpu::RenderPassColorAttachment {
-    view: &view,
-    resolve_target: None,
-    ops: wgpu::Operations {
-        load: wgpu::LoadOp::Clear(wgpu::Color {
-            r: 0.1,
-            g: 0.2,
-            b: 0.3,
-            a: 1.0,
-        }),
-        store: wgpu::StoreOp::Store
-    },
-})

RenderPassColorAttachment 有一个 view 字段,用于通知 wgpu 将颜色保存到什么纹理。这里我们指定使用 surface.get_current_texture() 创建的 view,这意味着向此附件(Attachment)上绘制的任何颜色都会被绘制到屏幕上。

resolve_target 是接收多重采样解析输出的纹理。除非启用了多重采样, 否则不需要设置它,保留为 None 即可。

ops 字段需要一个 wgpu::Operations 对象。它告诉 wgpu 如何处理屏幕上的颜色(由 view 指定)。load 字段告诉 wgpu 如何处理存储在前一帧的颜色。目前,我们正在用蓝色清屏。store 字段告诉 wgpu 是否要将渲染的结果存储到纹理视图后面的纹理上(在这个例子中是 SurfaceTexture )。我们希望存储渲染结果,所以设置为 wgpu::StoreOp::Store。

当屏幕被场景对象完全遮挡,那么不清屏是很常见的。但如果你的场景没有覆盖整个屏幕,就会出现类似下边的情况:

./no-clear.png

验证错误?

如果你的机器上运行的是 Vulkan SDK 的旧版本, wgpu 在你的机器上使用 Vulkan 后端时可能会遇到验证错误。至少需要使用 1.2.182 及以上版本,因为旧版本可能会产生一些误报。如果错误持续存在,那可能是遇到了 wgpu 的错误。你可以在 https://github.com/gfx-rs/wgpu 上提交此问题。

挑战

修改 input() 函数以捕获鼠标事件,并使用该函数来更新清屏的颜色。提示:你可能需要用到 WindowEvent::CursorMoved

加入 wgpu 微信学习交流群

- + Ok(()) +}

首先,我们来谈谈 encoder.begin_render_pass(...) 周围用 {} 开辟出来的块空间。begin_render_pass() 以可变方式借用了encoder(又称 &mut self),在释放这个可变借用之前,我们不能调用 encoder.finish()。这个块空间告诉 rust,当代码离开这个范围时,丢弃其中的任何变量,从而释放 encoder 上的可变借用,并允许我们 finish() 它。如果你不喜欢 {},也可以使用 drop(render_pass) 来达到同样的效果。

代码的最后几行告诉 wgpu 完成命令缓冲区,并将其提交给 gpu 的渲染队列

我们需再次更新事件循环以调用 render() 函数,还会在它之前先调用 update()

rust
// run()
+event_loop.run(move |event, _, control_flow| {
+    match event {
+        // ...
+        Event::RedrawRequested(window_id) if window_id == window.id() => {
+            state.update();
+            match state.render() {
+                Ok(_) => {}
+                // 当展示平面的上下文丢失,就需重新配置
+                Err(wgpu::SurfaceError::Lost) => state.resize(state.size),
+                // 系统内存不足时,程序应该退出。
+                Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
+                // 所有其他错误(过期、超时等)应在下一帧解决
+                Err(e) => eprintln!("{:?}", e),
+            }
+        }
+        Event::MainEventsCleared => {
+            // 除非我们手动请求,RedrawRequested 将只会触发一次。
+            window.request_redraw();
+        }
+        // ...
+    }
+});

基于以上这些,你就能获得如下渲染效果:

蓝色背景的窗口

关于渲染通道描述符

部分读者可能光看代码就能理解,但如果我不把它介绍一遍,那就是失职。让我们再看一下代码:

rust
&wgpu::RenderPassDescriptor {
+    label: Some("Render Pass"),
+    color_attachments: &[
+        // ...
+    ],
+    ..Default::default()
+}

渲染通道描述符RenderPassDescriptor)只有三个字段: label, color_attachmentsdepth_stencil_attachmentcolor_attachments 描述了要将颜色绘制到哪里。我们使用之前创建的纹理视图来确保渲染到屏幕上。

color_attachments 字段是一个稀疏数组。这允许你使用有多个渲染目标的管线,并且最终只绘制到你所关心的某个渲染目标。

我们后面会使用到 depth_stencil_attachment,现在先将它设置为 None

rust
Some(wgpu::RenderPassColorAttachment {
+    view: &view,
+    resolve_target: None,
+    ops: wgpu::Operations {
+        load: wgpu::LoadOp::Clear(wgpu::Color {
+            r: 0.1,
+            g: 0.2,
+            b: 0.3,
+            a: 1.0,
+        }),
+        store: wgpu::StoreOp::Store
+    },
+})

RenderPassColorAttachment 有一个 view 字段,用于通知 wgpu 将颜色保存到什么纹理。这里我们指定使用 surface.get_current_texture() 创建的 view,这意味着向此附件(Attachment)上绘制的任何颜色都会被绘制到屏幕上。

resolve_target 是接收多重采样解析输出的纹理。除非启用了多重采样, 否则不需要设置它,保留为 None 即可。

ops 字段需要一个 wgpu::Operations 对象。它告诉 wgpu 如何处理屏幕上的颜色(由 view 指定)。load 字段告诉 wgpu 如何处理存储在前一帧的颜色。目前,我们正在用蓝色清屏。store 字段告诉 wgpu 是否要将渲染的结果存储到纹理视图后面的纹理上(在这个例子中是 SurfaceTexture )。我们希望存储渲染结果,所以设置为 wgpu::StoreOp::Store。

当屏幕被场景对象完全遮挡,那么不清屏是很常见的。但如果你的场景没有覆盖整个屏幕,就会出现类似下边的情况:

./no-clear.png

验证错误?

如果你的机器上运行的是 Vulkan SDK 的旧版本, wgpu 在你的机器上使用 Vulkan 后端时可能会遇到验证错误。至少需要使用 1.2.182 及以上版本,因为旧版本可能会产生一些误报。如果错误持续存在,那可能是遇到了 wgpu 的错误。你可以在 https://github.com/gfx-rs/wgpu 上提交此问题。

挑战

修改 input() 函数以捕获鼠标事件,并使用该函数来更新清屏的颜色。提示:你可能需要用到 WindowEvent::CursorMoved

加入 wgpu 微信学习交流群

+ \ No newline at end of file diff --git a/beginner/tutorial3-pipeline/index.html b/beginner/tutorial3-pipeline/index.html index fc52b1f20..88377b5e7 100644 --- a/beginner/tutorial3-pipeline/index.html +++ b/beginner/tutorial3-pipeline/index.html @@ -5,138 +5,140 @@ 管线 (Pipeline) | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

管线 (Pipeline)

什么是管线?

管线ComputePipelineRenderPipeline)由一系列资源绑定、可编程阶段(着色器)设置及固定功能状态组成。它代表了由 GPU 硬件、驱动程序和用户代理组合而成的完整功能对象,描述了 GPU 将对一组数据执行的所有操作。在本节中,我们将具体创建一个渲染管线RenderPipeline)。

什么是着色器?

着色器(Shader)是你发送给 GPU 的微型程序,用于对数据进行操作。有三种主要类型的着色器:顶点(Vertex)、片元(Fragment)和计算(Compute)着色器。另外还有其他的如几何着色器,但它们属于进阶话题。现在,我们只需要使用顶点和片元着色器。

什么是顶点和片元?

顶点(Vertex)就是三维(或二维)空间中的一个点。这些顶点会两个一组以构成线段集合,或者三个一组以构成三角形集合。

Vertices Graphic

从简单的立方体到复杂的人体结构,大多数现代渲染系统都使用三角形来建模所有图形。这些三角形被存储为构成三角形角的顶点。

我们使用顶点着色器来操作顶点,以便按我们想要的样子做图形的变换。

然后顶点经过光栅化(rasterization)后流转到片元着色阶段,片元着色器决定了片元的颜色。渲染结果图像中的每个像素至少对应一个片元,每个片元可输出一个颜色,该颜色会被存储到其相应的像素上(准确的说,片元的输出是存储到 Color Attachment 的纹素上)。

WebGPU 着色器语言: WGSL

WGSL (WebGPU Shading Language) 是 WebGPU 的着色器语言。 WGSL 的开发重点是让它轻松转换为与后端对应的着色器语言;例如,Vulkan 的 SPIR-V、Metal 的 MSL、DX12 的 HLSL 和 OpenGL 的 GLSL。 这种转换是在内部完成的,我们不需要关心这些细节。 就 wgpu 而言,它是由名为 naga 的库完成的。

WGSL 着色器语言 一章中,有对 WGSL 的由来及语法的更详细介绍。

WGSL 规范及其在 WGPU 中的应用仍在开发中。如果在使用中遇到问题,你或许希望 https://app.element.io/#/room/#wgpu:matrix.org 社区的人帮忙看一下你的代码。

编写着色器

main.rs 所在的目录中创建一个 shader.wgsl 文件。在其中写入以下代码:

rust
// 顶点着色器
+    
Skip to content

管线 (Pipeline)

什么是管线?

管线ComputePipelineRenderPipeline)由一系列资源绑定、可编程阶段(着色器)设置及固定功能状态组成。它代表了由 GPU 硬件、驱动程序和用户代理组合而成的完整功能对象,描述了 GPU 将对一组数据执行的所有操作。在本节中,我们将具体创建一个渲染管线RenderPipeline)。

什么是着色器?

着色器(Shader)是你发送给 GPU 的微型程序,用于对数据进行操作。有三种主要类型的着色器:顶点(Vertex)、片元(Fragment)和计算(Compute)着色器。另外还有其他的如几何着色器,但它们属于进阶话题。现在,我们只需要使用顶点和片元着色器。

什么是顶点和片元?

顶点(Vertex)就是三维(或二维)空间中的一个点。这些顶点会两个一组以构成线段集合,或者三个一组以构成三角形集合。

Vertices Graphic

从简单的立方体到复杂的人体结构,大多数现代渲染系统都使用三角形来建模所有图形。这些三角形被存储为构成三角形角的顶点。

我们使用顶点着色器来操作顶点,以便按我们想要的样子做图形的变换。

然后顶点经过光栅化(rasterization)后流转到片元着色阶段,片元着色器决定了片元的颜色。渲染结果图像中的每个像素至少对应一个片元,每个片元可输出一个颜色,该颜色会被存储到其相应的像素上(准确的说,片元的输出是存储到 Color Attachment 的纹素上)。

WebGPU 着色器语言: WGSL

WGSL (WebGPU Shading Language) 是 WebGPU 的着色器语言。 WGSL 的开发重点是让它轻松转换为与后端对应的着色器语言;例如,Vulkan 的 SPIR-V、Metal 的 MSL、DX12 的 HLSL 和 OpenGL 的 GLSL。 这种转换是在内部完成的,我们不需要关心这些细节。 就 wgpu 而言,它是由名为 naga 的库完成的。

WGSL 着色器语言 一章中,有对 WGSL 的由来及语法的更详细介绍。

WGSL 规范及其在 WGPU 中的应用仍在开发中。如果在使用中遇到问题,你或许希望 https://app.element.io/#/room/#wgpu:matrix.org 社区的人帮忙看一下你的代码。

编写着色器

main.rs 所在的目录中创建一个 shader.wgsl 文件。在其中写入以下代码:

rust
// 顶点着色器
 
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-};
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+};
 
-@vertex
-fn vs_main(
-    @builtin(vertex_index) in_vertex_index: u32,
-) -> VertexOutput {
-    var out: VertexOutput;
-    let x = f32(1 - i32(in_vertex_index)) * 0.5;
-    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
-    out.clip_position = vec4f(x, y, 0.0, 1.0);
-    return out;
-}

首先,声明一个 struct 来存储顶点着色器的输出。目前只有一个字段,即 clip_position@builtin(position) 属性标记了此字段将作为顶点在裁剪坐标系中的位置来使用。这类似于 GLSL 的 gl_Position 变量。

形如 vec4 的向量类型是泛型。目前你必须指定向量将包含的值的类型。因此一个使用 32 位浮点数的 3 维向量写做 vec3f

着色器代码的下一部分是 vs_main 函数。@vertex 属性标记了这个函数是顶点着色器的有效入口。我们预期有一个 u32 类型的变量 in_vertex_index,它的值来自 @builtin(vertex_index)

然后使用 VertexOutput 结构体声明一个名为 out 的变量。我们为顶点的裁剪空间坐标创建另外两个 x y 变量。

f32()i32() 表示类型强制转换,将括号里的值转换为此类型。

现在我们可以把 clip_position 保存到 out。然后只需返回 out 就完成了顶点着色器的工作!

我们也可以不使用 struct,直接按以下代码来实现:

rust
@vertex
-fn vs_main(
-    @builtin(vertex_index) in_vertex_index: u32
-) -> @builtin(position) vec4f {
-    // 顶点着色器 code...
-}

接下来是片元着色器。还是在 shader.wgsl 中添加以下代码:

rust
// 片元着色器
+@vertex
+fn vs_main(
+    @builtin(vertex_index) in_vertex_index: u32,
+) -> VertexOutput {
+    var out: VertexOutput;
+    let x = f32(1 - i32(in_vertex_index)) * 0.5;
+    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
+    out.clip_position = vec4f(x, y, 0.0, 1.0);
+    return out;
+}

首先,声明一个 struct 来存储顶点着色器的输出。目前只有一个字段,即 clip_position@builtin(position) 属性标记了此字段将作为顶点在裁剪坐标系中的位置来使用。这类似于 GLSL 的 gl_Position 变量。

形如 vec4 的向量类型是泛型。目前你必须指定向量将包含的值的类型。因此一个使用 32 位浮点数的 3 维向量写做 vec3f

着色器代码的下一部分是 vs_main 函数。@vertex 属性标记了这个函数是顶点着色器的有效入口。我们预期有一个 u32 类型的变量 in_vertex_index,它的值来自 @builtin(vertex_index)

然后使用 VertexOutput 结构体声明一个名为 out 的变量。我们为顶点的裁剪空间坐标创建另外两个 x y 变量。

f32()i32() 表示类型强制转换,将括号里的值转换为此类型。

现在我们可以把 clip_position 保存到 out。然后只需返回 out 就完成了顶点着色器的工作!

我们也可以不使用 struct,直接按以下代码来实现:

rust
@vertex
+fn vs_main(
+    @builtin(vertex_index) in_vertex_index: u32
+) -> @builtin(position) vec4f {
+    // 顶点着色器 code...
+}

接下来是片元着色器。还是在 shader.wgsl 中添加以下代码:

rust
// 片元着色器
 
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return vec4f(0.3, 0.2, 0.1, 1.0);
-}

这将当前片元的颜色设置为棕色。

注意,顶点和片元着色器的入口点分别被命名为 vs_mainfs_main。在 wgpu 的早期版本中,这两个函数有相同的名字是可以的,但较新版本的 WGSL spec 要求这些名字必须不同。因此在整个教程中都使用(从 wgpu demo 中采用)上述命名方案。

@location(0) 属性标记了该函数返回的 vec4 值将存储在第一个颜色附件(Color Attachment)中。

使用着色器

终于要用到本章节标题提到的概念 管线(Pipeline)了。首先,我们来修改 State 以包括以下代码。

rust
// lib.rs
-struct State {
-    surface: wgpu::Surface,
-    device: wgpu::Device,
-    queue: wgpu::Queue,
-    config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
-    // 新添加!
-    render_pipeline: wgpu::RenderPipeline,
-}

现在,开始在 new() 函数内创建管线。我们需要载入先前写的,渲染管线所需要的着色器。

rust
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
-    label: Some("Shader"),
-    source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
-});

也可以使用 include_wgsl! 宏作为创建 ShaderModuleDescriptor 的快捷方式。

rust
let shader = device.create_shader_module(include_wgsl!("shader.wgsl"));

还需要创建一个 PipelineLayout。在讲完缓冲区(Buffer)之后,我们会对它有更多的了解。

rust
let render_pipeline_layout =
-    device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-        label: Some("Render Pipeline Layout"),
-        bind_group_layouts: &[],
-        push_constant_ranges: &[],
-    });

最后,我们就获得了创建 render_pipeline 所需的全部资源:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    label: Some("Render Pipeline"),
-    layout: Some(&render_pipeline_layout),
-    vertex: wgpu::VertexState {
-        module: &shader,
-        entry_point: "vs_main", // 1.
-        buffers: &[], // 2.
-    },
-    fragment: Some(wgpu::FragmentState { // 3.
-        module: &shader,
-        entry_point: "fs_main",
-        targets: &[Some(wgpu::ColorTargetState { // 4.
-            format: config.format,
-            blend: Some(wgpu::BlendState::REPLACE),
-            write_mask: wgpu::ColorWrites::ALL,
-        })],
-    }),
-    // ...

有几点需要注意:

  1. 可以在这里指定着色器中的哪个函数应该是入口点( entry_point)。那是我们用 @vertex@fragment 标记的函数。
  2. buffers 字段告诉 wgpu 要把什么类型的顶点数据传递给顶点着色器。我们会在顶点着色器中指定顶点,所以这里先留空。下一个教程中会在此加入一些数据。
  3. fragment 字段是 Option 类型,所以必须用 Some() 来包装 FragmentState 实例。如果想把颜色数据存储到 surface 就需要用到它 。
  4. targets 字段告诉 wgpu 应该设置哪些颜色输出目标。目前只需设置一个输出目标。格式指定为使用 surface 的格式,并且指定混合模式为仅用新的像素数据替换旧的。我们还告诉 wgpu 可写入全部 4 个颜色通道:红、蓝、绿和透明度。在讨论纹理时会更多地介绍 color_state
rust
primitive: wgpu::PrimitiveState {
-    topology: wgpu::PrimitiveTopology::TriangleList, // 1.
-    strip_index_format: None,
-    front_face: wgpu::FrontFace::Ccw, // 2.
-    cull_mode: Some(wgpu::Face::Back),
-    // 将此设置为 Fill 以外的任何值都需要开启 Features::NON_FILL_POLYGON_MODE
-    polygon_mode: wgpu::PolygonMode::Fill,
-    // 需要开启 Features::DEPTH_CLIP_CONTROL
-    unclipped_depth: false,
-    // 需要开启 Features::CONSERVATIVE_RASTERIZATION
-    conservative: false,
-},
-// continued ...

图元(primitive)字段描述了将如何解释顶点来转换为三角形。

  1. PrimitiveTopology::TriangleList 意味着每三个顶点组成一个三角形。
  2. front_face 字段告诉 wgpu 如何确定三角形的朝向。FrontFace::Ccw 指定顶点的帧缓冲区坐标(framebuffer coordinates)按逆时针顺序给出的三角形为朝前(面向屏幕外)。
  3. cull_mode 字段告诉 wgpu 如何做三角形剔除。CullMode::Back 指定朝后(面向屏幕内)的三角形会被剔除(不被渲染)。我们会在讨论缓冲区(Buffer)时详细介绍剔除问题。
rust
depth_stencil: None, // 1.
-    multisample: wgpu::MultisampleState {
-        count: 1, // 2.
-        mask: !0, // 3.
-        alpha_to_coverage_enabled: false, // 4.
-    },
-    multiview: None, // 5.
-});

该函数的其余部分非常简单:

  1. 我们目前没有使用深度/模板缓冲区,因此将 depth_stencil 保留为 None,以后会用到。
  2. count 确定管线将使用多少个采样。多重采样是一个复杂的主题,因此不会在这里展开讨论。
  3. mask 指定哪些采样应处于活动状态。目前我们使用全部采样。
  4. alpha_to_coverage_enabled 与抗锯齿有关。在这里不介绍抗锯齿,因此将其保留为 false。
  5. multiview 表示渲染附件可以有多少数组层。我们不会渲染到数组纹理,因此将其设置为 None

现在我们要做的就是把 render_pipeline 添加到 State,然后就可以使用它了!

rust
// new()
-Self {
-    surface,
-    device,
-    queue,
-    config,
-    size,
-    // 新添加!
-    render_pipeline,
-}

使用管线

如果现在运行程序,它会花更多的时间来启动,但仍然只会显示我们在上一节得到的蓝屏。因为虽然我们创建了 render_pipeline,但还需要修改 render() 函数中的代码来实际使用它:

rust
// render()
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return vec4f(0.3, 0.2, 0.1, 1.0);
+}

这将当前片元的颜色设置为棕色。

注意,顶点和片元着色器的入口点分别被命名为 vs_mainfs_main。在 wgpu 的早期版本中,这两个函数有相同的名字是可以的,但较新版本的 WGSL spec 要求这些名字必须不同。因此在整个教程中都使用(从 wgpu demo 中采用)上述命名方案。

@location(0) 属性标记了该函数返回的 vec4 值将存储在第一个颜色附件(Color Attachment)中。

使用着色器

终于要用到本章节标题提到的概念 管线(Pipeline)了。首先,我们来修改 State 以包括以下代码。

rust
// lib.rs
+struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+    // 新添加!
+    render_pipeline: wgpu::RenderPipeline,
+}

现在,开始在 new() 函数内创建管线。我们需要载入先前写的,渲染管线所需要的着色器。

rust
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
+    label: Some("Shader"),
+    source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
+});

也可以使用 include_wgsl! 宏作为创建 ShaderModuleDescriptor 的快捷方式。

rust
let shader = device.create_shader_module(include_wgsl!("shader.wgsl"));

还需要创建一个 PipelineLayout。在讲完缓冲区(Buffer)之后,我们会对它有更多的了解。

rust
let render_pipeline_layout =
+    device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+        label: Some("Render Pipeline Layout"),
+        bind_group_layouts: &[],
+        push_constant_ranges: &[],
+    });

最后,我们就获得了创建 render_pipeline 所需的全部资源:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    label: Some("Render Pipeline"),
+    layout: Some(&render_pipeline_layout),
+    vertex: wgpu::VertexState {
+        module: &shader,
+        entry_point: "vs_main", // 1.
+        buffers: &[], // 2.
+    },
+    fragment: Some(wgpu::FragmentState { // 3.
+        module: &shader,
+        entry_point: "fs_main",
+        targets: &[Some(wgpu::ColorTargetState { // 4.
+            format: config.format,
+            blend: Some(wgpu::BlendState::REPLACE),
+            write_mask: wgpu::ColorWrites::ALL,
+        })],
+    }),
+    // ...

有几点需要注意:

  1. 可以在这里指定着色器中的哪个函数应该是入口点( entry_point)。那是我们用 @vertex@fragment 标记的函数。
  2. buffers 字段告诉 wgpu 要把什么类型的顶点数据传递给顶点着色器。我们会在顶点着色器中指定顶点,所以这里先留空。下一个教程中会在此加入一些数据。
  3. fragment 字段是 Option 类型,所以必须用 Some() 来包装 FragmentState 实例。如果想把颜色数据存储到 surface 就需要用到它 。
  4. targets 字段告诉 wgpu 应该设置哪些颜色输出目标。目前只需设置一个输出目标。格式指定为使用 surface 的格式,并且指定混合模式为仅用新的像素数据替换旧的。我们还告诉 wgpu 可写入全部 4 个颜色通道:红、蓝、绿和透明度。在讨论纹理时会更多地介绍 color_state
rust
primitive: wgpu::PrimitiveState {
+    topology: wgpu::PrimitiveTopology::TriangleList, // 1.
+    strip_index_format: None,
+    front_face: wgpu::FrontFace::Ccw, // 2.
+    cull_mode: Some(wgpu::Face::Back),
+    // 将此设置为 Fill 以外的任何值都需要开启 Features::NON_FILL_POLYGON_MODE
+    polygon_mode: wgpu::PolygonMode::Fill,
+    // 需要开启 Features::DEPTH_CLIP_CONTROL
+    unclipped_depth: false,
+    // 需要开启 Features::CONSERVATIVE_RASTERIZATION
+    conservative: false,
+},
+// continued ...

图元(primitive)字段描述了将如何解释顶点来转换为三角形。

  1. PrimitiveTopology::TriangleList 意味着每三个顶点组成一个三角形。
  2. front_face 字段告诉 wgpu 如何确定三角形的朝向。FrontFace::Ccw 指定顶点的帧缓冲区坐标(framebuffer coordinates)按逆时针顺序给出的三角形为朝前(面向屏幕外)。
  3. cull_mode 字段告诉 wgpu 如何做三角形剔除。CullMode::Back 指定朝后(面向屏幕内)的三角形会被剔除(不被渲染)。我们会在讨论缓冲区(Buffer)时详细介绍剔除问题。
rust
    depth_stencil: None, // 1.
+    multisample: wgpu::MultisampleState {
+        count: 1, // 2.
+        mask: !0, // 3.
+        alpha_to_coverage_enabled: false, // 4.
+    },
+    multiview: None, // 5.
+});

该函数的其余部分非常简单:

  1. 我们目前没有使用深度/模板缓冲区,因此将 depth_stencil 保留为 None,以后会用到。
  2. count 确定管线将使用多少个采样。多重采样是一个复杂的主题,因此不会在这里展开讨论。
  3. mask 指定哪些采样应处于活动状态。目前我们使用全部采样。
  4. alpha_to_coverage_enabled 与抗锯齿有关。在这里不介绍抗锯齿,因此将其保留为 false。
  5. multiview 表示渲染附件可以有多少数组层。我们不会渲染到数组纹理,因此将其设置为 None

现在我们要做的就是把 render_pipeline 添加到 State,然后就可以使用它了!

rust
// new()
+Self {
+    surface,
+    device,
+    queue,
+    config,
+    size,
+    // 新添加!
+    render_pipeline,
+}

使用管线

如果现在运行程序,它会花更多的时间来启动,但仍然只会显示我们在上一节得到的蓝屏。因为虽然我们创建了 render_pipeline,但还需要修改 render() 函数中的代码来实际使用它:

rust
// render()
 
-// ...
-{
-    // 1.
-    let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-        label: Some("Render Pass"),
-        color_attachments: &[
-            // 这就是片元着色器中 @location(0) 标记指向的颜色附件
-            Some(wgpu::RenderPassColorAttachment {
-                view: &view,
-                resolve_target: None,
-                ops: wgpu::Operations {
-                    load: wgpu::LoadOp::Clear(
-                        wgpu::Color {
-                            r: 0.1,
-                            g: 0.2,
-                            b: 0.3,
-                            a: 1.0,
-                        }
-                    ),
-                    store: wgpu::StoreOp::Store
-                }
-            })
-        ],
-        ..Default::default()
-    });
+// ...
+{
+    // 1.
+    let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+        label: Some("Render Pass"),
+        color_attachments: &[
+            // 这就是片元着色器中 @location(0) 标记指向的颜色附件
+            Some(wgpu::RenderPassColorAttachment {
+                view: &view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(
+                        wgpu::Color {
+                            r: 0.1,
+                            g: 0.2,
+                            b: 0.3,
+                            a: 1.0,
+                        }
+                    ),
+                    store: wgpu::StoreOp::Store
+                }
+            })
+        ],
+        ..Default::default()
+    });
 
-    // 新添加!
-    render_pass.set_pipeline(&self.render_pipeline); // 2.
-    render_pass.draw(0..3, 0..1); // 3.
-}
-// ...

上面代码所做的少量修改:

  1. _render_pass 声明为可变变量并重命名为 render_pass
  2. render_pass 上设置刚刚创建的管线
  3. 告诉 wgpu 用 3 个顶点和 1 个实例(实例的索引就是 @builtin(vertex_index) 的由来)来进行绘制。

修改完代码后,运行程序应该就能看到一个可爱的棕色三角形:

可爱的棕色三角形

挑战

创建第二个管线,使用三角形顶点的位置数据来创建一个颜色并发送给片元着色器。当你按下空格键时让应用程序交替使用两个管线。提示:你需要修改 VertexOutput

- + // 新添加! + render_pass.set_pipeline(&self.render_pipeline); // 2. + render_pass.draw(0..3, 0..1); // 3. +} +// ...

上面代码所做的少量修改:

  1. _render_pass 声明为可变变量并重命名为 render_pass
  2. render_pass 上设置刚刚创建的管线
  3. 告诉 wgpu 用 3 个顶点和 1 个实例(实例的索引就是 @builtin(vertex_index) 的由来)来进行绘制。

修改完代码后,运行程序应该就能看到一个可爱的棕色三角形:

可爱的棕色三角形

挑战

创建第二个管线,使用三角形顶点的位置数据来创建一个颜色并发送给片元着色器。当你按下空格键时让应用程序交替使用两个管线。提示:你需要修改 VertexOutput

+ \ No newline at end of file diff --git a/beginner/tutorial4-buffer/index.html b/beginner/tutorial4-buffer/index.html index aaf7cc915..e68de8275 100644 --- a/beginner/tutorial4-buffer/index.html +++ b/beginner/tutorial4-buffer/index.html @@ -5,243 +5,245 @@ 缓冲区与索引 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

缓冲区与索引

终于要讨论它们了!

你可能已经厌倦了我老说 "我们会在讨论缓冲区的时候再详细介绍" 之类的话。现在终于到了谈论缓冲区的时候了,但首先...

什么是缓冲区?

缓冲区(Buffer)是一个可用于 GPU 操作的内存块。缓冲区数据是以线性布局存储的,这意味着分配的每个字节都可以通过其从缓冲区开始的偏移量来寻址,但要根据操作的不同而有对齐限制。

缓冲区常用于存储结构体或数组等简单的数据,但也可以存储更复杂的数据,如树等图结构(只要所有节点都存储在一起,且不引用缓冲区以外的任何数据)。我们会经常用到缓冲区,所以让我们从最重要的两个开始:顶点缓冲区(Vertex Buffer)和索引缓冲区(Index Buffer)。

顶点缓冲区

之前我们是直接在顶点着色器中存储的顶点数据。这在学习的起始阶段很有效,但这不是长远之计,因为需要绘制的对象的类型会有不同的大小,且每当需要更新模型时就得重新编译着色器,这会大大减慢我们的程序。我们将改为使用顶点缓冲区来存储想要绘制的顶点数据。在此之前,需要创建一个新的结构体来描述顶点:

rust
// lib.rs
-#[repr(C)]
-#[derive(Copy, Clone, Debug)]
-struct Vertex {
-    position: [f32; 3],
-    color: [f32; 3],
-}

每个顶点都会有一个位置(position)和颜色(color)字段。位置代表顶点在三维空间中的 x、y 和 z 坐标。颜色是顶点的红、绿、蓝三通道色值。我们需要令 Vertex 支持 Copy trait,这样就可以用它创建一个缓冲区。

接下来,需要构成三角形的实际顶点数据。在 Vertex 下面添加以下代码:

rust
// lib.rs
-const VERTICES: &[Vertex] = &[
-    Vertex { position: [0.0, 0.5, 0.0], color: [1.0, 0.0, 0.0] },
-    Vertex { position: [-0.5, -0.5, 0.0], color: [0.0, 1.0, 0.0] },
-    Vertex { position: [0.5, -0.5, 0.0], color: [0.0, 0.0, 1.0] },
-];

按逆时针顺序排列顶点:上、左下、右下。这样做的部分理由是出于惯例,但主要是因为我们在 render_pipelineprimitive 中指定了三角形的 front_faceCcw(counter-clockwise),这样就可以做背面剔除。这意味着任何面向我们的三角形的顶点都应该是按逆时针顺序排列。

现在有了顶点数据,需要将其存储在一个缓冲区中。让我们给 State 添加再一个 vertex_buffer 字段:

rust
// lib.rs
-struct State {
-    // ...
-    render_pipeline: wgpu::RenderPipeline,
+    
Skip to content

缓冲区与索引

终于要讨论它们了!

你可能已经厌倦了我老说 "我们会在讨论缓冲区的时候再详细介绍" 之类的话。现在终于到了谈论缓冲区的时候了,但首先...

什么是缓冲区?

缓冲区(Buffer)是一个可用于 GPU 操作的内存块。缓冲区数据是以线性布局存储的,这意味着分配的每个字节都可以通过其从缓冲区开始的偏移量来寻址,但要根据操作的不同而有对齐限制。

缓冲区常用于存储结构体或数组等简单的数据,但也可以存储更复杂的数据,如树等图结构(只要所有节点都存储在一起,且不引用缓冲区以外的任何数据)。我们会经常用到缓冲区,所以让我们从最重要的两个开始:顶点缓冲区(Vertex Buffer)和索引缓冲区(Index Buffer)。

顶点缓冲区

之前我们是直接在顶点着色器中存储的顶点数据。这在学习的起始阶段很有效,但这不是长远之计,因为需要绘制的对象的类型会有不同的大小,且每当需要更新模型时就得重新编译着色器,这会大大减慢我们的程序。我们将改为使用顶点缓冲区来存储想要绘制的顶点数据。在此之前,需要创建一个新的结构体来描述顶点:

rust
// lib.rs
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+struct Vertex {
+    position: [f32; 3],
+    color: [f32; 3],
+}

每个顶点都会有一个位置(position)和颜色(color)字段。位置代表顶点在三维空间中的 x、y 和 z 坐标。颜色是顶点的红、绿、蓝三通道色值。我们需要令 Vertex 支持 Copy trait,这样就可以用它创建一个缓冲区。

接下来,需要构成三角形的实际顶点数据。在 Vertex 下面添加以下代码:

rust
// lib.rs
+const VERTICES: &[Vertex] = &[
+    Vertex { position: [0.0, 0.5, 0.0], color: [1.0, 0.0, 0.0] },
+    Vertex { position: [-0.5, -0.5, 0.0], color: [0.0, 1.0, 0.0] },
+    Vertex { position: [0.5, -0.5, 0.0], color: [0.0, 0.0, 1.0] },
+];

按逆时针顺序排列顶点:上、左下、右下。这样做的部分理由是出于惯例,但主要是因为我们在 render_pipelineprimitive 中指定了三角形的 front_faceCcw(counter-clockwise),这样就可以做背面剔除。这意味着任何面向我们的三角形的顶点都应该是按逆时针顺序排列。

现在有了顶点数据,需要将其存储在一个缓冲区中。让我们给 State 添加再一个 vertex_buffer 字段:

rust
// lib.rs
+struct State {
+    // ...
+    render_pipeline: wgpu::RenderPipeline,
 
-    // 新添加!
-    vertex_buffer: wgpu::Buffer,
+    // 新添加!
+    vertex_buffer: wgpu::Buffer,
 
-    // ...
-}

接着在 new() 函数中创建顶点缓冲区:

rust
// new()
-let vertex_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Vertex Buffer"),
-        contents: bytemuck::cast_slice(VERTICES),
-        usage: wgpu::BufferUsages::VERTEX,
-    }
-);

为了访问 wgpu::Device 上的 create_buffer_init 方法,我们须导入 DeviceExt 扩展 trait。关于扩展 trait 的更多信息,请查看这篇文章

要导入扩展 trait,只需在 lib.rs 的顶部放上这一行:

rust
use wgpu::util::DeviceExt;

你应该注意到我们使用了 bytemuck 来将 VERTICES 转换为 &[u8]create_buffer_init() 函数的参数类型是 &[u8],而 bytemuck::cast_slice 为我们实现了此类型转换。为此需在 Cargo.toml 中添加以下依赖项:

toml
bytemuck = { version = "1.14", features = [ "derive" ] }

我们还需要实现两个 trait 来使 bytemuck 工作。它们是 bytemuck::Podbytemuck::ZeroablePod 表示 Vertex"Plain Old Data" 数据类型,因此可以被解释为 &[u8] 类型。Zeroable 表示可以对其使用 std::mem::zeroed()。下面修改 Vertex 结构体来派生这些 trait:

rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-struct Vertex {
-    position: [f32; 3],
-    color: [f32; 3],
-}

当结构体里包含了没有实现 Pod 和 Zeroable 的类型时,就需要手动实现这些 trait。这些 trait 不需要我们实现任何函数,只需像下面这样来让代码工作:

rust
unsafe impl bytemuck::Pod for Vertex {}
-unsafe impl bytemuck::Zeroable for Vertex {}

最终,我们可以把 vertex_buffer 添加到 State 结构体中了:

rust
Self {
-    surface,
-    device,
-    queue,
-    config,
-    size,
-    render_pipeline,
-    vertex_buffer,
-}

接下来怎么做?

我们需要告诉 render_pipeline 在绘制时使用这个缓冲区,但首先需要告诉它如何读取此缓冲区。顶点缓冲区布局(VertexBufferLayout)对象和 vertex_buffers 字段可以用来完成这件事,我保证在创建 render_pipeline 时会详细讨论这个问题。

顶点缓冲区布局对象定义了缓冲区在内存中的表示方式,render_pipeline 需要它来在着色器中映射缓冲区。下面是填充了顶点的一个缓冲区的布局:

rust
wgpu::VertexBufferLayout {
-    array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress, // 1.
-    step_mode: wgpu::VertexStepMode::Vertex, // 2.
-    attributes: &[ // 3.
-        wgpu::VertexAttribute {
-            offset: 0, // 4.
-            shader_location: 0, // 5.
-            format: wgpu::VertexFormat::Float32x3, // 6.
-        },
-        wgpu::VertexAttribute {
-            offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
-            shader_location: 1,
-            format: wgpu::VertexFormat::Float32x3,
-        }
-    ]
-}
  1. array_stride 定义了一个顶点所占的字节数。当着色器读取下一个顶点时,它将跳过 array_stride 的字节数。在我们的例子中,array_stride 是 24 个字节。
  2. step_mode 告诉管线此缓冲区中的数组数据中的每个元素代表的是每个顶点还是每个实例的数据,如果只想在开始绘制一个新实例时改变顶点,就可以设置为 wgpu::VertexStepMode::Instance。在后面的教程里我们会讲解实例化绘制。
  3. attributes 描述顶点的各个属性(Attribute)的布局。一般来说,这与结构体的字段是 1:1 映射的,在我们的案例中也是如此。
  4. offset 定义了属性在一个顶点元素中的字节偏移量。对于第一个属性,偏移量通常为零。其后属性的偏移量应为在其之前各属性的 size_of 之和。
  5. shader_location 告诉着色器要在什么位置存储这个属性。例如 @location(0) x: vec3f 在顶点着色器中对应于 Vertex 结构体的 position 字段,而 @location(1) x: vec3f 对应 color 字段。
  6. format 告诉着色器该属性的数据格式。Float32x3对应于着色器代码中的 vec3f。我们可以在一个属性中存储的最大值是Float32x4Uint32x4Sint32x4 也可以)。当我们需要存储比 Float32x4 更大的东西时请记住这一点。

对于视觉学习者来说,我们的顶点缓冲区看起来是这样的:

A figure of the VertexBufferLayout

让我们在 Vertex 上创建一个静态函数来返回此布局对象:

rust
// lib.rs
-impl Vertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        wgpu::VertexBufferLayout {
-            array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    shader_location: 0,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
-                    shader_location: 1,
-                    format: wgpu::VertexFormat::Float32x3,
-                }
-            ]
-        }
-    }
-}

像上边那样指定属性是非常冗长的。我们可以使用 wgpu 提供的 vertex_attr_array 宏来清理一下。现在 VertexBufferLayout 变成了这样:

rust
wgpu::VertexBufferLayout {
-    array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
-    step_mode: wgpu::VertexStepMode::Vertex,
-    attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3],
-}

这无疑很棒,但 Rust 认为 vertex_attr_array 的结果是一个临时值,所以需要进行调整才能从一个函数中返回。我们可以将wgpu::VertexBufferLayout 的生命周期改为 'static,或者使其成为 const。示例如下:

rust
impl Vertex {
-    const ATTRIBS: [wgpu::VertexAttribute; 2] =
-        wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3];
+    // ...
+}

接着在 new() 函数中创建顶点缓冲区:

rust
// new()
+let vertex_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Vertex Buffer"),
+        contents: bytemuck::cast_slice(VERTICES),
+        usage: wgpu::BufferUsages::VERTEX,
+    }
+);

为了访问 wgpu::Device 上的 create_buffer_init 方法,我们须导入 DeviceExt 扩展 trait。关于扩展 trait 的更多信息,请查看这篇文章

要导入扩展 trait,只需在 lib.rs 的顶部放上这一行:

rust
use wgpu::util::DeviceExt;

你应该注意到我们使用了 bytemuck 来将 VERTICES 转换为 &[u8]create_buffer_init() 函数的参数类型是 &[u8],而 bytemuck::cast_slice 为我们实现了此类型转换。为此需在 Cargo.toml 中添加以下依赖项:

toml
bytemuck = { version = "1.14", features = [ "derive" ] }

我们还需要实现两个 trait 来使 bytemuck 工作。它们是 bytemuck::Podbytemuck::ZeroablePod 表示 Vertex"Plain Old Data" 数据类型,因此可以被解释为 &[u8] 类型。Zeroable 表示可以对其使用 std::mem::zeroed()。下面修改 Vertex 结构体来派生这些 trait:

rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+struct Vertex {
+    position: [f32; 3],
+    color: [f32; 3],
+}

结构体里包含了没有实现 PodZeroable 的类型时,就需要手动实现这些 trait。这些 trait 不需要我们实现任何函数,只需像下面这样来让代码工作:

rust
unsafe impl bytemuck::Pod for Vertex {}
+unsafe impl bytemuck::Zeroable for Vertex {}

最终,我们可以把 vertex_buffer 添加到 State 结构体中了:

rust
Self {
+    surface,
+    device,
+    queue,
+    config,
+    size,
+    render_pipeline,
+    vertex_buffer,
+}

接下来怎么做?

我们需要告诉 render_pipeline 在绘制时使用这个缓冲区,但首先需要告诉它如何读取此缓冲区。顶点缓冲区布局(VertexBufferLayout)对象和 vertex_buffers 字段可以用来完成这件事,我保证在创建 render_pipeline 时会详细讨论这个问题。

顶点缓冲区布局对象定义了缓冲区在内存中的表示方式,render_pipeline 需要它来在着色器中映射缓冲区。下面是填充了顶点的一个缓冲区的布局:

rust
wgpu::VertexBufferLayout {
+    array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress, // 1.
+    step_mode: wgpu::VertexStepMode::Vertex, // 2.
+    attributes: &[ // 3.
+        wgpu::VertexAttribute {
+            offset: 0, // 4.
+            shader_location: 0, // 5.
+            format: wgpu::VertexFormat::Float32x3, // 6.
+        },
+        wgpu::VertexAttribute {
+            offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+            shader_location: 1,
+            format: wgpu::VertexFormat::Float32x3,
+        }
+    ]
+}
  1. array_stride 定义了一个顶点所占的字节数。当着色器读取下一个顶点时,它将跳过 array_stride 的字节数。在我们的例子中,array_stride 是 24 个字节。
  2. step_mode 告诉管线此缓冲区中的数组数据中的每个元素代表的是每个顶点还是每个实例的数据,如果只想在开始绘制一个新实例时改变顶点,就可以设置为 wgpu::VertexStepMode::Instance。在后面的教程里我们会讲解实例化绘制。
  3. attributes 描述顶点的各个属性(Attribute)的布局。一般来说,这与结构体的字段是 1:1 映射的,在我们的案例中也是如此。
  4. offset 定义了属性在一个顶点元素中的字节偏移量。对于第一个属性,偏移量通常为零。其后属性的偏移量应为在其之前各属性的 size_of 之和。
  5. shader_location 告诉着色器要在什么位置存储这个属性。例如 @location(0) x: vec3f 在顶点着色器中对应于 Vertex 结构体的 position 字段,而 @location(1) x: vec3f 对应 color 字段。
  6. format 告诉着色器该属性的数据格式。Float32x3对应于着色器代码中的 vec3f。我们可以在一个属性中存储的最大值是Float32x4Uint32x4Sint32x4 也可以)。当我们需要存储比 Float32x4 更大的东西时请记住这一点。

对于视觉学习者来说,我们的顶点缓冲区看起来是这样的:

A figure of the VertexBufferLayout

让我们在 Vertex 上创建一个静态函数来返回此布局对象:

rust
// lib.rs
+impl Vertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        wgpu::VertexBufferLayout {
+            array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    shader_location: 0,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+                    shader_location: 1,
+                    format: wgpu::VertexFormat::Float32x3,
+                }
+            ]
+        }
+    }
+}

像上边那样指定属性是非常冗长的。我们可以使用 wgpu 提供的 vertex_attr_array 宏来清理一下。现在 VertexBufferLayout 变成了这样:

rust
wgpu::VertexBufferLayout {
+    array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
+    step_mode: wgpu::VertexStepMode::Vertex,
+    attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3],
+}

这无疑很棒,但 Rust 认为 vertex_attr_array 的结果是一个临时值,所以需要进行调整才能从一个函数中返回。我们可以将wgpu::VertexBufferLayout 的生命周期改为 'static,或者使其成为 const。示例如下:

rust
impl Vertex {
+    const ATTRIBS: [wgpu::VertexAttribute; 2] =
+        wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3];
 
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
 
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<Self>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &Self::ATTRIBS,
-        }
-    }
-}

不管怎么说,我觉得展示数据如何被映射是件好事,所以暂时不会使用这个宏。

现在我们可以在创建 render_pipeline 时使用它了:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    // ...
-    vertex: wgpu::VertexState {
-        // ...
-        buffers: &[
-            Vertex::desc(),
-        ],
-    },
-    // ...
-});

还需要在渲染函数中实际设置顶点缓冲区,否则程序会崩溃。

rust
// render()
-render_pass.set_pipeline(&self.render_pipeline);
-// 新添加!
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-render_pass.draw(0..3, 0..1);

set_vertex_buffer 函数接收两个参数,第一个参数是顶点缓冲区要使用的缓冲槽索引。你可以连续设置多个顶点缓冲区。

第二个参数是要使用的缓冲区的数据片断。你可以在硬件允许的情况下在一个缓冲区中存储尽可能多的对象,所以 slice 允许我们指定使用缓冲区的哪一部分。我们用 .. 来指定整个缓冲区。

在继续之前,我们需要修改 render_pass.draw() 的调用来使用 VERTICES 所指定的顶点数量。在 State 中添加一个num_vertices,令其值等于 VERTICES.len()

rust
// lib.rs
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<Self>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &Self::ATTRIBS,
+        }
+    }
+}

不管怎么说,我觉得展示数据如何被映射是件好事,所以暂时不会使用这个宏。

现在我们可以在创建 render_pipeline 时使用它了:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    // ...
+    vertex: wgpu::VertexState {
+        // ...
+        buffers: &[
+            Vertex::desc(),
+        ],
+    },
+    // ...
+});

还需要在渲染函数中实际设置顶点缓冲区,否则程序会崩溃。

rust
// render()
+render_pass.set_pipeline(&self.render_pipeline);
+// 新添加!
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+render_pass.draw(0..3, 0..1);

set_vertex_buffer 函数接收两个参数,第一个参数是顶点缓冲区要使用的缓冲槽索引。你可以连续设置多个顶点缓冲区。

第二个参数是要使用的缓冲区的数据片断。你可以在硬件允许的情况下在一个缓冲区中存储尽可能多的对象,所以 slice 允许我们指定使用缓冲区的哪一部分。我们用 .. 来指定整个缓冲区。

在继续之前,我们需要修改 render_pass.draw() 的调用来使用 VERTICES 所指定的顶点数量。在 State 中添加一个num_vertices,令其值等于 VERTICES.len()

rust
// lib.rs
 
-struct State {
-    // ...
-    num_vertices: u32,
-}
+struct State {
+    // ...
+    num_vertices: u32,
+}
 
-impl State {
-    // ...
-    fn new(...) -> Self {
-        // ...
-        let num_vertices = VERTICES.len() as u32;
+impl State {
+    // ...
+    fn new(...) -> Self {
+        // ...
+        let num_vertices = VERTICES.len() as u32;
 
-        Self {
-            surface,
-            device,
-            queue,
-            config,
-            render_pipeline,
-            vertex_buffer,
-            num_vertices,
-            size,
-        }
-    }
-}

然后在绘制命令中使用它:

rust
// render
-render_pass.draw(0..self.num_vertices, 0..1);

在上面的修改生效之前,还需要更新着色器,以便从顶点缓冲区中获取数据。

rust
// 顶点着色器
+        Self {
+            surface,
+            device,
+            queue,
+            config,
+            render_pipeline,
+            vertex_buffer,
+            num_vertices,
+            size,
+        }
+    }
+}

然后在绘制命令中使用它:

rust
// render
+render_pass.draw(0..self.num_vertices, 0..1);

在上面的修改生效之前,还需要更新着色器,以便从顶点缓冲区中获取数据。

rust
// 顶点着色器
 
-struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) color: vec3f,
-};
+struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) color: vec3f,
+};
 
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) color: vec3f,
-};
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) color: vec3f,
+};
 
-@vertex
-fn vs_main(
-    model: VertexInput,
-) -> VertexOutput {
-    var out: VertexOutput;
-    out.color = model.color;
-    out.clip_position = vec4f(model.position, 1.0);
-    return out;
-}
+@vertex
+fn vs_main(
+    model: VertexInput,
+) -> VertexOutput {
+    var out: VertexOutput;
+    out.color = model.color;
+    out.clip_position = vec4f(model.position, 1.0);
+    return out;
+}
 
-// 片元着色器
+// 片元着色器
 
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return vec4f(in.color, 1.0);
-}

如果做的正确无误,运行程序应该就能看到一个下边这样的三角形:

A colorful triangle

索引缓冲区

从技术的角度来看,目前的示例并不需要索引缓冲区,但它们仍然很有用。当开始使用有大量三角形的模型时,索引缓冲区就会发挥作用。考虑一下下边的五边形:

A pentagon made of 3 triangles

它总共有 5 个顶点和 3 个三角形。现在,如果我们想只用顶点来显示这样的东西,我们就需要以下顶点数据:

rust
const VERTICES: &[Vertex] = &[
-    Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
-    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return vec4f(in.color, 1.0);
+}

如果做的正确无误,运行程序应该就能看到一个下边这样的三角形:

A colorful triangle

索引缓冲区

从技术的角度来看,目前的示例并不需要索引缓冲区,但它们仍然很有用。当开始使用有大量三角形的模型时,索引缓冲区就会发挥作用。考虑一下下边的五边形:

A pentagon made of 3 triangles

它总共有 5 个顶点和 3 个三角形。现在,如果我们想只用顶点来显示这样的东西,我们就需要以下顶点数据:

rust
const VERTICES: &[Vertex] = &[
+    Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
+    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
 
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
-    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
+    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
 
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
-    Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
-    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
-];

你会注意到有些顶点被使用了不止一次。C 和 B 顶点被使用了两次,而 E 顶点被重复使用了 3 次。假设每个浮点数是 4 个字节,那么这意味着在我们用于 VERTICES 的 216 个字节中,有 96 个字节是重复的数据。如果能只把这些顶点列出来一次不是很好吗?我们可以做到这一点!

这,就是索引缓冲区发挥作用的地方。

大体上来说,我们在 VERTICES 中存储所有唯一的顶点,再创建另一个缓冲区来存储指向 VERTICES 中元素的索引,并以这些索引来组成三角形。下面还是以五边形为例:

rust
// lib.rs
-const VERTICES: &[Vertex] = &[
-    Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
-    Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
-    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
-];
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
+    Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
+    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
+];

你会注意到有些顶点被使用了不止一次。C 和 B 顶点被使用了两次,而 E 顶点被重复使用了 3 次。假设每个浮点数是 4 个字节,那么这意味着在我们用于 VERTICES 的 216 个字节中,有 96 个字节是重复的数据。如果能只把这些顶点列出来一次不是很好吗?我们可以做到这一点!

这,就是索引缓冲区发挥作用的地方。

大体上来说,我们在 VERTICES 中存储所有唯一的顶点,再创建另一个缓冲区来存储指向 VERTICES 中元素的索引,并以这些索引来组成三角形。下面还是以五边形为例:

rust
// lib.rs
+const VERTICES: &[Vertex] = &[
+    Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
+    Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
+    Vertex { position: [0.44147372, 0.2347359, 0.0], color: [0.5, 0.0, 0.5] }, // E
+];
 
-const INDICES: &[u16] = &[
-    0, 1, 4,
-    1, 2, 4,
-    2, 3, 4,
-];

现在这种设置下,VERTICES 占用了 120 个字节,而 INDICES 只有 18 个字节,因为 u16 类型是 2 个字节长。在这种情况下,wgpu 会自动增加 2 个字节的填充,以确保缓冲区被对齐到 4 个字节,但它仍然只有 20 个字节。五边形总共是 140 字节,这意味着我们节省了 76 个字节! 这可能看起来不多,但当处理数十万的三角形时,索引可以节省大量的内存。

为了使用索引,有几处我们需要修改。首先需要创建一个缓冲区来存储索引。在 Statenew() 函数中,创建了 vertex_buffer 之后创建 index_buffer。同时将 num_vertices 改为num_indices,令其值等于 INDICES.len()

rust
let vertex_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Vertex Buffer"),
-        contents: bytemuck::cast_slice(VERTICES),
-        usage: wgpu::BufferUsages::VERTEX,
-    }
-);
-// 新添加!
-let index_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Index Buffer"),
-        contents: bytemuck::cast_slice(INDICES),
-        usage: wgpu::BufferUsages::INDEX,
-    }
-);
-let num_indices = INDICES.len() as u32;

我们不需要为索引实现 PodZeroable,因为 bytemuck 已经为 u16 等基本类型实现了它们。只需将 index_buffernum_indices 添加到 State 结构体中。

rust
struct State {
-    surface: wgpu::Surface,
-    device: wgpu::Device,
-    queue: wgpu::Queue,
-    config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
-    render_pipeline: wgpu::RenderPipeline,
-    vertex_buffer: wgpu::Buffer,
-    // 新添加!
-    index_buffer: wgpu::Buffer,
-    num_indices: u32,
-}

然后在构造函数中填充这些字段:

rust
Self {
-    surface,
-    device,
-    queue,
-    config,
-    size,
-    render_pipeline,
-    vertex_buffer,
-    // 新添加!
-    index_buffer,
-    num_indices,
-}

我们现在所要做的就是更新 render() 函数来使用 index_buffer

rust
// render()
-render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); // 1.
-render_pass.draw_indexed(0..self.num_indices, 0, 0..1); // 2.

有几点需要注意:

  1. 命令名称是 set_index_buffer 而不是 set_index_buffers, 一次绘制(draw_XXX())只能设置一个索引缓冲区。但是,你可以在一个渲染通道内调用多次绘制,每次都设置不同的索引缓冲区。
  2. 当使用索引缓冲区时,需使用 draw_indexed 来绘制,draw 命令会忽略索引缓冲区。还需确保你使用的是索引数(num_indices)而非顶点数,否则你的模型要么画错,要么因为没有足够的索引数而导致程序恐慌(panic)。

完成这些后,运行程序应该就能看到窗口里有一个洋红色的五边形了:

Magenta pentagon in window

颜色校正

如果在洋红色五边形上使用取色器,你会得到一个 #BC00BC 的十六进制值。如果把它转换成 RGB 值会得到(188, 0, 188),将这些值除以 255 使其映射进 [0,1] 范围,大致会得到(0.737254902,0,0.737254902)。这与我们赋给顶点颜色的值不同,后者是(0.5, 0.0, 0.5)。其原因与色彩空间(Color Space)有关。

大多数显示器使用的色彩空间被称为 sRGB(事实上,目前市面上的中高端显示器已经支持 DisplayP3 甚至是 BT.2100 等广色域色彩空间,macOS 与 iOS 设备默认使用的就是 DisplayP3 色彩空间)。我们的展示平面(完全取决于从 surface.get_capabilities(&adapter).formats 返回的格式)默认支持 sRGB 纹理格式。sRGB 格式是根据颜色的相对亮度而不是实际亮度来存储的。其原因是人眼对光线的感知不是线性的。我们注意到较深的颜色比较浅的颜色有更多差异。

可以用下面的公式得到一个正确颜色的近似值。srgb_color = (rgb_color / 255) ^ 2.2。在 RGB 值为 (188, 0, 188) 的情况下,我们将得到 (0.511397819, 0.0, 0.511397819)。与我们的(0.5, 0.0, 0.5)有点偏差。虽然你可以通过调整公式来获得所需的数值,但使用纹理可能会节省很多时间,因为它们默认是以 sRGB 方式存储的,所以不会像顶点颜色那样出现颜色不准确的情况。我们会在下一课中介绍纹理。

假如你的显示设备使用的是 DisplayP3 或 BT.2100 等广色域色彩空间,那么当你使用取色器检查屏幕上的渲染结果时,拾取到的色值将与着色器内的返回值不一致。

这是因为目前 WebGPU 仅支持较小色域的 sRGB 色彩空间,而硬件会执行色彩空间转换(color space conversion)将 sRGB 色值映射到更广的色域来显示到屏幕上,因此,使用取色器拾取到的色值是经过转换后的值。

挑战

使用顶点缓冲区和索引缓冲区创建一个比教程里做的更复杂的形状(也就是三个以上的三角形),并用空格键在两者之间切换。

-
+const INDICES: &[u16] = &[
+    0, 1, 4,
+    1, 2, 4,
+    2, 3, 4,
+];

现在这种设置下,VERTICES 占用了 120 个字节,而 INDICES 只有 18 个字节,因为 u16 类型是 2 个字节长。在这种情况下,wgpu 会自动增加 2 个字节的填充,以确保缓冲区被对齐到 4 个字节,但它仍然只有 20 个字节。五边形总共是 140 字节,这意味着我们节省了 76 个字节! 这可能看起来不多,但当处理数十万的三角形时,索引可以节省大量的内存。

为了使用索引,有几处我们需要修改。首先需要创建一个缓冲区来存储索引。在 Statenew() 函数中,创建了 vertex_buffer 之后创建 index_buffer。同时将 num_vertices 改为num_indices,令其值等于 INDICES.len()

rust
let vertex_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Vertex Buffer"),
+        contents: bytemuck::cast_slice(VERTICES),
+        usage: wgpu::BufferUsages::VERTEX,
+    }
+);
+// 新添加!
+let index_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Index Buffer"),
+        contents: bytemuck::cast_slice(INDICES),
+        usage: wgpu::BufferUsages::INDEX,
+    }
+);
+let num_indices = INDICES.len() as u32;

我们不需要为索引实现 PodZeroable,因为 bytemuck 已经为 u16 等基本类型实现了它们。只需将 index_buffernum_indices 添加到 State 结构体中。

rust
struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+    render_pipeline: wgpu::RenderPipeline,
+    vertex_buffer: wgpu::Buffer,
+    // 新添加!
+    index_buffer: wgpu::Buffer,
+    num_indices: u32,
+}

然后在构造函数中填充这些字段:

rust
Self {
+    surface,
+    device,
+    queue,
+    config,
+    size,
+    render_pipeline,
+    vertex_buffer,
+    // 新添加!
+    index_buffer,
+    num_indices,
+}

我们现在所要做的就是更新 render() 函数来使用 index_buffer

rust
// render()
+render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); // 1.
+render_pass.draw_indexed(0..self.num_indices, 0, 0..1); // 2.

有几点需要注意:

  1. 命令名称是 set_index_buffer 而不是 set_index_buffers, 一次绘制(draw_XXX())只能设置一个索引缓冲区。但是,你可以在一个渲染通道内调用多次绘制,每次都设置不同的索引缓冲区。
  2. 当使用索引缓冲区时,需使用 draw_indexed 来绘制,draw 命令会忽略索引缓冲区。还需确保你使用的是索引数(num_indices)而非顶点数,否则你的模型要么画错,要么因为没有足够的索引数而导致程序恐慌(panic)。

完成这些后,运行程序应该就能看到窗口里有一个洋红色的五边形了:

Magenta pentagon in window

颜色校正

如果在洋红色五边形上使用取色器,你会得到一个 #BC00BC 的十六进制值。如果把它转换成 RGB 值会得到(188, 0, 188),将这些值除以 255 使其映射进 [0,1] 范围,大致会得到(0.737254902,0,0.737254902)。这与我们赋给顶点颜色的值不同,后者是(0.5, 0.0, 0.5)。其原因与色彩空间(Color Space)有关。

大多数显示器使用的色彩空间被称为 sRGB(事实上,目前市面上的中高端显示器已经支持 DisplayP3 甚至是 BT.2100 等广色域色彩空间,macOS 与 iOS 设备默认使用的就是 DisplayP3 色彩空间)。我们的展示平面(完全取决于从 surface.get_capabilities(&adapter).formats 返回的格式)默认支持 sRGB 纹理格式。sRGB 格式是根据颜色的相对亮度而不是实际亮度来存储的。其原因是人眼对光线的感知不是线性的。我们注意到较深的颜色比较浅的颜色有更多差异。

可以用下面的公式得到一个正确颜色的近似值。srgb_color = (rgb_color / 255) ^ 2.2。在 RGB 值为 (188, 0, 188) 的情况下,我们将得到 (0.511397819, 0.0, 0.511397819)。与我们的(0.5, 0.0, 0.5)有点偏差。虽然你可以通过调整公式来获得所需的数值,但使用纹理可能会节省很多时间,因为它们默认是以 sRGB 方式存储的,所以不会像顶点颜色那样出现颜色不准确的情况。我们会在下一课中介绍纹理。

假如你的显示设备使用的是 DisplayP3 或 BT.2100 等广色域色彩空间,那么当你使用取色器检查屏幕上的渲染结果时,拾取到的色值将与着色器内的返回值不一致。

这是因为目前 WebGPU 仅支持较小色域的 sRGB 色彩空间,而硬件会执行色彩空间转换(color space conversion)将 sRGB 色值映射到更广的色域来显示到屏幕上,因此,使用取色器拾取到的色值是经过转换后的值。

挑战

使用顶点缓冲区和索引缓冲区创建一个比教程里做的更复杂的形状(也就是三个以上的三角形),并用空格键在两者之间切换。

+ \ No newline at end of file diff --git a/beginner/tutorial5-textures/index.html b/beginner/tutorial5-textures/index.html index ec36dd4e1..e555cfcbc 100644 --- a/beginner/tutorial5-textures/index.html +++ b/beginner/tutorial5-textures/index.html @@ -5,369 +5,371 @@ 纹理和绑定组 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

纹理和绑定组

目前为止,我们一直在绘制简单的图形。当然可以只用三角形来做游戏,而试图绘制高精度的对象又会极大地限制能运行我们游戏的设备。不过,可以用 纹理 来解决此问题。

纹理(Textures)是叠加在三角形网格(Mesh)上的图像,使其看起来有丰富的细节。有多种类型的纹理,如法线贴图(Normal Maps,也就是法线纹理)、凹凸贴图(Bump Maps)、镜面贴图和漫反射贴图。下边将讨论漫反射贴图,简单来说也就是颜色纹理。

加载图像文件

要把一个图像映射到对象网格上,首先是需要有一个图像文件。就使用下边这棵快乐的小树吧:

一棵快乐的树

我们将使用 image 包 来加载这棵树。先把它添加到依赖项中:

toml
[dependencies.image]
-version = "0.24"
-default-features = false
-features = ["png", "jpeg"]

image 包含的 jpeg 解码器使用 rayon 来通过多线程加速解码。WASM 目前不支持线程,所以我们需要禁用这一特性,这样代码在尝试加载网络上的 jpeg 时就不会崩溃。

在 WASM 中解码 jpeg 性能不高。如果你想在 WASM 中加快图像加载速度,可以选择使用浏览器的内置解码器来替换 wasm-bindgen 构建时使用的 image。这涉及到在 Rust 中创建一个 <img> 标记来获取图像,然后创建一个 <canvas> 来获取像素数据,我把这留作读者的练习。

Statenew() 函数中,于 surface.configure() 之后添加以下代码:

rust
surface.configure(&device, &config);
-// 新添加!
+    
Skip to content

纹理和绑定组

目前为止,我们一直在绘制简单的图形。当然可以只用三角形来做游戏,而试图绘制高精度的对象又会极大地限制能运行我们游戏的设备。不过,可以用 纹理 来解决此问题。

纹理(Textures)是叠加在三角形网格(Mesh)上的图像,使其看起来有丰富的细节。有多种类型的纹理,如法线贴图(Normal Maps,也就是法线纹理)、凹凸贴图(Bump Maps)、镜面贴图和漫反射贴图。下边将讨论漫反射贴图,简单来说也就是颜色纹理。

加载图像文件

要把一个图像映射到对象网格上,首先是需要有一个图像文件。就使用下边这棵快乐的小树吧:

一棵快乐的树

我们将使用 image 包 来加载这棵树。先把它添加到依赖项中:

toml
[dependencies.image]
+version = "0.24"
+default-features = false
+features = ["png", "jpeg"]

image 包含的 jpeg 解码器使用 rayon 来通过多线程加速解码。WASM 目前不支持线程,所以我们需要禁用这一特性,这样代码在尝试加载网络上的 jpeg 时就不会崩溃。

在 WASM 中解码 jpeg 性能不高。如果你想在 WASM 中加快图像加载速度,可以选择使用浏览器的内置解码器来替换 wasm-bindgen 构建时使用的 image。这涉及到在 Rust 中创建一个 <img> 标记来获取图像,然后创建一个 <canvas> 来获取像素数据,我把这留作读者的练习。

Statenew() 函数中,于 surface.configure() 之后添加以下代码:

rust
surface.configure(&device, &config);
+// 新添加!
 
-let diffuse_bytes = include_bytes!("happy-tree.png");
-let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
-let diffuse_rgba = diffuse_image.to_rgba8();
+let diffuse_bytes = include_bytes!("happy-tree.png");
+let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
+let diffuse_rgba = diffuse_image.to_rgba8();
 
-use image::GenericImageView;
-let dimensions = diffuse_image.dimensions();

此处代码从图像文件中读取字节,并将其加载到 image 对象中,然后转换为 rgba 动态数组。我们还保存了图像的尺寸信息以便在创建实际纹理时使用。

现在我们来创建纹理:

rust
let texture_size = wgpu::Extent3d {
-    width: dimensions.0,
-    height: dimensions.1,
-    depth_or_array_layers: 1,
-};
-let diffuse_texture = device.create_texture(
-    &wgpu::TextureDescriptor {
-        // 所有纹理都是以 3D 形式存储的,我们通过设置深度 1 来表示 2D 纹理
-        size: texture_size,
-        mip_level_count: 1, // 后面会详细介绍此字段
-        sample_count: 1,
-        dimension: wgpu::TextureDimension::D2,
-        // 大多数图像都是使用 sRGB 来存储的,我们需要在这里指定。
-        format: wgpu::TextureFormat::Rgba8UnormSrgb,
-        // TEXTURE_BINDING 表示我们要在着色器中使用这个纹理。
-        // COPY_DST 表示我们能将数据复制到这个纹理上。
-        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
-        label: Some("diffuse_texture"),
-        view_formats: &[],
-    }
-);

填充数据到纹理中

Texture 结构体没有函数可以直接与数据交互。但我们可以使用之前创建的命令队列上的 write_texture 命令来填充纹理数据。下边是具体代码:

rust
queue.write_texture(
-    // 告诉 wgpu 从何处复制像素数据
-    wgpu::ImageCopyTexture {
-        texture: &diffuse_texture,
-        mip_level: 0,
-        origin: wgpu::Origin3d::ZERO,
-        aspect: wgpu::TextureAspect::All,
-    },
-    // 实际像素数据
-    &diffuse_rgba,
-    // 纹理的内存布局
-    wgpu::ImageDataLayout {
-        offset: 0,
-        bytes_per_row: Some(4 * dimensions.0),
-        rows_per_image: Some(dimensions.1),
-    },
-    texture_size,
-);

填充纹理数据的经典方式是将像素数据先复制到一个缓冲区,然后再从缓冲区复制到纹理中。使用 write_texture 更有效率,因为它少用了一个缓冲区 -- 不过这里还是介绍一下,以防读者有需要:

rust
let buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Temp Buffer"),
-        contents: &diffuse_rgba,
-        usage: wgpu::BufferUsages::COPY_SRC,
-    }
-);
+use image::GenericImageView;
+let dimensions = diffuse_image.dimensions();

此处代码从图像文件中读取字节,并将其加载到 image 对象中,然后转换为 rgba 动态数组。我们还保存了图像的尺寸信息以便在创建实际纹理时使用。

现在我们来创建纹理:

rust
let texture_size = wgpu::Extent3d {
+    width: dimensions.0,
+    height: dimensions.1,
+    depth_or_array_layers: 1,
+};
+let diffuse_texture = device.create_texture(
+    &wgpu::TextureDescriptor {
+        // 所有纹理都是以 3D 形式存储的,我们通过设置深度 1 来表示 2D 纹理
+        size: texture_size,
+        mip_level_count: 1, // 后面会详细介绍此字段
+        sample_count: 1,
+        dimension: wgpu::TextureDimension::D2,
+        // 大多数图像都是使用 sRGB 来存储的,我们需要在这里指定。
+        format: wgpu::TextureFormat::Rgba8UnormSrgb,
+        // TEXTURE_BINDING 表示我们要在着色器中使用这个纹理。
+        // COPY_DST 表示我们能将数据复制到这个纹理上。
+        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
+        label: Some("diffuse_texture"),
+        view_formats: &[],
+    }
+);

填充数据到纹理中

Texture 结构体没有函数可以直接与数据交互。但我们可以使用之前创建的命令队列上的 write_texture 命令来填充纹理数据。下边是具体代码:

rust
queue.write_texture(
+    // 告诉 wgpu 从何处复制像素数据
+    wgpu::ImageCopyTexture {
+        texture: &diffuse_texture,
+        mip_level: 0,
+        origin: wgpu::Origin3d::ZERO,
+        aspect: wgpu::TextureAspect::All,
+    },
+    // 实际像素数据
+    &diffuse_rgba,
+    // 纹理的内存布局
+    wgpu::ImageDataLayout {
+        offset: 0,
+        bytes_per_row: Some(4 * dimensions.0),
+        rows_per_image: Some(dimensions.1),
+    },
+    texture_size,
+);

填充纹理数据的经典方式是将像素数据先复制到一个缓冲区,然后再从缓冲区复制到纹理中。使用 write_texture 更有效率,因为它少用了一个缓冲区 -- 不过这里还是介绍一下,以防读者有需要:

rust
let buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Temp Buffer"),
+        contents: &diffuse_rgba,
+        usage: wgpu::BufferUsages::COPY_SRC,
+    }
+);
 
-let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-    label: Some("texture_buffer_copy_encoder"),
-});
+let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+    label: Some("texture_buffer_copy_encoder"),
+});
 
-encoder.copy_buffer_to_texture(
-    wgpu::ImageCopyBuffer {
-        buffer: &buffer,
-        offset: 0,
-        bytes_per_row: 4 * dimensions.0,
-        rows_per_image: dimensions.1,
-    },
-    wgpu::ImageCopyTexture {
-        texture: &diffuse_texture,
-        mip_level: 0,
-        array_layer: 0,
-        origin: wgpu::Origin3d::ZERO,
-    },
-    size,
-);
+encoder.copy_buffer_to_texture(
+    wgpu::ImageCopyBuffer {
+        buffer: &buffer,
+        offset: 0,
+        bytes_per_row: 4 * dimensions.0,
+        rows_per_image: dimensions.1,
+    },
+    wgpu::ImageCopyTexture {
+        texture: &diffuse_texture,
+        mip_level: 0,
+        array_layer: 0,
+        origin: wgpu::Origin3d::ZERO,
+    },
+    size,
+);
 
-queue.submit(std::iter::once(encoder.finish()));

值得注意的是 bytes_per_row 字段,这个值需要是 256 的倍数。查看 gif 教程 以了解更多细节。

纹理视图与采样器

现在纹理中已经有了数据,我们需要一种方法来使用它。这,就是纹理视图TextureView)和采样器Sampler)的用处。

纹理视图描述纹理及其关联的元数据。采样器控制纹理如何被 采样。采样工作类似于 GIMP/Photoshop 中的滴管工具。我们的程序在纹理上提供一个坐标(被称为 纹理坐标 ),然后采样器根据纹理和一些内部参数返回相应的颜色。

现在我们来定义 diffuse_texture_viewdiffuse_sampler

rust
// 我们不需要过多地配置纹理视图,所以使用 wgpu 的默认值。
-let diffuse_texture_view = diffuse_texture.create_view(&wgpu::TextureViewDescriptor::default());
-let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
-    address_mode_u: wgpu::AddressMode::ClampToEdge,
-    address_mode_v: wgpu::AddressMode::ClampToEdge,
-    address_mode_w: wgpu::AddressMode::ClampToEdge,
-    mag_filter: wgpu::FilterMode::Linear,
-    min_filter: wgpu::FilterMode::Nearest,
-    mipmap_filter: wgpu::FilterMode::Nearest,
-    ..Default::default()
-});

address_mode_* 参数指定了如果采样器得到的纹理坐标超出了纹理边界时该如何处理。我们有几个选项可供选择:

  • ClampToEdge:任何在纹理外的纹理坐标将返回离纹理边缘最近的像素的颜色。
  • Repeat。当纹理坐标超过纹理的尺寸时,纹理将重复。
  • MirrorRepeat。类似于Repeat,但图像在越过边界时将翻转。

address_mode.png

mag_filtermin_filter 字段描述了当采样足迹小于或大于一个纹素(Texel)时该如何处理。当场景中的贴图远离或靠近 camera 时,这两个字段的设置通常会有效果。

有 2 个选项:

  • Linear:在每个维度中选择两个纹素,并在它们的值之间返回线性插值。
  • Nearest:返回离纹理坐标最近的纹素的值。这创造了一个从远处看比较清晰但近处有像素的图像。然而,如果你的纹理被设计成像素化的,比如像素艺术游戏,或者像 Minecraft 这样的体素游戏,这可能是符合预期的。

Mipmaps 是一个复杂的话题,需要在未来单独写一个章节。现在,我们可以说 mipmap_filter 的功能有点类似于 (mag/min)_filter,因为它告诉采样器如何在 mipmaps 之间混合。

其他字段使用了默认值。如果想了解字段详情,请查看 wgpu 文档

现在,我们需要用到 BindGroupPipelineLayout 来将所有这些不同的资源都接入。

绑定组

绑定组BindGroup)描述了一组资源以及如何通过着色器访问它们。我们先来创建一个绑定组布局BindGroupLayout):

rust
let texture_bind_group_layout =
-            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-                entries: &[
-                    wgpu::BindGroupLayoutEntry {
-                        binding: 0,
-                        visibility: wgpu::ShaderStages::FRAGMENT,
-                        ty: wgpu::BindingType::Texture {
-                            multisampled: false,
-                            view_dimension: wgpu::TextureViewDimension::D2,
-                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
-                        },
-                        count: None,
-                    },
-                    wgpu::BindGroupLayoutEntry {
-                        binding: 1,
-                        visibility: wgpu::ShaderStages::FRAGMENT,
-                        // This should match the filterable field of the
-                        // corresponding Texture entry above.
-                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
-                        count: None,
-                    },
-                ],
-                label: Some("texture_bind_group_layout"),
-            });

texture_bind_group_layout 有两个条目:一个是绑定到 0 资源槽的纹理,另一个是绑定到 1 资源槽的采样器。这两个绑定只对由 visibility 字段指定的片元着色器可见。这个字段的可选值是 NONEVERTEXFRAGMENTCOMPUTE 的任意按位或(|)组合。

现在使用绑定组布局texture_bind_group_layout)来创建绑定组:

rust
let diffuse_bind_group = device.create_bind_group(
-    &wgpu::BindGroupDescriptor {
-        layout: &texture_bind_group_layout,
-        entries: &[
-            wgpu::BindGroupEntry {
-                binding: 0,
-                resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
-            },
-            wgpu::BindGroupEntry {
-                binding: 1,
-                resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
-            }
-        ],
-        label: Some("diffuse_bind_group"),
-    }
-);

看着这个,你可能会有一点似曾相识的感觉! 这是因为绑定组绑定组布局的一个更具体的声明。它们分开的原因是,只要是共享同一个绑定组布局的绑定组,就能在运行时实时切换。创建的每个纹理和采样器都需要添加到一个绑定组中。为了达成目的,我们将为每个纹理创建一个新的绑定组。

让我们把 diffuse_bind_group 添加到 State 结构体中:

rust
struct State {
-    surface: wgpu::Surface,
-    device: wgpu::Device,
-    queue: wgpu::Queue,
-    config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
-    render_pipeline: wgpu::RenderPipeline,
-    vertex_buffer: wgpu::Buffer,
-    index_buffer: wgpu::Buffer,
-    num_indices: u32,
-    diffuse_bind_group: wgpu::BindGroup, // 新添加!
-}

确保我们在 new() 函数中返回这个字段:

rust
impl State {
-    async fn new() -> Self {
-        // ...
-        Self {
-            // ...
-            // 新添加!
-            diffuse_bind_group,
-        }
-    }
-}

现在,我们来在 render() 函数中使用绑定组:

rust
// render()
-// ...
-render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); // NEW!
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
+queue.submit(std::iter::once(encoder.finish()));

值得注意的是 bytes_per_row 字段,这个值需要是 256 的倍数。查看 gif 教程 以了解更多细节。

纹理视图与采样器

现在纹理中已经有了数据,我们需要一种方法来使用它。这,就是纹理视图TextureView)和采样器Sampler)的用处。

纹理视图描述纹理及其关联的元数据。采样器控制纹理如何被 采样。采样工作类似于 GIMP/Photoshop 中的滴管工具。我们的程序在纹理上提供一个坐标(被称为 纹理坐标 ),然后采样器根据纹理和一些内部参数返回相应的颜色。

现在我们来定义 diffuse_texture_viewdiffuse_sampler

rust
// 我们不需要过多地配置纹理视图,所以使用 wgpu 的默认值。
+let diffuse_texture_view = diffuse_texture.create_view(&wgpu::TextureViewDescriptor::default());
+let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
+    address_mode_u: wgpu::AddressMode::ClampToEdge,
+    address_mode_v: wgpu::AddressMode::ClampToEdge,
+    address_mode_w: wgpu::AddressMode::ClampToEdge,
+    mag_filter: wgpu::FilterMode::Linear,
+    min_filter: wgpu::FilterMode::Nearest,
+    mipmap_filter: wgpu::FilterMode::Nearest,
+    ..Default::default()
+});

address_mode_* 参数指定了如果采样器得到的纹理坐标超出了纹理边界时该如何处理。我们有几个选项可供选择:

  • ClampToEdge:任何在纹理外的纹理坐标将返回离纹理边缘最近的像素的颜色。
  • Repeat:当纹理坐标超过纹理的尺寸时,纹理将重复。
  • MirrorRepeat:类似于 Repeat,但图像在越过边界时将翻转。

address_mode.png

mag_filtermin_filter 字段描述了当采样足迹小于或大于一个纹素(Texel)时该如何处理。当场景中的贴图远离或靠近 camera 时,这两个字段的设置通常会有效果。

有 2 个选项:

  • Linear:在每个维度中选择两个纹素,并在它们的值之间返回线性插值。
  • Nearest:返回离纹理坐标最近的纹素的值。这创造了一个从远处看比较清晰但近处有像素的图像。然而,如果你的纹理被设计成像素化的,比如像素艺术游戏,或者像 Minecraft 这样的体素游戏,这可能是符合预期的。

Mipmaps 是一个复杂的话题,需要在未来单独写一个章节。现在,我们可以说 mipmap_filter 的功能有点类似于 (mag/min)_filter,因为它告诉采样器如何在 mipmaps 之间混合。

其他字段使用了默认值。如果想了解字段详情,请查看 wgpu 文档

现在,我们需要用到 BindGroupPipelineLayout 来将所有这些不同的资源都接入。

绑定组

绑定组BindGroup)描述了一组资源以及如何通过着色器访问它们。我们先来创建一个绑定组布局BindGroupLayout):

rust
let texture_bind_group_layout =
+            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+                entries: &[
+                    wgpu::BindGroupLayoutEntry {
+                        binding: 0,
+                        visibility: wgpu::ShaderStages::FRAGMENT,
+                        ty: wgpu::BindingType::Texture {
+                            multisampled: false,
+                            view_dimension: wgpu::TextureViewDimension::D2,
+                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
+                        },
+                        count: None,
+                    },
+                    wgpu::BindGroupLayoutEntry {
+                        binding: 1,
+                        visibility: wgpu::ShaderStages::FRAGMENT,
+                        // This should match the filterable field of the
+                        // corresponding Texture entry above.
+                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
+                        count: None,
+                    },
+                ],
+                label: Some("texture_bind_group_layout"),
+            });

texture_bind_group_layout 有两个条目:一个是绑定到 0 资源槽的纹理,另一个是绑定到 1 资源槽的采样器。这两个绑定只对由 visibility 字段指定的片元着色器可见。这个字段的可选值是 NONEVERTEXFRAGMENTCOMPUTE 的任意按位或(|)组合。

现在使用绑定组布局texture_bind_group_layout)来创建绑定组:

rust
let diffuse_bind_group = device.create_bind_group(
+    &wgpu::BindGroupDescriptor {
+        layout: &texture_bind_group_layout,
+        entries: &[
+            wgpu::BindGroupEntry {
+                binding: 0,
+                resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
+            },
+            wgpu::BindGroupEntry {
+                binding: 1,
+                resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
+            }
+        ],
+        label: Some("diffuse_bind_group"),
+    }
+);

看着这个,你可能会有一点似曾相识的感觉! 这是因为绑定组绑定组布局的一个更具体的声明。它们分开的原因是,只要是共享同一个绑定组布局的绑定组,就能在运行时实时切换。创建的每个纹理和采样器都需要添加到一个绑定组中。为了达成目的,我们将为每个纹理创建一个新的绑定组。

让我们把 diffuse_bind_group 添加到 State 结构体中:

rust
struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+    render_pipeline: wgpu::RenderPipeline,
+    vertex_buffer: wgpu::Buffer,
+    index_buffer: wgpu::Buffer,
+    num_indices: u32,
+    diffuse_bind_group: wgpu::BindGroup, // 新添加!
+}

确保我们在 new() 函数中返回这个字段:

rust
impl State {
+    async fn new() -> Self {
+        // ...
+        Self {
+            // ...
+            // 新添加!
+            diffuse_bind_group,
+        }
+    }
+}

现在,我们来在 render() 函数中使用绑定组:

rust
// render()
+// ...
+render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); // NEW!
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
 
-render_pass.draw_indexed(0..self.num_indices, 0, 0..1);

管线布局

还记得在管线章节创建的管线布局PipelineLayout)吗?现在我们终于可以使用它了! 管线布局包含一个管线可以使用的绑定组布局的列表。修改 render_pipeline_layout 以使用 texture_bind_group_layout

rust
async fn new(...) {
-    // ...
-    let render_pipeline_layout = device.create_pipeline_layout(
-        &wgpu::PipelineLayoutDescriptor {
-            label: Some("Render Pipeline Layout"),
-            bind_group_layouts: &[&texture_bind_group_layout], // 新添加!
-            push_constant_ranges: &[],
-        }
-    );
-    // ...
-}

修改 VERTICES 常量

对于 Vertex 的定义有几处需要修改。到目前为止,我们一直在使用 color 字段来设置网格颜色。现在我们要用 tex_coords 代替 color,这些坐标会被传递给采样器以获取纹素(Texel)的颜色。

由于 tex_coords 是二维的,需要修改这个字段的类型为两个浮点数的数组。

先来修改 Vertex 结构体:

rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-struct Vertex {
-    position: [f32; 3],
-    tex_coords: [f32; 2], // 新添加!
-}

然后在 VertexBufferLayout 中反映这些变化:

rust
impl Vertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    shader_location: 0,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
-                    shader_location: 1,
-                    format: wgpu::VertexFormat::Float32x2, // NEW!
-                },
-            ]
-        }
-    }
-}

最后,需要修改 VERTICES,用以下代码替换现有的定义:

rust
// Changed
-const VERTICES: &[Vertex] = &[
-    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.99240386], }, // A
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.56958647], }, // B
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.05060294], }, // C
-    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.1526709], }, // D
-    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.7347359], }, // E
-];

修改着色器

有了新的 Vertex 结构体,现在是时候更新着色器了。首先需要将 tex_coords 传递给顶点着色器,然后将它们用于片元着色器,以便从采样器获得最终的颜色。让我们从顶点着色器开始:

rust
// 顶点着色器
+render_pass.draw_indexed(0..self.num_indices, 0, 0..1);

管线布局

还记得在管线章节创建的管线布局PipelineLayout)吗?现在我们终于可以使用它了! 管线布局包含一个管线可以使用的绑定组布局的列表。修改 render_pipeline_layout 以使用 texture_bind_group_layout

rust
async fn new(...) {
+    // ...
+    let render_pipeline_layout = device.create_pipeline_layout(
+        &wgpu::PipelineLayoutDescriptor {
+            label: Some("Render Pipeline Layout"),
+            bind_group_layouts: &[&texture_bind_group_layout], // 新添加!
+            push_constant_ranges: &[],
+        }
+    );
+    // ...
+}

修改 VERTICES 常量

对于 Vertex 的定义有几处需要修改。到目前为止,我们一直在使用 color 字段来设置网格颜色。现在我们要用 tex_coords 代替 color,这些坐标会被传递给采样器以获取纹素(Texel)的颜色。

由于 tex_coords 是二维的,需要修改这个字段的类型为两个浮点数的数组。

先来修改 Vertex 结构体:

rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+struct Vertex {
+    position: [f32; 3],
+    tex_coords: [f32; 2], // 新添加!
+}

然后在 VertexBufferLayout 中反映这些变化:

rust
impl Vertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    shader_location: 0,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+                    shader_location: 1,
+                    format: wgpu::VertexFormat::Float32x2, // NEW!
+                },
+            ]
+        }
+    }
+}

最后,需要修改 VERTICES,用以下代码替换现有的定义:

rust
// Changed
+const VERTICES: &[Vertex] = &[
+    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.99240386], }, // A
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.56958647], }, // B
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.05060294], }, // C
+    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.1526709], }, // D
+    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.7347359], }, // E
+];

修改着色器

有了新的 Vertex 结构体,现在是时候更新着色器了。首先需要将 tex_coords 传递给顶点着色器,然后将它们用于片元着色器,以便从采样器获得最终的颜色。让我们从顶点着色器开始:

rust
// 顶点着色器
 
-struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) tex_coords: vec2f,
-}
+struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) tex_coords: vec2f,
+}
 
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) tex_coords: vec2f,
-}
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) tex_coords: vec2f,
+}
 
-@vertex
-fn vs_main(
-    model: VertexInput,
-) -> VertexOutput {
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.clip_position = vec4f(model.position, 1.0);
-    return out;
-}

现在顶点着色器输出了 tex_coords,我们需要改变片元着色器来接收它们。有了这些坐标,就可以使用采样器从纹理中获取纹素的颜色了:

rust
// 片元着色器
+@vertex
+fn vs_main(
+    model: VertexInput,
+) -> VertexOutput {
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.clip_position = vec4f(model.position, 1.0);
+    return out;
+}

现在顶点着色器输出了 tex_coords,我们需要改变片元着色器来接收它们。有了这些坐标,就可以使用采样器从纹理中获取纹素的颜色了:

rust
// 片元着色器
 
-@group(0) @binding(0)
-var t_diffuse: texture_2d<f32>;
-@group(0)@binding(1)
-var s_diffuse: sampler;
+@group(0) @binding(0)
+var t_diffuse: texture_2d<f32>;
+@group(0)@binding(1)
+var s_diffuse: sampler;
 
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return textureSample(t_diffuse, s_diffuse, in.tex_coords);
-}

变量 t_diffuses_diffuse 就是所谓的 uniforms。我们将在 相机部分 中进一步讨论 uniforms。现在,我们需要知道的是,@group(x) 对应于 set_bind_group() 中的第一个参数,@binding(x) 与我们创建绑定组布局绑定组时指定的 binding 值对应。

渲染结果

现在运行我们的程序,将得到如下渲染效果:

an upside down tree on a hexagon

很奇怪,我们的树是颠倒的! 这是因为 wgpu 的世界坐标的 Y 轴朝上,而纹理坐标的 Y 轴朝下。换句话说,纹理坐标中的(0,0)对应于图像的左上方,而(1,1)是右下方:

happy-tree-uv-coords.png

我们可以通过将每个纹理坐标的 y 坐标替换为 1 - y 来得到纹理的正确朝向:

rust
const VERTICES: &[Vertex] = &[
-    // 修改后的
-    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
-    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
-    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397], }, // C
-    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732914], }, // D
-    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
-];

现在我们就把树正确地放在五边形上了:

our happy tree as it should be

代码整理

为方便起见,让我们把纹理代码放到自己的模块中。我们首先将 anyhow 添加到 Cargo.toml 文件中,以简化错误处理:

toml
[dependencies]
-image = "0.23"
-glam = "0.24"
-winit = "0.28.7"
-env_logger = "0.10"
-log = "0.4"
-pollster = "0.3"
-wgpu = "0.17"
-bytemuck = { version = "1.14", features = [ "derive" ] }
-anyhow = "1.0" # NEW!

然后,在一个名为 src/texture.rs 的新文件中,添加以下代码:

rust
use image::GenericImageView;
-use anyhow::*;
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return textureSample(t_diffuse, s_diffuse, in.tex_coords);
+}

变量 t_diffuses_diffuse 就是所谓的 uniforms。我们将在 相机部分 中进一步讨论 uniforms。现在,我们需要知道的是,@group(x) 对应于 set_bind_group() 中的第一个参数,@binding(x) 与我们创建绑定组布局绑定组时指定的 binding 值对应。

渲染结果

现在运行我们的程序,将得到如下渲染效果:

an upside down tree on a hexagon

很奇怪,我们的树是颠倒的! 这是因为 wgpu 的世界坐标的 Y 轴朝上,而纹理坐标的 Y 轴朝下。换句话说,纹理坐标中的(0,0)对应于图像的左上方,而(1,1)是右下方:

happy-tree-uv-coords.png

我们可以通过将每个纹理坐标的 y 坐标替换为 1 - y 来得到纹理的正确朝向:

rust
const VERTICES: &[Vertex] = &[
+    // 修改后的
+    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
+    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
+    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397], }, // C
+    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732914], }, // D
+    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
+];

现在我们就把树正确地放在五边形上了:

our happy tree as it should be

代码整理

为方便起见,让我们把纹理代码放到自己的模块中。我们首先将 anyhow 添加到 Cargo.toml 文件中,以简化错误处理:

toml
[dependencies]
+image = "0.23"
+glam = "0.24"
+winit = "0.28.7"
+env_logger = "0.10"
+log = "0.4"
+pollster = "0.3"
+wgpu = "0.17"
+bytemuck = { version = "1.14", features = [ "derive" ] }
+anyhow = "1.0" # NEW!

然后,在一个名为 src/texture.rs 的新文件中,添加以下代码:

rust
use image::GenericImageView;
+use anyhow::*;
 
-pub struct Texture {
-    pub texture: wgpu::Texture,
-    pub view: wgpu::TextureView,
-    pub sampler: wgpu::Sampler,
-}
+pub struct Texture {
+    pub texture: wgpu::Texture,
+    pub view: wgpu::TextureView,
+    pub sampler: wgpu::Sampler,
+}
 
-impl Texture {
-    pub fn from_bytes(
-        device: &wgpu::Device,
-        queue: &wgpu::Queue,
-        bytes: &[u8],
-        label: &str
-    ) -> Result<Self> {
-        let img = image::load_from_memory(bytes)?;
-        Self::from_image(device, queue, &img, Some(label))
-    }
+impl Texture {
+    pub fn from_bytes(
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        bytes: &[u8],
+        label: &str
+    ) -> Result<Self> {
+        let img = image::load_from_memory(bytes)?;
+        Self::from_image(device, queue, &img, Some(label))
+    }
 
-    pub fn from_image(
-        device: &wgpu::Device,
-        queue: &wgpu::Queue,
-        img: &image::DynamicImage,
-        label: Option<&str>
-    ) -> Result<Self> {
-        let rgba = img.to_rgba8();
-        let dimensions = img.dimensions();
+    pub fn from_image(
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        img: &image::DynamicImage,
+        label: Option<&str>
+    ) -> Result<Self> {
+        let rgba = img.to_rgba8();
+        let dimensions = img.dimensions();
 
-        let size = wgpu::Extent3d {
-            width: dimensions.0,
-            height: dimensions.1,
-            depth_or_array_layers: 1,
-        };
-        let texture = device.create_texture(
-            &wgpu::TextureDescriptor {
-                label,
-                size,
-                mip_level_count: 1,
-                sample_count: 1,
-                dimension: wgpu::TextureDimension::D2,
-                format: wgpu::TextureFormat::Rgba8UnormSrgb,
-                usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
-                view_formats: &[],
-            }
-        );
+        let size = wgpu::Extent3d {
+            width: dimensions.0,
+            height: dimensions.1,
+            depth_or_array_layers: 1,
+        };
+        let texture = device.create_texture(
+            &wgpu::TextureDescriptor {
+                label,
+                size,
+                mip_level_count: 1,
+                sample_count: 1,
+                dimension: wgpu::TextureDimension::D2,
+                format: wgpu::TextureFormat::Rgba8UnormSrgb,
+                usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
+                view_formats: &[],
+            }
+        );
 
-        queue.write_texture(
-            wgpu::ImageCopyTexture {
-                aspect: wgpu::TextureAspect::All,
-                texture: &texture,
-                mip_level: 0,
-                origin: wgpu::Origin3d::ZERO,
-            },
-            &rgba,
-            wgpu::ImageDataLayout {
-                offset: 0,
-                bytes_per_row: Some(4 * dimensions.0),
-                rows_per_image: Some(dimensions.1),
-            },
-            size,
-        );
+        queue.write_texture(
+            wgpu::ImageCopyTexture {
+                aspect: wgpu::TextureAspect::All,
+                texture: &texture,
+                mip_level: 0,
+                origin: wgpu::Origin3d::ZERO,
+            },
+            &rgba,
+            wgpu::ImageDataLayout {
+                offset: 0,
+                bytes_per_row: Some(4 * dimensions.0),
+                rows_per_image: Some(dimensions.1),
+            },
+            size,
+        );
 
-        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
-        let sampler = device.create_sampler(
-            &wgpu::SamplerDescriptor {
-                address_mode_u: wgpu::AddressMode::ClampToEdge,
-                address_mode_v: wgpu::AddressMode::ClampToEdge,
-                address_mode_w: wgpu::AddressMode::ClampToEdge,
-                mag_filter: wgpu::FilterMode::Linear,
-                min_filter: wgpu::FilterMode::Nearest,
-                mipmap_filter: wgpu::FilterMode::Nearest,
-                ..Default::default()
-            }
-        );
+        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+        let sampler = device.create_sampler(
+            &wgpu::SamplerDescriptor {
+                address_mode_u: wgpu::AddressMode::ClampToEdge,
+                address_mode_v: wgpu::AddressMode::ClampToEdge,
+                address_mode_w: wgpu::AddressMode::ClampToEdge,
+                mag_filter: wgpu::FilterMode::Linear,
+                min_filter: wgpu::FilterMode::Nearest,
+                mipmap_filter: wgpu::FilterMode::Nearest,
+                ..Default::default()
+            }
+        );
 
-        Ok(Self { texture, view, sampler })
-    }
-}

注意,我们使用的是 to_rgba8() 而不是 as_rgba8()。PNG 使用 as_rgba8() 没问题,因为它们有一个 alpha 通道。但是 JPEG 没有 alpha 通道,如果我们试图在 JPEG 纹理图像上调用 as_rgba8(),代码就会陷入恐慌。相反,我们可以使用 to_rgba8() 来处理没有 alpha 通道的图像,它会生成一个新的图像缓冲区。

lib.rs 文件的顶部添加以下代码来将 texture.rs 作为一个模块导入:

rust
mod texture;

new() 函数中的纹理创建代码现在变得简化多了:

rust
surface.configure(&device, &config);
-let diffuse_bytes = include_bytes!("happy-tree.png"); // CHANGED!
-let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap(); // CHANGED!
+        Ok(Self { texture, view, sampler })
+    }
+}

注意,我们使用的是 to_rgba8() 而不是 as_rgba8()。PNG 使用 as_rgba8() 没问题,因为它们有一个 alpha 通道。但是 JPEG 没有 alpha 通道,如果我们试图在 JPEG 纹理图像上调用 as_rgba8(),代码就会陷入恐慌。相反,我们可以使用 to_rgba8() 来处理没有 alpha 通道的图像,它会生成一个新的图像缓冲区。

lib.rs 文件的顶部添加以下代码来将 texture.rs 作为一个模块导入:

rust
mod texture;

new() 函数中的纹理创建代码现在变得简化多了:

rust
surface.configure(&device, &config);
+let diffuse_bytes = include_bytes!("happy-tree.png"); // CHANGED!
+let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap(); // CHANGED!
 
-// 到 `let texture_bind_group_layout = ...` 行为止的所有代码现在都可以移除了。

我们仍然需要单独存储绑定组,因为纹理无须知道绑定组的布局。修改创建 diffuse_bind_group 的过程以使用diffuse_textureviewsampler 字段:

rust
let diffuse_bind_group = device.create_bind_group(
-    &wgpu::BindGroupDescriptor {
-        layout: &texture_bind_group_layout,
-        entries: &[
-            wgpu::BindGroupEntry {
-                binding: 0,
-                resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), // CHANGED!
-            },
-            wgpu::BindGroupEntry {
-                binding: 1,
-                resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), // CHANGED!
-            }
-        ],
-        label: Some("diffuse_bind_group"),
-    }
-);

最后,需要更新 State 中的字段以使用全新 Texture 结构体,在未来的教程中还会用到它:

rust
struct State {
-    // ...
-    diffuse_bind_group: wgpu::BindGroup,
-    diffuse_texture: texture::Texture, // NEW
-}
rust
impl State {
-    async fn new() -> Self {
-        // ...
-        Self {
-            // ...
-            num_indices,
-            diffuse_bind_group,
-            diffuse_texture, // NEW
-        }
-    }
-}

经过上边的整理,代码的工作方式还和以前一样,但我们现在有了一个更便利的方式来创建纹理。

挑战

另创建一个纹理,并在你按下空格键时交替使用。

- +// 到 `let texture_bind_group_layout = ...` 行为止的所有代码现在都可以移除了。

我们仍然需要单独存储绑定组,因为纹理无须知道绑定组的布局。修改创建 diffuse_bind_group 的过程以使用diffuse_textureviewsampler 字段:

rust
let diffuse_bind_group = device.create_bind_group(
+    &wgpu::BindGroupDescriptor {
+        layout: &texture_bind_group_layout,
+        entries: &[
+            wgpu::BindGroupEntry {
+                binding: 0,
+                resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), // CHANGED!
+            },
+            wgpu::BindGroupEntry {
+                binding: 1,
+                resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), // CHANGED!
+            }
+        ],
+        label: Some("diffuse_bind_group"),
+    }
+);

最后,需要更新 State 中的字段以使用全新 Texture 结构体,在未来的教程中还会用到它:

rust
struct State {
+    // ...
+    diffuse_bind_group: wgpu::BindGroup,
+    diffuse_texture: texture::Texture, // NEW
+}
rust
impl State {
+    async fn new() -> Self {
+        // ...
+        Self {
+            // ...
+            num_indices,
+            diffuse_bind_group,
+            diffuse_texture, // NEW
+        }
+    }
+}

经过上边的整理,代码的工作方式还和以前一样,但我们现在有了一个更便利的方式来创建纹理。

挑战

另创建一个纹理,并在你按下空格键时交替使用。

+ \ No newline at end of file diff --git a/beginner/tutorial6-uniforms/index.html b/beginner/tutorial6-uniforms/index.html index b4943e0b5..9dc5fc7a7 100644 --- a/beginner/tutorial6-uniforms/index.html +++ b/beginner/tutorial6-uniforms/index.html @@ -5,289 +5,291 @@ Uniform 缓冲区与 3D 虚拟摄像机 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

Uniform 缓冲区与 3D 虚拟摄像机

虽然我们之前的渲染似乎都是在 2D 空间下进行的,但实际上我们一直都是在 3D 空间下渲染的!这就是为什么 Vertex 结构体的 position 是 3 个浮点数的数组而不是 2 个。由于我们是在正面观察,所以才无法真正看到场景的立体感。下面将通过创建一个虚拟摄像机Camera)来改变我们的观察视角。

透视摄像机

本教程聚焦于 wgpu 的教学,而不是线性代数,所以会略过很多涉及的数学知识。如果你对线性代数感兴趣,网上有大量的阅读材料。我们将使用 glam 来处理所有数学问题,在 Cargo.toml 中添加以下依赖:

toml
[dependencies]
-# other deps...
-glam = "0.24"

现在让我们开始使用此数学!在 State 结构体上方创建摄像机结构体:

rust
struct Camera {
-    eye: glam::Vec3,
-    target: glam::Vec3,
-    up: glam::Vec3,
-    aspect: f32,
-    fovy: f32,
-    znear: f32,
-    zfar: f32,
-}
+    
Skip to content

Uniform 缓冲区与 3D 虚拟摄像机

虽然我们之前的渲染似乎都是在 2D 空间下进行的,但实际上我们一直都是在 3D 空间下渲染的!这就是为什么 Vertex 结构体的 position 是 3 个浮点数的数组而不是 2 个。由于我们是在正面观察,所以才无法真正看到场景的立体感。下面将通过创建一个虚拟摄像机Camera)来改变我们的观察视角。

透视摄像机

本教程聚焦于 wgpu 的教学,而不是线性代数,所以会略过很多涉及的数学知识。如果你对线性代数感兴趣,网上有大量的阅读材料。我们将使用 glam 来处理所有数学问题,在 Cargo.toml 中添加以下依赖:

toml
[dependencies]
+# other deps...
+glam = "0.24"

现在让我们开始使用此数学!在 State 结构体上方创建摄像机结构体:

rust
struct Camera {
+    eye: glam::Vec3,
+    target: glam::Vec3,
+    up: glam::Vec3,
+    aspect: f32,
+    fovy: f32,
+    znear: f32,
+    zfar: f32,
+}
 
-impl Camera {
-    fn build_view_projection_matrix(&self) -> glam::Mat4 {
-        // 1.
-        let view = glam::Mat4::look_at_rh(self.eye, self.target, self.up);
-        // 2.
-        let proj = glam::Mat4::perspective_rh(self.fovy.to_radians(), self.aspect, self.znear, self.zfar);
+impl Camera {
+    fn build_view_projection_matrix(&self) -> glam::Mat4 {
+        // 1.
+        let view = glam::Mat4::look_at_rh(self.eye, self.target, self.up);
+        // 2.
+        let proj = glam::Mat4::perspective_rh(self.fovy.to_radians(), self.aspect, self.znear, self.zfar);
 
-        // 3.
-        return proj * view;
-    }
-}

build_view_projection_matrix 函数实现了视图投影矩阵。

  1. 视图矩阵移动并旋转世界坐标到摄像机所观察的位置。它本质上是摄像机变换的逆矩阵。
  2. 投影矩阵变换场景空间,以产生景深的效果。如果没有它,近处的物体将与远处的大小相同。
  3. wgpu 的坐标系统是基于 DirectX 和 Metal 的坐标系,在归一化设备坐标中,x 轴和 y 轴的范围是 [-1.0, 1.0],而 z 轴是 [0.0, 1.0]。 移植 OpenGL 程序时需要注意:在 OpenGL 的归一化设备坐标中 z 轴的范围是 [-1.0, 1.0]。

现在我们来给 State 添加上 camera 字段:

rust
struct State {
-    // ...
-    camera: Camera,
-    // ...
-}
+        // 3.
+        return proj * view;
+    }
+}

build_view_projection_matrix 函数实现了视图投影矩阵。

  1. 视图矩阵移动并旋转世界坐标到摄像机所观察的位置。它本质上是摄像机变换的逆矩阵。
  2. 投影矩阵变换场景空间,以产生景深的效果。如果没有它,近处的物体将与远处的大小相同。
  3. wgpu 的坐标系统是基于 DirectX 和 Metal 的坐标系,在归一化设备坐标中,x 轴和 y 轴的范围是 [-1.0, 1.0],而 z 轴是 [0.0, 1.0]。 移植 OpenGL 程序时需要注意:在 OpenGL 的归一化设备坐标中 z 轴的范围是 [-1.0, 1.0]。

现在我们来给 State 添加上 camera 字段:

rust
struct State {
+    // ...
+    camera: Camera,
+    // ...
+}
 
-async fn new(window: &Window) -> Self {
-    // let diffuse_bind_group ...
+async fn new(window: &Window) -> Self {
+    // let diffuse_bind_group ...
 
-    let camera = Camera {
-        // 将摄像机向上移动 1 个单位,向后移动 2 个单位
-        // +z 朝向屏幕外
-        eye: (0.0, 1.0, 2.0).into(),
-        // 摄像机看向原点
-        target: (0.0, 0.0, 0.0).into(),
-        // 定义哪个方向朝上
-        up: glam::Vec3::Y,
-        aspect: config.width as f32 / config.height as f32,
-        fovy: 45.0,
-        znear: 0.1,
-        zfar: 100.0,
-    };
+    let camera = Camera {
+        // 将摄像机向上移动 1 个单位,向后移动 2 个单位
+        // +z 朝向屏幕外
+        eye: (0.0, 1.0, 2.0).into(),
+        // 摄像机看向原点
+        target: (0.0, 0.0, 0.0).into(),
+        // 定义哪个方向朝上
+        up: glam::Vec3::Y,
+        aspect: config.width as f32 / config.height as f32,
+        fovy: 45.0,
+        znear: 0.1,
+        zfar: 100.0,
+    };
 
-    Self {
-        // ...
-        camera,
-        // ...
-    }
-}

有了可以提供视图投影矩阵的摄像机,我们还需要一些方法将其引入着色器。

Uniform 缓冲区

到目前为止,我们已经使用缓冲区来存储顶点和索引数据,甚至加载纹理。我们将再次使用它来创建一个称之为 uniform 的缓冲区。Uniform 缓冲区也是一个数据块,在一组着色器的每个调用中都可以使用,从技术的角度来看,我们已经为纹理采样器使用了 Uniform 缓冲区。下面将再次使用它们来存储视图投影矩阵,我们先创建一个结构体来保存 uniform:

rust
// 此属性标注数据的内存布局兼容 C-ABI,令其可用于着色器
-#[repr(C)]
-// derive 属性自动导入的这些 trait,令其可被存入缓冲区
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct CameraUniform {
-    // glam 的数据类型不能直接用于 bytemuck
-    // 需要先将 Matrix4 矩阵转为一个 4x4 的浮点数数组
-    view_proj: [[f32; 4]; 4],
-}
+    Self {
+        // ...
+        camera,
+        // ...
+    }
+}

有了可以提供视图投影矩阵的摄像机,我们还需要一些方法将其引入着色器。

Uniform 缓冲区

到目前为止,我们已经使用缓冲区来存储顶点和索引数据,甚至加载纹理。我们将再次使用它来创建一个称之为 uniform 的缓冲区。Uniform 缓冲区也是一个数据块,在一组着色器的每个调用中都可以使用,从技术的角度来看,我们已经为纹理采样器使用了 Uniform 缓冲区。下面将再次使用它们来存储视图投影矩阵,我们先创建一个结构体来保存 uniform:

rust
// 此属性标注数据的内存布局兼容 C-ABI,令其可用于着色器
+#[repr(C)]
+// derive 属性自动导入的这些 trait,令其可被存入缓冲区
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct CameraUniform {
+    // glam 的数据类型不能直接用于 bytemuck
+    // 需要先将 Matrix4 矩阵转为一个 4x4 的浮点数数组
+    view_proj: [[f32; 4]; 4],
+}
 
-impl CameraUniform {
-    fn new() -> Self {
-        Self {
-            view_proj: glam::Mat4::IDENTITY.to_cols_array_2d(),
-        }
-    }
+impl CameraUniform {
+    fn new() -> Self {
+        Self {
+            view_proj: glam::Mat4::IDENTITY.to_cols_array_2d(),
+        }
+    }
 
-    fn update_view_proj(&mut self, camera: &Camera) {
-        self.view_proj = camera.build_view_projection_matrix().to_cols_array_2d();
-    }
-}

封装好了数据,接下来创建一个名为 camera_buffer 的 Uniform 缓冲区:

rust
// 在 new() 函数中创建 `camera` 后
+    fn update_view_proj(&mut self, camera: &Camera) {
+        self.view_proj = camera.build_view_projection_matrix().to_cols_array_2d();
+    }
+}

封装好了数据,接下来创建一个名为 camera_buffer 的 Uniform 缓冲区:

rust
// 在 new() 函数中创建 `camera` 后
 
-let mut camera_uniform = CameraUniform::new();
-camera_uniform.update_view_proj(&camera);
+let mut camera_uniform = CameraUniform::new();
+camera_uniform.update_view_proj(&camera);
 
-let camera_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Camera Buffer"),
-        contents: bytemuck::cast_slice(&[camera_uniform]),
-        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-    }
-);

Uniform 缓冲区和绑定组

现在有了一个 Uniform 缓冲区,那该如何使用呢?答案是为它创建一个绑定组。我们得先创建绑定组的布局:

rust
let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-    entries: &[
-        wgpu::BindGroupLayoutEntry {
-            binding: 0,
-            visibility: wgpu::ShaderStages::VERTEX,     // 1
-            ty: wgpu::BindingType::Buffer {
-                ty: wgpu::BufferBindingType::Uniform,
-                has_dynamic_offset: false,              // 2
-                min_binding_size: None,
-            },
-            count: None,
-        }
-    ],
-    label: Some("camera_bind_group_layout"),
-});
  1. 我们只在顶点着色器中需要虚拟摄像机信息,因为要用它来操作顶点
  2. has_dynamic_offset 字段表示这个缓冲区是否会动态改变偏移量。如果我们想一次性在 Uniform 中存储多组数据,并实时修改偏移量来告诉着色器当前使用哪组数据时,这就很有用。

现在,我们可以创建实际的绑定组了:

rust
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-    layout: &camera_bind_group_layout,
-    entries: &[
-        wgpu::BindGroupEntry {
-            binding: 0,
-            resource: camera_buffer.as_entire_binding(),
-        }
-    ],
-    label: Some("camera_bind_group"),
-});

就像对纹理所做的那样,我们需要在管线布局描述符中注册 camera_bind_group_layout

rust
let render_pipeline_layout = device.create_pipeline_layout(
-    &wgpu::PipelineLayoutDescriptor {
-        label: Some("Render Pipeline Layout"),
-        bind_group_layouts: &[
-            &texture_bind_group_layout,
-            &camera_bind_group_layout,
-        ],
-        push_constant_ranges: &[],
-    }
-);

现在,需要将 camera_buffercamera_bind_group 添加到 State 中:

rust
struct State {
-    // ...
-    camera: Camera,
-    camera_uniform: CameraUniform,
-    camera_buffer: wgpu::Buffer,
-    camera_bind_group: wgpu::BindGroup,
-}
+let camera_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Camera Buffer"),
+        contents: bytemuck::cast_slice(&[camera_uniform]),
+        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
+    }
+);

Uniform 缓冲区和绑定组

现在有了一个 Uniform 缓冲区,那该如何使用呢?答案是为它创建一个绑定组。我们得先创建绑定组的布局:

rust
let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+    entries: &[
+        wgpu::BindGroupLayoutEntry {
+            binding: 0,
+            visibility: wgpu::ShaderStages::VERTEX,     // 1
+            ty: wgpu::BindingType::Buffer {
+                ty: wgpu::BufferBindingType::Uniform,
+                has_dynamic_offset: false,              // 2
+                min_binding_size: None,
+            },
+            count: None,
+        }
+    ],
+    label: Some("camera_bind_group_layout"),
+});
  1. 我们只在顶点着色器中需要虚拟摄像机信息,因为要用它来操作顶点
  2. has_dynamic_offset 字段表示这个缓冲区是否会动态改变偏移量。如果我们想一次性在 Uniform 中存储多组数据,并实时修改偏移量来告诉着色器当前使用哪组数据时,这就很有用。

现在,我们可以创建实际的绑定组了:

rust
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+    layout: &camera_bind_group_layout,
+    entries: &[
+        wgpu::BindGroupEntry {
+            binding: 0,
+            resource: camera_buffer.as_entire_binding(),
+        }
+    ],
+    label: Some("camera_bind_group"),
+});

就像对纹理所做的那样,我们需要在管线布局描述符中注册 camera_bind_group_layout

rust
let render_pipeline_layout = device.create_pipeline_layout(
+    &wgpu::PipelineLayoutDescriptor {
+        label: Some("Render Pipeline Layout"),
+        bind_group_layouts: &[
+            &texture_bind_group_layout,
+            &camera_bind_group_layout,
+        ],
+        push_constant_ranges: &[],
+    }
+);

现在,需要将 camera_buffercamera_bind_group 添加到 State 中:

rust
struct State {
+    // ...
+    camera: Camera,
+    camera_uniform: CameraUniform,
+    camera_buffer: wgpu::Buffer,
+    camera_bind_group: wgpu::BindGroup,
+}
 
-async fn new(window: &Window) -> Self {
-    // ...
-    Self {
-        // ...
-        camera,
-        camera_uniform,
-        camera_buffer,
-        camera_bind_group,
-    }
-}

在进入着色器之前,我们要做的最后一件事就是在 render() 函数中使用绑定组

rust
render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
-// 新添加!
-render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
+async fn new(window: &Window) -> Self {
+    // ...
+    Self {
+        // ...
+        camera,
+        camera_uniform,
+        camera_buffer,
+        camera_bind_group,
+    }
+}

在进入着色器之前,我们要做的最后一件事就是在 render() 函数中使用绑定组

rust
render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
+// 新添加!
+render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
 
-render_pass.draw_indexed(0..self.num_indices, 0, 0..1);

在顶点着色器中使用 uniform

修改顶点着色器以加入如下代码:

rust
// 顶点着色器
-struct CameraUniform {
-    view_proj: mat4x4f,
-};
-@group(1) @binding(0) // 1.
-var<uniform> camera: CameraUniform;
+render_pass.draw_indexed(0..self.num_indices, 0, 0..1);

在顶点着色器中使用 uniform

修改顶点着色器以加入如下代码:

rust
// 顶点着色器
+struct CameraUniform {
+    view_proj: mat4x4f,
+};
+@group(1) @binding(0) // 1.
+var<uniform> camera: CameraUniform;
 
-struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) tex_coords: vec2f,
-}
+struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) tex_coords: vec2f,
+}
 
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) tex_coords: vec2f,
-}
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) tex_coords: vec2f,
+}
 
-@vertex
-fn vs_main(
-    model: VertexInput,
-) -> VertexOutput {
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.clip_position = camera.view_proj * vec4f(model.position, 1.0); // 2.
-    return out;
-}
  1. 因为我们已经创建了一个新的绑定组,所以需要指定在着色器中使用哪一个。这个数字由我们的 render_pipeline_layout 决定。texture_bind_group_layout 被列在第一位,因此它是 group(0),而 camera_bind_group 是第二位,因此它是 group(1)
  2. 当涉及到矩阵时,乘法的顺序很重要。向量在最右边,矩阵按重要性顺序在左边(裁剪空间坐标 = 投影矩阵 x 模型视图矩阵 x 位置向量)。

摄像机控制器

如果现在运行代码,看到的将是如下渲染效果:

./static-tree.png

形状的拉伸度降低了,但它仍然是静态的。你可以尝试移动摄像机的位置使画面动起来,就像游戏中的摄像机通常所做的那样。由于本教程聚焦于 wgpu 的使用,而非用户输入事件的处理,所以仅在此贴出摄像机控制器(CameraController)的代码:

rust
struct CameraController {
-    speed: f32,
-    is_forward_pressed: bool,
-    is_backward_pressed: bool,
-    is_left_pressed: bool,
-    is_right_pressed: bool,
-}
+@vertex
+fn vs_main(
+    model: VertexInput,
+) -> VertexOutput {
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.clip_position = camera.view_proj * vec4f(model.position, 1.0); // 2.
+    return out;
+}
  1. 因为我们已经创建了一个新的绑定组,所以需要指定在着色器中使用哪一个。这个数字由我们的 render_pipeline_layout 决定。texture_bind_group_layout 被列在第一位,因此它是 group(0),而 camera_bind_group 是第二位,因此它是 group(1)
  2. 当涉及到矩阵时,乘法的顺序很重要。向量在最右边,矩阵按重要性顺序在左边(裁剪空间坐标 = 投影矩阵 x 模型视图矩阵 x 位置向量)。

摄像机控制器

如果现在运行代码,看到的将是如下渲染效果:

./static-tree.png

形状的拉伸度降低了,但它仍然是静态的。你可以尝试移动摄像机的位置使画面动起来,就像游戏中的摄像机通常所做的那样。由于本教程聚焦于 wgpu 的使用,而非用户输入事件的处理,所以仅在此贴出摄像机控制器(CameraController)的代码:

rust
struct CameraController {
+    speed: f32,
+    is_forward_pressed: bool,
+    is_backward_pressed: bool,
+    is_left_pressed: bool,
+    is_right_pressed: bool,
+}
 
-impl CameraController {
-    fn new(speed: f32) -> Self {
-        Self {
-            speed,
-            is_forward_pressed: false,
-            is_backward_pressed: false,
-            is_left_pressed: false,
-            is_right_pressed: false,
-        }
-    }
+impl CameraController {
+    fn new(speed: f32) -> Self {
+        Self {
+            speed,
+            is_forward_pressed: false,
+            is_backward_pressed: false,
+            is_left_pressed: false,
+            is_right_pressed: false,
+        }
+    }
 
-    fn process_events(&mut self, event: &WindowEvent) -> bool {
-        match event {
-            WindowEvent::KeyboardInput {
-                input: KeyboardInput {
-                    state,
-                    virtual_keycode: Some(keycode),
-                    ..
-                },
-                ..
-            } => {
-                let is_pressed = *state == ElementState::Pressed;
-                match keycode {
-                    VirtualKeyCode::W | VirtualKeyCode::Up => {
-                        self.is_forward_pressed = is_pressed;
-                        true
-                    }
-                    VirtualKeyCode::A | VirtualKeyCode::Left => {
-                        self.is_left_pressed = is_pressed;
-                        true
-                    }
-                    VirtualKeyCode::S | VirtualKeyCode::Down => {
-                        self.is_backward_pressed = is_pressed;
-                        true
-                    }
-                    VirtualKeyCode::D | VirtualKeyCode::Right => {
-                        self.is_right_pressed = is_pressed;
-                        true
-                    }
-                    _ => false,
-                }
-            }
-            _ => false,
-        }
-    }
+    fn process_events(&mut self, event: &WindowEvent) -> bool {
+        match event {
+            WindowEvent::KeyboardInput {
+                input: KeyboardInput {
+                    state,
+                    virtual_keycode: Some(keycode),
+                    ..
+                },
+                ..
+            } => {
+                let is_pressed = *state == ElementState::Pressed;
+                match keycode {
+                    VirtualKeyCode::W | VirtualKeyCode::Up => {
+                        self.is_forward_pressed = is_pressed;
+                        true
+                    }
+                    VirtualKeyCode::A | VirtualKeyCode::Left => {
+                        self.is_left_pressed = is_pressed;
+                        true
+                    }
+                    VirtualKeyCode::S | VirtualKeyCode::Down => {
+                        self.is_backward_pressed = is_pressed;
+                        true
+                    }
+                    VirtualKeyCode::D | VirtualKeyCode::Right => {
+                        self.is_right_pressed = is_pressed;
+                        true
+                    }
+                    _ => false,
+                }
+            }
+            _ => false,
+        }
+    }
 
-    fn update_camera(&self, camera: &mut Camera) {
-        let forward = camera.target - camera.eye;
-        let forward_norm = forward.normalize();
-        let forward_mag = forward.length();
+    fn update_camera(&self, camera: &mut Camera) {
+        let forward = camera.target - camera.eye;
+        let forward_norm = forward.normalize();
+        let forward_mag = forward.length();
 
-        // 防止摄像机离场景中心太近时出现问题
-        if self.is_forward_pressed && forward_mag > self.speed {
-            camera.eye += forward_norm * self.speed;
-        }
-        if self.is_backward_pressed {
-            camera.eye -= forward_norm * self.speed;
-        }
+        // 防止摄像机离场景中心太近时出现问题
+        if self.is_forward_pressed && forward_mag > self.speed {
+            camera.eye += forward_norm * self.speed;
+        }
+        if self.is_backward_pressed {
+            camera.eye -= forward_norm * self.speed;
+        }
 
-        let right = forward_norm.cross(camera.up);
+        let right = forward_norm.cross(camera.up);
 
-        // 在按下前进或后退键时重做半径计算
-        let forward = camera.target - camera.eye;
-        let forward_mag = forward.length();
+        // 在按下前进或后退键时重做半径计算
+        let forward = camera.target - camera.eye;
+        let forward_mag = forward.length();
 
-        if self.is_right_pressed {
-            // 重新调整目标和眼睛之间的距离,以便其不发生变化。
-            // 因此,眼睛仍然位于目标和眼睛形成的圆圈上。
-            camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
-        }
-        if self.is_left_pressed {
-            camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag;
-        }
-    }
-}

这段代码并不完美。当你旋转摄像机时,摄像机会慢慢向后移动。虽然已达到了我们的目的,但你还是可以自由地改进它!

我们仍然需要把它插入到现有的代码中使其生效。将控制器添加到 State 中,并在 new() 函数中创建它的实例:

rust
struct State {
-    // ...
-    camera: Camera,
-    // 新添加!
-    camera_controller: CameraController,
-    // ...
-}
-// ...
-impl State {
-    async fn new(window: &Window) -> Self {
-        // ...
-        let camera_controller = CameraController::new(0.2);
-        // ...
+        if self.is_right_pressed {
+            // 重新调整目标和眼睛之间的距离,以便其不发生变化。
+            // 因此,眼睛仍然位于目标和眼睛形成的圆圈上。
+            camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
+        }
+        if self.is_left_pressed {
+            camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag;
+        }
+    }
+}

这段代码并不完美。当你旋转摄像机时,摄像机会慢慢向后移动。虽然已达到了我们的目的,但你还是可以自由地改进它!

我们仍然需要把它插入到现有的代码中使其生效。将控制器添加到 State 中,并在 new() 函数中创建它的实例:

rust
struct State {
+    // ...
+    camera: Camera,
+    // 新添加!
+    camera_controller: CameraController,
+    // ...
+}
+// ...
+impl State {
+    async fn new(window: &Window) -> Self {
+        // ...
+        let camera_controller = CameraController::new(0.2);
+        // ...
 
-        Self {
-            // ...
-            camera_controller,
-            // ...
-        }
-    }
-}

将下边这行代码添加到 input() 函数中。

rust
fn input(&mut self, event: &WindowEvent) -> bool {
-    self.camera_controller.process_events(event)
-}

到目前为止,摄像机控制器还没有真正工作起来。uniform 缓冲区中的值需要被更新。有几种方式可以做到这一点:

  1. 可以创建一个单独的缓冲区,并将其数据复制到 camera_buffer。这个新的缓冲区被称为中继缓冲区(Staging Buffer)。这种方法允许主缓冲区(在这里是指 camera_buffer)的数据只被 GPU 访问,从而令 GPU 能做一些速度上的优化。如果缓冲区能被 CPU 访问,就无法实现此类优化。
  2. 可以在缓冲区本身调用内存映射函数 map_read_asyncmap_write_async。此方式允许我们直接访问缓冲区的数据,但是需要处理异步代码,也需要缓冲区使用 BufferUsages::MAP_READ 和/或 BufferUsages::MAP_WRITE。在此不再详述,如果你想了解更多,可以查看 wgpu without a window 教程。
  3. 可以在 queue 上使用 write_buffer 函数。

我们将使用第 3 种方式。

rust
fn update(&mut self) {
-    self.camera_controller.update_camera(&mut self.camera);
-    self.camera_uniform.update_view_proj(&self.camera);
-    self.queue.write_buffer(&self.camera_buffer, 0, bytemuck::cast_slice(&[self.camera_uniform]));
-}

这就是要做的全部工作了。现在运行代码,将能看到一个带有树木纹理的五边形,并可以用 wasd/arrow 键来旋转和缩放。

挑战

让上面的五边形独立于摄像机进行旋转。提示:你需要另一个矩阵来实现这一点

- + Self { + // ... + camera_controller, + // ... + } + } +}

将下边这行代码添加到 input() 函数中。

rust
fn input(&mut self, event: &WindowEvent) -> bool {
+    self.camera_controller.process_events(event)
+}

到目前为止,摄像机控制器还没有真正工作起来。uniform 缓冲区中的值需要被更新。有几种方式可以做到这一点:

  1. 可以创建一个单独的缓冲区,并将其数据复制到 camera_buffer。这个新的缓冲区被称为中继缓冲区(Staging Buffer)。这种方法允许主缓冲区(在这里是指 camera_buffer)的数据只被 GPU 访问,从而令 GPU 能做一些速度上的优化。如果缓冲区能被 CPU 访问,就无法实现此类优化。
  2. 可以在缓冲区本身调用内存映射函数 map_read_asyncmap_write_async。此方式允许我们直接访问缓冲区的数据,但是需要处理异步代码,也需要缓冲区使用 BufferUsages::MAP_READ 和/或 BufferUsages::MAP_WRITE。在此不再详述,如果你想了解更多,可以查看 wgpu without a window 教程。
  3. 可以在 queue 上使用 write_buffer 函数。

我们将使用第 3 种方式。

rust
fn update(&mut self) {
+    self.camera_controller.update_camera(&mut self.camera);
+    self.camera_uniform.update_view_proj(&self.camera);
+    self.queue.write_buffer(&self.camera_buffer, 0, bytemuck::cast_slice(&[self.camera_uniform]));
+}

这就是要做的全部工作了。现在运行代码,将能看到一个带有树木纹理的五边形,并可以用 wasd/arrow 键来旋转和缩放。

挑战

让上面的五边形独立于摄像机进行旋转。提示:你需要另一个矩阵来实现这一点

+ \ No newline at end of file diff --git a/beginner/tutorial7-instancing/index.html b/beginner/tutorial7-instancing/index.html index 339e8fdbf..8c087577f 100644 --- a/beginner/tutorial7-instancing/index.html +++ b/beginner/tutorial7-instancing/index.html @@ -5,161 +5,163 @@ 实例化绘制 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

实例化绘制

我们目前的场景非常简单:仅有一个以坐标 (0,0,0) 为中心的对象。如果想要绘制更多的对象呢? 这,就是实例化绘制(Instancing)的用武之地了。

实例化绘制允许我们以不同的属性(位置、方向、大小、颜色等)多次绘制同一个对象。有多种方式可以实现实例化绘制。其中一种方式是修改 Uniform 缓冲区以加入这些属性,并在绘制每个对象实例之前更新它。

出于性能原因,我们不推荐这种方式。因为逐实例更新时,uniform 缓冲区需要为每一帧复制多个缓冲区而消耗 GPU 内存带宽, 且随实例数增加的绘制命令更是会消耗 GPU 的执行时间。

如果查阅 wgpu 文档draw_indexed 函数的参数 ,我们可以看到解决这一问题的方式:

rust
pub fn draw_indexed(
-    &mut self,
-    indices: Range<u32>,
-    base_vertex: i32,
-    instances: Range<u32> // <-- 在这里
-)

instances 参数是范围Range<u32>)类型的值。它命令 GPU 绘制指定对象的多少个实例。目前我们指定的是0..1,它命令 GPU 绘制 1 个实例后停止。如果使用 0..5,我们的代码就绘制 5 个实例。

instances范围类型可能看起来很奇怪,因为使用 1..2 仍然是绘制 1 个实例。似乎直接使用 u32 类型会更简单,对吧?这里是范围类型的原因是:有时我们不想绘制出所有对象; 有时因为其他实例可能不该出现在这一中而只想绘制指定部分的实例; 又或者我们正在调试某组特定的实例。

好了,现在我们知道了如何绘制 1 个对象的多个实例,那么如何告诉 wgpu 要绘制哪些指定的实例呢?我们将要用到实例缓冲区(Instance Buffer)的概念。

实例缓冲区

我们将以类似于创建 Uniform 缓冲区的方式创建一个实例缓冲区。首先,声明一个名为 Instance 的结构体:

rust
// lib.rs
-// ...
+    
Skip to content

实例化绘制

我们目前的场景非常简单:仅有一个以坐标 (0,0,0) 为中心的对象。如果想要绘制更多的对象呢? 这,就是实例化绘制(Instancing)的用武之地了。

实例化绘制允许我们以不同的属性(位置、方向、大小、颜色等)多次绘制同一个对象。有多种方式可以实现实例化绘制。其中一种方式是修改 Uniform 缓冲区以加入这些属性,并在绘制每个对象实例之前更新它。

出于性能原因,我们不推荐这种方式。因为逐实例更新时,uniform 缓冲区需要为每一帧复制多个缓冲区而消耗 GPU 内存带宽, 且随实例数增加的绘制命令更是会消耗 GPU 的执行时间。

如果查阅 wgpu 文档draw_indexed 函数的参数 ,我们可以看到解决这一问题的方式:

rust
pub fn draw_indexed(
+    &mut self,
+    indices: Range<u32>,
+    base_vertex: i32,
+    instances: Range<u32> // <-- 在这里
+)

instances 参数是范围Range<u32>)类型的值。它命令 GPU 绘制指定对象的多少个实例。目前我们指定的是0..1,它命令 GPU 绘制 1 个实例后停止。如果使用 0..5,我们的代码就绘制 5 个实例。

instances范围类型可能看起来很奇怪,因为使用 1..2 仍然是绘制 1 个实例。似乎直接使用 u32 类型会更简单,对吧?这里是范围类型的原因是:有时我们不想绘制出所有对象; 有时因为其他实例可能不该出现在这一中而只想绘制指定部分的实例; 又或者我们正在调试某组特定的实例。

好了,现在我们知道了如何绘制 1 个对象的多个实例,那么如何告诉 wgpu 要绘制哪些指定的实例呢?我们将要用到实例缓冲区(Instance Buffer)的概念。

实例缓冲区

我们将以类似于创建 Uniform 缓冲区的方式创建一个实例缓冲区。首先,声明一个名为 Instance 的结构体:

rust
// lib.rs
+// ...
 
-// 新增!
-struct Instance {
-    position: glam::Vec3,
-    rotation: glam::Quat,
-}

四元数(Quaternion) 是一种通常用来表示旋转的数学结构。这里不会介绍它背后的数学原理(涉及虚数和 4 维空间)。如果你想深入了解四元数,这里有一篇 Wolfram Alpha 的文章。

着色器中直接使用这些值会有麻烦,因为 WGSL 里没有四元数的数据类型。我不想在着色器中做四元数运算,所以把 Instance 数据转换成了矩阵,并将其存储在一个名为 InstanceRaw 的结构体中:

rust
// 新增!
-#[repr(C)]
-#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct InstanceRaw {
-    model: [[f32; 4]; 4],
-}

这就是将要写入缓冲区的数据。我们拆分出 InstanceRaw 之后,就可以自由地更新 Instance 而无需涉及矩阵,因为 raw 数据只需要在绘制之前更新。

让我们在 Instance 上创建一个函数来计算并返回 InstanceRaw

rust
// 新增!
-impl Instance {
-    fn to_raw(&self) -> InstanceRaw {
-        InstanceRaw {
-            model: (glam::Mat4::from_translation(self.position) * glam::Mat4::from_quat(self.rotation)).to_cols_array_2d(),
-        }
-    }
-}

现在需要给 State 添加两个字段:instancesinstance_buffer

rust
struct State {
-    instances: Vec<Instance>,
-    instance_buffer: wgpu::Buffer,
-}

接下来在 new() 函数中创建实例数据,先定义几个常量用于简化代码:

rust
const NUM_INSTANCES_PER_ROW: u32 = 10;
-const INSTANCE_DISPLACEMENT: glam::Vec3 = glam::Vec3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);

我们将创建一组 10 行 10 列空间排列均匀的实例数据,下边是具体代码:

rust
impl State {
-    async fn new(window: &Window) -> Self {
-        // ...
-        let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
-            (0..NUM_INSTANCES_PER_ROW).map(move |x| {
-                let position = glam::Vec3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
+// 新增!
+struct Instance {
+    position: glam::Vec3,
+    rotation: glam::Quat,
+}

四元数(Quaternion) 是一种通常用来表示旋转的数学结构。这里不会介绍它背后的数学原理(涉及虚数和 4 维空间)。如果你想深入了解四元数,这里有一篇 Wolfram Alpha 的文章。

着色器中直接使用这些值会有麻烦,因为 WGSL 里没有四元数的数据类型。我不想在着色器中做四元数运算,所以把 Instance 数据转换成了矩阵,并将其存储在一个名为 InstanceRaw 的结构体中:

rust
// 新增!
+#[repr(C)]
+#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct InstanceRaw {
+    model: [[f32; 4]; 4],
+}

这就是将要写入缓冲区的数据。我们拆分出 InstanceRaw 之后,就可以自由地更新 Instance 而无需涉及矩阵,因为 raw 数据只需要在绘制之前更新。

让我们在 Instance 上创建一个函数来计算并返回 InstanceRaw

rust
// 新增!
+impl Instance {
+    fn to_raw(&self) -> InstanceRaw {
+        InstanceRaw {
+            model: (glam::Mat4::from_translation(self.position) * glam::Mat4::from_quat(self.rotation)).to_cols_array_2d(),
+        }
+    }
+}

现在需要给 State 添加两个字段:instancesinstance_buffer

rust
struct State {
+    instances: Vec<Instance>,
+    instance_buffer: wgpu::Buffer,
+}

接下来在 new() 函数中创建实例数据,先定义几个常量用于简化代码:

rust
const NUM_INSTANCES_PER_ROW: u32 = 10;
+const INSTANCE_DISPLACEMENT: glam::Vec3 = glam::Vec3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);

我们将创建一组 10 行 10 列空间排列均匀的实例数据,下边是具体代码:

rust
impl State {
+    async fn new(window: &Window) -> Self {
+        // ...
+        let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
+            (0..NUM_INSTANCES_PER_ROW).map(move |x| {
+                let position = glam::Vec3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
 
-                let rotation = if position.length().abs() <= std::f32::EPSILON {
-                    // 这一行特殊确保在坐标 (0, 0, 0) 处的对象不会被缩放到 0
-                    // 因为错误的四元数会影响到缩放
-                    glam::Quat::from_axis_angle(glam::Vec3::Z, 0.0)
-                } else {
-                    glam::Quat::from_axis_angle(position.normalize(), consts::FRAC_PI_4)
-                };
+                let rotation = if position.length().abs() <= std::f32::EPSILON {
+                    // 这一行特殊确保在坐标 (0, 0, 0) 处的对象不会被缩放到 0
+                    // 因为错误的四元数会影响到缩放
+                    glam::Quat::from_axis_angle(glam::Vec3::Z, 0.0)
+                } else {
+                    glam::Quat::from_axis_angle(position.normalize(), consts::FRAC_PI_4)
+                };
 
-                Instance {
-                    position, rotation,
-                }
-            })
-        }).collect::<Vec<_>>();
-        // ...
-    }
-}

现在数据已经有了,我们来创建实际的实例缓冲区

rust
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
-let instance_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Instance Buffer"),
-        contents: bytemuck::cast_slice(&instance_data),
-        usage: wgpu::BufferUsages::VERTEX,
-    }
-);

需要为 InstanceRaw 创建一个新的顶点缓冲区布局

rust
impl InstanceRaw {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
-            // step_mode 的值需要从 Vertex 改为 Instance
-            // 这意味着只有着色器开始处理一次新实例化绘制时,才会使用下一个实例数据
-            step_mode: wgpu::VertexStepMode::Instance,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    // 虽然顶点着色器现在只使用了插槽 0 和 1,但在后面的教程中将会使用 2、3 和 4
-                    // 此处从插槽 5 开始,确保与后面的教程不会有冲突
-                    shader_location: 5,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                // mat4 从技术的角度来看是由 4 个 vec4 构成,占用 4 个插槽。
-                // 我们需要为每个 vec4 定义一个插槽,然后在着色器中重新组装出 mat4。
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
-                    shader_location: 6,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
-                    shader_location: 7,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
-                    shader_location: 8,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-            ],
-        }
-    }
-}

我们需要将此布局添加到渲染管线中,以便在渲染时可以使用它:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    // ...
-    vertex: wgpu::VertexState {
-        // ...
-        // 更新!
-        buffers: &[Vertex::desc(), InstanceRaw::desc()],
-    },
-    // ...
-});

别忘了要返回新增的变量:

rust
Self {
-    // ...
-    // 新添加!
-    instances,
-    instance_buffer,
-}

最后,在 render() 函数中绑定 instance_buffer,并修改 draw_indexed() 绘制命令以使用我们实际的实例数:

rust
render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
-render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
-render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
-// 新添加!
-render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
+                Instance {
+                    position, rotation,
+                }
+            })
+        }).collect::<Vec<_>>();
+        // ...
+    }
+}

现在数据已经有了,我们来创建实际的实例缓冲区

rust
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
+let instance_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Instance Buffer"),
+        contents: bytemuck::cast_slice(&instance_data),
+        usage: wgpu::BufferUsages::VERTEX,
+    }
+);

需要为 InstanceRaw 创建一个新的顶点缓冲区布局

rust
impl InstanceRaw {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
+            // step_mode 的值需要从 Vertex 改为 Instance
+            // 这意味着只有着色器开始处理一次新实例化绘制时,才会使用下一个实例数据
+            step_mode: wgpu::VertexStepMode::Instance,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    // 虽然顶点着色器现在只使用了插槽 0 和 1,但在后面的教程中将会使用 2、3 和 4
+                    // 此处从插槽 5 开始,确保与后面的教程不会有冲突
+                    shader_location: 5,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                // mat4 从技术的角度来看是由 4 个 vec4 构成,占用 4 个插槽。
+                // 我们需要为每个 vec4 定义一个插槽,然后在着色器中重新组装出 mat4。
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
+                    shader_location: 6,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
+                    shader_location: 7,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
+                    shader_location: 8,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+            ],
+        }
+    }
+}

我们需要将此布局添加到渲染管线中,以便在渲染时可以使用它:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    // ...
+    vertex: wgpu::VertexState {
+        // ...
+        // 更新!
+        buffers: &[Vertex::desc(), InstanceRaw::desc()],
+    },
+    // ...
+});

别忘了要返回新增的变量:

rust
Self {
+    // ...
+    // 新添加!
+    instances,
+    instance_buffer,
+}

最后,在 render() 函数中绑定 instance_buffer,并修改 draw_indexed() 绘制命令以使用我们实际的实例数:

rust
render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
+render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
+render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
+// 新添加!
+render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
 
-// 更新!
-render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as _);

当你向数组添加新的实例时,请确保重新创建了 instance_buffercamera_bind_group,否则新实例不会正确显示。

shader.wgsl 中需要引入我们新增的矩阵,这样才能在实例中使用它。请在 shader.wgsl 文件的顶部添加以下代码:

rust
struct InstanceInput {
-    @location(5) model_matrix_0: vec4f,
-    @location(6) model_matrix_1: vec4f,
-    @location(7) model_matrix_2: vec4f,
-    @location(8) model_matrix_3: vec4f,
-};

在使用之前,我们需要将矩阵重新组装出来:

rust
@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    let model_matrix = mat4x4f(
-        instance.model_matrix_0,
-        instance.model_matrix_1,
-        instance.model_matrix_2,
-        instance.model_matrix_3,
-    );
-    // Continued...
-}

我们得在应用 camera_uniform.view_proj 之前先应用 model_matrix。因为 view_proj 将坐标系从世界空间(World Space)变换为相机空间(Camera Space),而 model_matrix 是一个世界空间的变换,所以在使用它时不希望处于相机空间

rust
@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    // ...
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.clip_position = camera.view_proj * model_matrix * vec4f(model.position, 1.0);
-    return out;
-}

完成后,应该就能看到一片树林了!

./forest.png

挑战

逐帧变更实例的位置 和/或 旋转弧度。

- +// 更新! +render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as _);

当你向数组添加新的实例时,请确保重新创建了 instance_buffercamera_bind_group,否则新实例不会正确显示。

shader.wgsl 中需要引入我们新增的矩阵,这样才能在实例中使用它。请在 shader.wgsl 文件的顶部添加以下代码:

rust
struct InstanceInput {
+    @location(5) model_matrix_0: vec4f,
+    @location(6) model_matrix_1: vec4f,
+    @location(7) model_matrix_2: vec4f,
+    @location(8) model_matrix_3: vec4f,
+};

在使用之前,我们需要将矩阵重新组装出来:

rust
@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    let model_matrix = mat4x4f(
+        instance.model_matrix_0,
+        instance.model_matrix_1,
+        instance.model_matrix_2,
+        instance.model_matrix_3,
+    );
+    // Continued...
+}

我们得在应用 camera_uniform.view_proj 之前先应用 model_matrix。因为 view_proj 将坐标系从世界空间(World Space)变换为相机空间(Camera Space),而 model_matrix 是一个世界空间的变换,所以在使用它时不希望处于相机空间

rust
@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    // ...
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.clip_position = camera.view_proj * model_matrix * vec4f(model.position, 1.0);
+    return out;
+}

完成后,应该就能看到一片树林了!

./forest.png

挑战

逐帧变更实例的位置 和/或 旋转弧度。

+ \ No newline at end of file diff --git a/beginner/tutorial8-depth/index.html b/beginner/tutorial8-depth/index.html index 01abab087..9d295ed4b 100644 --- a/beginner/tutorial8-depth/index.html +++ b/beginner/tutorial8-depth/index.html @@ -5,100 +5,102 @@ 深度缓冲区 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

深度缓冲区

让我们换个摄像机角度来仔细观察上个教程中的例子:

depth_problems.png

应该排在后面的对象被渲染在了前面的对象之前。这是由绘制顺序引起的。默认情况下,新对象的像素数据将取代帧缓冲区(FrameBuffer)相同坐标上旧的像素数据。

有两种方式可以解决这个问题:将数据从后往前排序; 或者使用深度缓冲区(Depth Buffer)。

从后往前排序

这是 2D 渲染的常用方法,因为很容易计算绘制对象的前后关系,甚至可以直接使用 Z 轴顺序。而在 3D 渲染中就有点棘手了,因为对象的前后关系会根据摄像机的角度而改变。

一个简单的方法是按照对象到摄像机的距离来排序。但这种方法也有缺陷,因为当大对象的模型中心坐标处在小对象后面时,大对象中本应在小对象前面的部分也会被渲染到后面。我们还会遇到对象本身重叠的问题。

如果想正确地实现绘制对象的前后关系,就需要有像素级的精度。这,就是 深度缓冲区 的作用。

像素深度

深度缓冲区是一个用来存储已渲染像素的 Z 轴坐标的纹理。在绘制新的像素时,wgpu 使用它来决定是替换数据还是丢弃。这种技术被称为深度测试,它将解决绘制顺序问题,而不需要我们对绘制对象进行排序!

让我们在 texture.rs 中添加一个函数来创建深度纹理

rust
impl Texture {
-    pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; // 1.
+    
Skip to content

深度缓冲区

让我们换个摄像机角度来仔细观察上个教程中的例子:

depth_problems.png

应该排在后面的对象被渲染在了前面的对象之前。这是由绘制顺序引起的。默认情况下,新对象的像素数据将取代帧缓冲区(FrameBuffer)相同坐标上旧的像素数据。

有两种方式可以解决这个问题:将数据从后往前排序; 或者使用深度缓冲区(Depth Buffer)。

从后往前排序

这是 2D 渲染的常用方法,因为很容易计算绘制对象的前后关系,甚至可以直接使用 Z 轴顺序。而在 3D 渲染中就有点棘手了,因为对象的前后关系会根据摄像机的角度而改变。

一个简单的方法是按照对象到摄像机的距离来排序。但这种方法也有缺陷,因为当大对象的模型中心坐标处在小对象后面时,大对象中本应在小对象前面的部分也会被渲染到后面。我们还会遇到对象本身重叠的问题。

如果想正确地实现绘制对象的前后关系,就需要有像素级的精度。这,就是 深度缓冲区 的作用。

像素深度

深度缓冲区是一个用来存储已渲染像素的 Z 轴坐标的纹理。在绘制新的像素时,wgpu 使用它来决定是替换数据还是丢弃。这种技术被称为深度测试,它将解决绘制顺序问题,而不需要我们对绘制对象进行排序!

让我们在 texture.rs 中添加一个函数来创建深度纹理

rust
impl Texture {
+    pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; // 1.
 
-    pub fn create_depth_texture(device: &wgpu::Device, config: &wgpu::SurfaceConfiguration, label: &str) -> Self {
-        let size = wgpu::Extent3d { // 2.
-            width: config.width,
-            height: config.height,
-            depth_or_array_layers: 1,
-        };
-        let desc = wgpu::TextureDescriptor {
-            label: Some(label),
-            size,
-            mip_level_count: 1,
-            sample_count: 1,
-            dimension: wgpu::TextureDimension::D2,
-            format: Self::DEPTH_FORMAT,
-            usage: wgpu::TextureUsages::RENDER_ATTACHMENT // 3.
-                | wgpu::TextureUsages::TEXTURE_BINDING,
-            view_formats: &[],
-        };
-        let texture = device.create_texture(&desc);
+    pub fn create_depth_texture(device: &wgpu::Device, config: &wgpu::SurfaceConfiguration, label: &str) -> Self {
+        let size = wgpu::Extent3d { // 2.
+            width: config.width,
+            height: config.height,
+            depth_or_array_layers: 1,
+        };
+        let desc = wgpu::TextureDescriptor {
+            label: Some(label),
+            size,
+            mip_level_count: 1,
+            sample_count: 1,
+            dimension: wgpu::TextureDimension::D2,
+            format: Self::DEPTH_FORMAT,
+            usage: wgpu::TextureUsages::RENDER_ATTACHMENT // 3.
+                | wgpu::TextureUsages::TEXTURE_BINDING,
+            view_formats: &[],
+        };
+        let texture = device.create_texture(&desc);
 
-        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
-        let sampler = device.create_sampler(
-            &wgpu::SamplerDescriptor { // 4.
-                address_mode_u: wgpu::AddressMode::ClampToEdge,
-                address_mode_v: wgpu::AddressMode::ClampToEdge,
-                address_mode_w: wgpu::AddressMode::ClampToEdge,
-                mag_filter: wgpu::FilterMode::Linear,
-                min_filter: wgpu::FilterMode::Linear,
-                mipmap_filter: wgpu::FilterMode::Nearest,
-                compare: Some(wgpu::CompareFunction::LessEqual), // 5.
-                lod_min_clamp: 0.0,
-                lod_max_clamp: 200.0,
-                ..Default::default()
-            }
-        );
+        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+        let sampler = device.create_sampler(
+            &wgpu::SamplerDescriptor { // 4.
+                address_mode_u: wgpu::AddressMode::ClampToEdge,
+                address_mode_v: wgpu::AddressMode::ClampToEdge,
+                address_mode_w: wgpu::AddressMode::ClampToEdge,
+                mag_filter: wgpu::FilterMode::Linear,
+                min_filter: wgpu::FilterMode::Linear,
+                mipmap_filter: wgpu::FilterMode::Nearest,
+                compare: Some(wgpu::CompareFunction::LessEqual), // 5.
+                lod_min_clamp: 0.0,
+                lod_max_clamp: 200.0,
+                ..Default::default()
+            }
+        );
 
-        Self { texture, view, sampler }
-    }
-}
  1. 定义 DEPTH_FORMAT 用于创建深度纹理render_pipeline 需要的 DepthStencilState 对象。
  2. 深度纹理的宽高需要与展示平面一致(更准确的说,是需要与当前的 Color Attachment 一致)。我们传入展示平面使用的 config 参数来确保它们的宽高相同。
  3. 由于要对这个纹理进行渲染,我们需要给它添加 RENDER_ATTACHMENT 使用范围标志。
  4. 从技术的角度来看,我们不需要深度纹理的采样器,但是我们的 Texture 结构体需要它。

现在在 State::new() 中创建我们的 depth_texture

rust
let depth_texture = texture::Texture::create_depth_texture(&device, &config, "depth_texture");

然后修改渲染管线以启用深度测试

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    // ...
-    depth_stencil: Some(wgpu::DepthStencilState {
-        format: texture::Texture::DEPTH_FORMAT,
-        depth_write_enabled: true,
-        depth_compare: wgpu::CompareFunction::Less, // 1.
-        stencil: wgpu::StencilState::default(), // 2.
-        bias: wgpu::DepthBiasState::default(),
-    }),
-    // ...
-});
  1. depth_compare 字段指定通过深度测试的条件。使用 LESS 意味着像素将被从后往前绘制,大于当前位置的深度值的像素将被丢弃。下面是可选的所有枚举值:
rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-pub enum CompareFunction {
-    Undefined = 0,
-    Never = 1,
-    Less = 2,
-    Equal = 3,
-    LessEqual = 4,
-    Greater = 5,
-    NotEqual = 6,
-    GreaterEqual = 7,
-    Always = 8,
-}
  1. 还有一种类型的缓冲区叫做模版缓冲区(Stencil Buffer)。模版缓冲区和深度缓冲区通常被存储在同一个纹理中。这些字段控制着模版测试的数值。目前我们没有使用模版缓冲区,这里就使用默认值。在以后的教程中再详细介绍模版缓冲区。

不要忘了在 State 中存储 depth_texture

rust
Self {
-// ...
-depth_texture,
-}

还要记得修改 resize() 函数来更新深度纹理及它的纹理视图

rust
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-    // ...
-    self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture");
-    // ...
-}

请确保更新了 config 之后一定要更新 depth_texture,否则程序就会崩溃,因为此时 depth_texturesurface 纹理的宽高已经不一致了(还记得上边提到过的 “深度纹理的宽高需要与展示平面一致” 吗?)。

最后是修改 render() 函数,我们已经创建了深度纹理,但目前还没有使用。只需把它绑定到渲染通道depth_stencil_attachment 字段即可:

rust
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-    // ...
-    depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
-        view: &self.depth_texture.view,
-        depth_ops: Some(wgpu::Operations {
-            load: wgpu::LoadOp::Clear(1.0),
-            store: wgpu::StoreOp::Store
-        }),
-        stencil_ops: None,
-    }),
-    ..Default::default()
-});

这就是我们所要做的!不涉及着色器代码!现在运行该应用程序,将看到深度问题已不复存在:

forest_fixed.png

挑战

深度缓冲区是一张纹理,所以我们可以在着色器中对其采样。请为深度纹理创建一个绑定组(或重用现有的),并将其渲染到屏幕上。

- + Self { texture, view, sampler } + } +}
  1. 定义 DEPTH_FORMAT 用于创建深度纹理render_pipeline 需要的 DepthStencilState 对象。
  2. 深度纹理的宽高需要与展示平面一致(更准确的说,是需要与当前的 Color Attachment 一致)。我们传入展示平面使用的 config 参数来确保它们的宽高相同。
  3. 由于要对这个纹理进行渲染,我们需要给它添加 RENDER_ATTACHMENT 使用范围标志。
  4. 从技术的角度来看,我们不需要深度纹理的采样器,是我们的 Texture 结构体需要它。

现在在 State::new() 中创建我们的 depth_texture

rust
let depth_texture = texture::Texture::create_depth_texture(&device, &config, "depth_texture");

然后修改渲染管线以启用深度测试

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    // ...
+    depth_stencil: Some(wgpu::DepthStencilState {
+        format: texture::Texture::DEPTH_FORMAT,
+        depth_write_enabled: true,
+        depth_compare: wgpu::CompareFunction::Less, // 1.
+        stencil: wgpu::StencilState::default(), // 2.
+        bias: wgpu::DepthBiasState::default(),
+    }),
+    // ...
+});
  1. depth_compare 字段指定通过深度测试的条件。使用 LESS 意味着像素将被从后往前绘制,大于当前位置的深度值的像素将被丢弃。下面是可选的所有枚举值:
rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum CompareFunction {
+    Undefined = 0,
+    Never = 1,
+    Less = 2,
+    Equal = 3,
+    LessEqual = 4,
+    Greater = 5,
+    NotEqual = 6,
+    GreaterEqual = 7,
+    Always = 8,
+}
  1. 还有一种类型的缓冲区叫做模版缓冲区(Stencil Buffer)。模版缓冲区和深度缓冲区通常被存储在同一个纹理中。这些字段控制着模版测试的数值。目前我们没有使用模版缓冲区,这里就使用默认值。在以后的教程中再详细介绍模版缓冲区。

不要忘了在 State 中存储 depth_texture

rust
Self {
+// ...
+depth_texture,
+}

还要记得修改 resize() 函数来更新深度纹理及它的纹理视图

rust
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+    // ...
+    self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture");
+    // ...
+}

请确保更新了 config 之后一定要更新 depth_texture,否则程序就会崩溃,因为此时 depth_texturesurface 纹理的宽高已经不一致了(还记得上边提到过的 “深度纹理的宽高需要与展示平面一致” 吗?)。

最后是修改 render() 函数,我们已经创建了深度纹理,但目前还没有使用。只需把它绑定到渲染通道depth_stencil_attachment 字段即可:

rust
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+    // ...
+    depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
+        view: &self.depth_texture.view,
+        depth_ops: Some(wgpu::Operations {
+            load: wgpu::LoadOp::Clear(1.0),
+            store: wgpu::StoreOp::Store
+        }),
+        stencil_ops: None,
+    }),
+    ..Default::default()
+});

这就是我们所要做的!不涉及着色器代码!现在运行该应用程序,将看到深度问题已不复存在:

forest_fixed.png

挑战

深度缓冲区是一张纹理,所以我们可以在着色器中对其采样。请为深度纹理创建一个绑定组(或重用现有的),并将其渲染到屏幕上。

+ \ No newline at end of file diff --git a/beginner/tutorial9-models/index.html b/beginner/tutorial9-models/index.html index 8f6e8f73d..056f0a57f 100644 --- a/beginner/tutorial9-models/index.html +++ b/beginner/tutorial9-models/index.html @@ -5,395 +5,397 @@ 模型加载 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

模型加载

到目前为止,我们一直在手动创建模型。简单的模型当然可以这么干,但如果是有成千上万多边形的复杂模型,那就行不通了。因此,我们将修改代码以利用 .obj 模型格式,以便可以利用 Blender 等软件来创建模型并运用到项目中。

lib.rs 文件中堆砌的代码已经很多了,让我们创建一个 model.rs 文件来安置所有模型加载相关的代码:

rust
// model.rs
-pub trait Vertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a>;
-}
-
-#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-pub struct ModelVertex {
-    pub position: [f32; 3],
-    pub tex_coords: [f32; 2],
-    pub normal: [f32; 3],
-}
-
-impl Vertex for ModelVertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        todo!();
-    }
-}

你会注意到这里有几点变化:

首先是 Vertex, 它在 lib.rs 中是一个结构体,而这里我们改为了 trait。我们会有多种顶点类型(模型、UI、实例数据等),Vertex 做为 trait 令我们能从其中抽象出 VertexBufferLayout 的创建函数,从而简化渲染管线的创建。

其次是 ModelVertex 中新增了 normal 字段。在讨论光照之前暂时不会用到它。

让我们来创建 VertexBufferLayout

rust
impl Vertex for ModelVertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    shader_location: 0,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
-                    shader_location: 1,
-                    format: wgpu::VertexFormat::Float32x2,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
-                    shader_location: 2,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-            ],
-        }
-    }
-}

这与原来的 VertexBufferLayout 基本相同,只是为 normal 添加了一个 VertexAttribute。删除 lib.rs 中我们已不再需要的旧 Vertex 结构体,并在 RenderPipeline 中使用来自 model 的新 Vertex:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    // ...
-    vertex: wgpu::VertexState {
-        // ...
-        buffers: &[model::ModelVertex::desc(), InstanceRaw::desc()],
-    },
-    // ...
-});

由于 desc 接口是定义在 Vertex trait 上的,因此需要先导入 Vertex,然后才能调用到该接口的具体实现。只需将导入代码放在文件顶部:

rust
use model::Vertex;

现在,我们需要一个用于渲染的模型。你可以使用自己的模型,我这也提供了一个模型及其纹理的 zip 压缩包 。我们将新建一个与 src 目录同级的 res 目录来安置这个模型。

访问资源文件

cargo 在构建并运行程序时会设置一个当前工作目录,该目录通常就是放置了 Cargo.toml 文件的项目根目录。资源(res)目录的路径会因项目的结构而异。本节教程示例代码的资源目录位于 code/beginner/tutorial9-models/res/。我们加载模型时可以使用这个路径,仅需在路径后拼上 cube.obj。这似乎很完美,可一旦修改项目的目录结构,写在代码里的路径就不可用了。

所以,我们通过修改构建脚本,将 res 目录复制到 cargo 创建可执行文件的位置来解决此问题,然后再从那里引用资源文件。创建一个 build.rs(构建文件的默认名称)文件并添加以下代码:

rust
use anyhow::*;
-use fs_extra::copy_items;
-use fs_extra::dir::CopyOptions;
-use std::env;
-
-fn main() -> Result<()> {
-    // 这一行告诉 cargo 如果 /res/ 目录中的内容发生了变化,就重新运行脚本
-    println!("cargo:rerun-if-changed=res/*");
-
-    let out_dir = env::var("OUT_DIR")?;
-    let mut copy_options = CopyOptions::new();
-    copy_options.overwrite = true;
-    let mut paths_to_copy = Vec::new();
-    paths_to_copy.push("res/");
-    copy_items(&paths_to_copy, out_dir, &copy_options)?;
-
-    Ok(())
-}

确保将 build.rs 放在与 Cargo.toml 相同的目录中,只有这样,在项目构建时 cargo 才能运行此构建脚本。

OUT_DIR 是一个环境变量,cargo 用它来指定应用程序将在哪里构建。

还需修改 Cargo.toml 来让构建脚本能正常运行,在构建依赖[build-dependencies])配置里添加以下依赖项:

toml
[build-dependencies]
-anyhow = "1.0"
-fs_extra = "1.3"
-glob = "0.3"

从 WASM 访问文件

遵循 WASM 规范,你不能在 Web Assembly 中访问用户文件系统上的文件。所以,我们利用 web 服务来提供这些文件,然后使用 http 请求将文件加载到代码中。让我们创建一个名为 resources.rs 的文件来处理这个问题,创建两个函数分别用于加载文本文件和二进制文件:

rust
use std::io::{BufReader, Cursor};
-
-use cfg_if::cfg_if;
-use wgpu::util::DeviceExt;
-
-use crate::{model, texture};
-
-#[cfg(target_arch = "wasm32")]
-fn format_url(file_name: &str) -> reqwest::Url {
-    let window = web_sys::window().unwrap();
-    let location = window.location();
-    let base = reqwest::Url::parse(&format!(
-        "{}/{}/",
-        location.origin().unwrap(),
-        option_env!("RES_PATH").unwrap_or("res"),
-    )).unwrap();
-    base.join(file_name).unwrap()
-}
-
-pub async fn load_string(file_name: &str) -> anyhow::Result<String> {
-    cfg_if! {
-        if #[cfg(target_arch = "wasm32")] {
-            let url = format_url(file_name);
-            let txt = reqwest::get(url)
-                .await?
-                .text()
-                .await?;
-        } else {
-            let path = std::path::Path::new(env!("OUT_DIR"))
-                .join("res")
-                .join(file_name);
-            let txt = std::fs::read_to_string(path)?;
-        }
-    }
-
-    Ok(txt)
-}
-
-pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
-    cfg_if! {
-        if #[cfg(target_arch = "wasm32")] {
-            let url = format_url(file_name);
-            let data = reqwest::get(url)
-                .await?
-                .bytes()
-                .await?
-                .to_vec();
-        } else {
-            let path = std::path::Path::new(env!("OUT_DIR"))
-                .join("res")
-                .join(file_name);
-            let data = std::fs::read(path)?;
-        }
-    }
-
-    Ok(data)
-}

桌面环境里,我们是使用 OUT_DIR 环境变量来访问资源目录。

在 WASM 环境里,我们使用了 reqwest 来处理网络请求。需将以下依赖项添加到 Cargo.toml:

toml
[target.'cfg(target_arch = "wasm32")'.dependencies]
-# Other dependencies
-reqwest = { version = "0.11" }

还需要将 Location 功能添加到 web-sys 的 features 数组里:

toml
web-sys = { version = "0.3.64", features = [
-    "Document",
-    "Window",
-    "Element",
-    "Location",
-]}

确保 resources 作为模块已添加到 lib.rs 中:

rust
mod resources;

使用 TOBJ 加载模型

加载模型使用的是 tobj 。让我们将其添加到 Cargo.toml 中:

toml
[dependencies]
-# other dependencies...
-tobj = { version = "3.2.1", features = [
-    "async",
-]}

在加载模型之前,我们需要有一个结构体来存放模型数据:

rust
// model.rs
-pub struct Model {
-    pub meshes: Vec<Mesh>,
-    pub materials: Vec<Material>,
-}

Model 结构体中 meshesmaterials 两个字段都是动态数组类型。这很重要,因为一个 obj 文件可以包含多个网格材质。下面我们接着来创建 MeshMaterial 结构体:

rust
pub struct Material {
-    pub name: String,
-    pub diffuse_texture: texture::Texture,
-    pub bind_group: wgpu::BindGroup,
-}
-
-pub struct Mesh {
-    pub name: String,
-    pub vertex_buffer: wgpu::Buffer,
-    pub index_buffer: wgpu::Buffer,
-    pub num_elements: u32,
-    pub material: usize,
-}

Material 很简单,它主要有一个名称字段和一个纹理字段。名称更多是被用于程序调试。我们的立方体模型实际上有 2 个纹理,但其中一个是法线贴图稍后 我们会介绍这些纹理。

说到纹理,我们还需在 resources.rs 中添加一个函数来加载 Texture

rust
pub async fn load_texture(
-    file_name: &str,
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-) -> anyhow::Result<texture::Texture> {
-    let data = load_binary(file_name).await?;
-    texture::Texture::from_bytes(device, queue, &data, file_name)
-}

load_texture 函数在为模型加载纹理时会很有用,因为 include_bytes! 宏要求我们在编译阶段就指定文件名称并加载纹理数据到构建的程序包内,而我们希望模型纹理能根据需要动态加载。

Mesh 包含一个顶点缓冲区、一个索引缓冲区和网格中的索引数,material 字段被定义为 usize 类型,它将用于在绘制时索引 materials 列表。

完成上面这些后,我们就可以加载模型了:

rust
pub async fn load_model(
-    file_name: &str,
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-    layout: &wgpu::BindGroupLayout,
-) -> anyhow::Result<model::Model> {
-    let obj_text = load_string(file_name).await?;
-    let obj_cursor = Cursor::new(obj_text);
-    let mut obj_reader = BufReader::new(obj_cursor);
-
-    let (models, obj_materials) = tobj::load_obj_buf_async(
-        &mut obj_reader,
-        &tobj::LoadOptions {
-            triangulate: true,
-            single_index: true,
-            ..Default::default()
-        },
-        |p| async move {
-            let mat_text = load_string(&p).await.unwrap();
-            tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
-        },
-    )
-    .await?;
-
-    let mut materials = Vec::new();
-    for m in obj_materials? {
-        let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
-        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-            layout,
-            entries: &[
-                wgpu::BindGroupEntry {
-                    binding: 0,
-                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
-                },
-                wgpu::BindGroupEntry {
-                    binding: 1,
-                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
-                },
-            ],
-            label: None,
-        });
-
-        materials.push(model::Material {
-            name: m.name,
-            diffuse_texture,
-            bind_group,
-        })
-    }
-
-    let meshes = models
-        .into_iter()
-        .map(|m| {
-            let vertices = (0..m.mesh.positions.len() / 3)
-                .map(|i| model::ModelVertex {
-                    position: [
-                        m.mesh.positions[i * 3],
-                        m.mesh.positions[i * 3 + 1],
-                        m.mesh.positions[i * 3 + 2],
-                    ],
-                    tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
-                    normal: [
-                        m.mesh.normals[i * 3],
-                        m.mesh.normals[i * 3 + 1],
-                        m.mesh.normals[i * 3 + 2],
-                    ],
-                })
-                .collect::<Vec<_>>();
-
-            let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                label: Some(&format!("{:?} Vertex Buffer", file_name)),
-                contents: bytemuck::cast_slice(&vertices),
-                usage: wgpu::BufferUsages::VERTEX,
-            });
-            let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                label: Some(&format!("{:?} Index Buffer", file_name)),
-                contents: bytemuck::cast_slice(&m.mesh.indices),
-                usage: wgpu::BufferUsages::INDEX,
-            });
-
-            model::Mesh {
-                name: file_name.to_string(),
-                vertex_buffer,
-                index_buffer,
-                num_elements: m.mesh.indices.len() as u32,
-                material: m.mesh.material_id.unwrap_or(0),
-            }
-        })
-        .collect::<Vec<_>>();
-
-    Ok(model::Model { meshes, materials })
-}

渲染网格

在能够绘制完整模型之前,需要能绘制单个网格对象。让我们创建一个名为 DrawModel 的 trait,并为 RenderPass 实现它:

rust
// model.rs
-pub trait DrawModel<'a> {
-    fn draw_mesh(&mut self, mesh: &'a Mesh);
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'a Mesh,
-        instances: Range<u32>,
-    );
-}
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    fn draw_mesh(&mut self, mesh: &'b Mesh) {
-        self.draw_mesh_instanced(mesh, 0..1);
-    }
-
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'b Mesh,
-        instances: Range<u32>,
-    ){
-        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
-        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
-        self.draw_indexed(0..mesh.num_elements, 0, instances);
-    }
-}

把这些函数放在 impl Model 中也是可以的,但我觉得让渲染通道做所有的渲染(准确地说,渲染通道只是编码所有的渲染命令)更加合理,因为这是它的工作。这也意味着在渲染时必须先导入 DrawModel trait:

rust
// lib.rs
-render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-render_pass.set_pipeline(&self.render_pipeline);
-render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
-render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
-
-use model::DrawModel;
-render_pass.draw_mesh_instanced(&self.obj_model.meshes[0], 0..self.instances.len() as u32);

在开始绘制之前,需要实际加载模型并将其保存到 State 实例。请在 State::new() 中加入以下代码:

rust
let obj_model = resources::load_model(
-    "cube.obj",
-    &device,
-    &queue,
-    &texture_bind_group_layout,
-).await.unwrap();

我们的新模型比之前的五角星要大一些,所以需要调整一下实例间的间距:

rust
const SPACE_BETWEEN: f32 = 3.0;
-let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
-    (0..NUM_INSTANCES_PER_ROW).map(move |x| {
-        let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
-        let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
-
-        let position = glam::Vec3 { x, y: 0.0, z };
-
-        let rotation = if position.length().abs() <= std::f32::EPSILON {
-             glam::Quat::from_axis_angle(glam::Vec3::Z, 0.0)
-        } else {
-            glam::Quat::from_axis_angle(position.normalize(), consts::FRAC_PI_4)
-        };
-
-        Instance {
-            position, rotation,
-        }
-    })
-}).collect::<Vec<_>>();

完成上面这些后,运行项目你就能看到如下渲染效果:

cubes.png

使用正确的纹理

我们目前看到的是还是之前的树纹理,它显然不是 obj 文件里的纹理。正确的纹理应该是下边这个:

cube-diffuse.jpg

这其中的原因很简单:尽管我们已经创建了纹理,但还没有创建一个绑定组来给 RenderPass,使用的仍然是 diffuse_bind_group

如果想修正这一点,我们就需要使用材质绑定组--Material 结构体的 bind_group 字段。

现在,我们来给 DrawModel 添加一个材质参数:

rust
pub trait DrawModel<'a> {
-    fn draw_mesh(&mut self, mesh: &'a Mesh, material: &'a Material, camera_bind_group: &'a wgpu::BindGroup);
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'a Mesh,
-        material: &'a Material,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-    );
-
-}
-
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    fn draw_mesh(&mut self, mesh: &'b Mesh, material: &'b Material, camera_bind_group: &'b wgpu::BindGroup) {
-        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group);
-    }
-
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'b Mesh,
-        material: &'b Material,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
-        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
-        self.set_bind_group(0, &material.bind_group, &[]);
-        self.set_bind_group(1, camera_bind_group, &[]);
-        self.draw_indexed(0..mesh.num_elements, 0, instances);
-    }
-}

接下来修改渲染代码以使用正确的材质参数:

rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-
-render_pass.set_pipeline(&self.render_pipeline);
-
-let mesh = &self.obj_model.meshes[0];
-let material = &self.obj_model.materials[mesh.material];
-render_pass.draw_mesh_instanced(mesh, material, 0..self.instances.len() as u32, &self.camera_bind_group);

全部修改完毕,就能看到如下渲染效果:

cubes-correct.png

渲染完整模型

上边的代码直接指定了网格和对应的材质。这对使用不同的材质绘制网格很有用。

我们还没有渲染模型的其他部分,让我们为 DrawModel 新增一个函数,它将绘制模型的所有网格和对应的材质:

rust
pub trait DrawModel<'a> {
-    // ...
-    fn draw_model(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup);
-    fn draw_model_instanced(
-        &mut self,
-        model: &'a Model,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-    );
-}
-
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a, {
-    // ...
-    fn draw_model(&mut self, model: &'b Model, camera_bind_group: &'b wgpu::BindGroup) {
-        self.draw_model_instanced(model, 0..1, camera_bind_group);
-    }
-
-    fn draw_model_instanced(
-        &mut self,
-        model: &'b Model,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-    ) {
-        for mesh in &model.meshes {
-            let material = &model.materials[mesh.material];
-            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group);
-        }
-    }
-}

lib.rs 中的代码也相应地修改一下以调用新的 draw_model_instanced 函数:

rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-render_pass.set_pipeline(&self.render_pipeline);
-render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.camera_bind_group);
- +
Skip to content

模型加载

到目前为止,我们一直在手动创建模型。简单的模型当然可以这么干,但如果是有成千上万多边形的复杂模型,那就行不通了。因此,我们将修改代码以利用 .obj 模型格式,以便可以利用 Blender 等软件来创建模型并运用到项目中。

lib.rs 文件中堆砌的代码已经很多了,让我们创建一个 model.rs 文件来安置所有模型加载相关的代码:

rust
// model.rs
+pub trait Vertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a>;
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+pub struct ModelVertex {
+    pub position: [f32; 3],
+    pub tex_coords: [f32; 2],
+    pub normal: [f32; 3],
+}
+
+impl Vertex for ModelVertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        todo!();
+    }
+}

你会注意到这里有几点变化:

首先是 Vertex, 它在 lib.rs 中是一个结构体,而这里我们改为了 trait。我们会有多种顶点类型(模型、UI、实例数据等),Vertex 做为 trait 令我们能从其中抽象出 VertexBufferLayout 的创建函数,从而简化渲染管线的创建。

其次是 ModelVertex 中新增了 normal 字段。在讨论光照之前暂时不会用到它。

让我们来创建 VertexBufferLayout

rust
impl Vertex for ModelVertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    shader_location: 0,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+                    shader_location: 1,
+                    format: wgpu::VertexFormat::Float32x2,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
+                    shader_location: 2,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+            ],
+        }
+    }
+}

这与原来的 VertexBufferLayout 基本相同,只是为 normal 添加了一个 VertexAttribute。删除 lib.rs 中我们已不再需要的旧 Vertex 结构体,并在 RenderPipeline 中使用来自 model 的新 Vertex:

rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    // ...
+    vertex: wgpu::VertexState {
+        // ...
+        buffers: &[model::ModelVertex::desc(), InstanceRaw::desc()],
+    },
+    // ...
+});

由于 desc 接口是定义在 Vertex trait 上的,因此需要先导入 Vertex,然后才能调用到该接口的具体实现。只需将导入代码放在文件顶部:

rust
use model::Vertex;

现在,我们需要一个用于渲染的模型。你可以使用自己的模型,我这也提供了一个模型及其纹理的 zip 压缩包 。我们将新建一个与 src 目录同级的 res 目录来安置这个模型。

访问资源文件

cargo 在构建并运行程序时会设置一个当前工作目录,该目录通常就是放置了 Cargo.toml 文件的项目根目录。资源(res)目录的路径会因项目的结构而异。本节教程示例代码的资源目录位于 code/beginner/tutorial9-models/res/。我们加载模型时可以使用这个路径,仅需在路径后拼上 cube.obj。这似乎很完美,可一旦修改项目的目录结构,写在代码里的路径就不可用了。

所以,我们通过修改构建脚本,将 res 目录复制到 cargo 创建可执行文件的位置来解决此问题,然后再从那里引用资源文件。创建一个 build.rs(构建文件的默认名称)文件并添加以下代码:

rust
use anyhow::*;
+use fs_extra::copy_items;
+use fs_extra::dir::CopyOptions;
+use std::env;
+
+fn main() -> Result<()> {
+    // 这一行告诉 cargo 如果 /res/ 目录中的内容发生了变化,就重新运行脚本
+    println!("cargo:rerun-if-changed=res/*");
+
+    let out_dir = env::var("OUT_DIR")?;
+    let mut copy_options = CopyOptions::new();
+    copy_options.overwrite = true;
+    let mut paths_to_copy = Vec::new();
+    paths_to_copy.push("res/");
+    copy_items(&paths_to_copy, out_dir, &copy_options)?;
+
+    Ok(())
+}

确保将 build.rs 放在与 Cargo.toml 相同的目录中,只有这样,在项目构建时 cargo 才能运行此构建脚本。

OUT_DIR 是一个环境变量,cargo 用它来指定应用程序将在哪里构建。

还需修改 Cargo.toml 来让构建脚本能正常运行,在构建依赖[build-dependencies])配置里添加以下依赖项:

toml
[build-dependencies]
+anyhow = "1.0"
+fs_extra = "1.3"
+glob = "0.3"

从 WASM 访问文件

遵循 WASM 规范,你不能在 Web Assembly 中访问用户文件系统上的文件。所以,我们利用 web 服务来提供这些文件,然后使用 http 请求将文件加载到代码中。让我们创建一个名为 resources.rs 的文件来处理这个问题,创建两个函数分别用于加载文本文件和二进制文件:

rust
use std::io::{BufReader, Cursor};
+
+use cfg_if::cfg_if;
+use wgpu::util::DeviceExt;
+
+use crate::{model, texture};
+
+#[cfg(target_arch = "wasm32")]
+fn format_url(file_name: &str) -> reqwest::Url {
+    let window = web_sys::window().unwrap();
+    let location = window.location();
+    let base = reqwest::Url::parse(&format!(
+        "{}/{}/",
+        location.origin().unwrap(),
+        option_env!("RES_PATH").unwrap_or("res"),
+    )).unwrap();
+    base.join(file_name).unwrap()
+}
+
+pub async fn load_string(file_name: &str) -> anyhow::Result<String> {
+    cfg_if! {
+        if #[cfg(target_arch = "wasm32")] {
+            let url = format_url(file_name);
+            let txt = reqwest::get(url)
+                .await?
+                .text()
+                .await?;
+        } else {
+            let path = std::path::Path::new(env!("OUT_DIR"))
+                .join("res")
+                .join(file_name);
+            let txt = std::fs::read_to_string(path)?;
+        }
+    }
+
+    Ok(txt)
+}
+
+pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
+    cfg_if! {
+        if #[cfg(target_arch = "wasm32")] {
+            let url = format_url(file_name);
+            let data = reqwest::get(url)
+                .await?
+                .bytes()
+                .await?
+                .to_vec();
+        } else {
+            let path = std::path::Path::new(env!("OUT_DIR"))
+                .join("res")
+                .join(file_name);
+            let data = std::fs::read(path)?;
+        }
+    }
+
+    Ok(data)
+}

桌面环境里,我们是使用 OUT_DIR 环境变量来访问资源目录。

在 WASM 环境里,我们使用了 reqwest 来处理网络请求。需将以下依赖项添加到 Cargo.toml:

toml
[target.'cfg(target_arch = "wasm32")'.dependencies]
+# Other dependencies
+reqwest = { version = "0.11" }

还需要将 Location 功能添加到 web-sys 的 features 数组里:

toml
web-sys = { version = "0.3.64", features = [
+    "Document",
+    "Window",
+    "Element",
+    "Location",
+]}

确保 resources 作为模块已添加到 lib.rs 中:

rust
mod resources;

使用 TOBJ 加载模型

加载模型使用的是 tobj 。让我们将其添加到 Cargo.toml 中:

toml
[dependencies]
+# other dependencies...
+tobj = { version = "3.2.1", features = [
+    "async",
+]}

在加载模型之前,我们需要有一个结构体来存放模型数据:

rust
// model.rs
+pub struct Model {
+    pub meshes: Vec<Mesh>,
+    pub materials: Vec<Material>,
+}

Model 结构体中 meshesmaterials 两个字段都是动态数组类型。这很重要,因为一个 obj 文件可以包含多个网格材质。下面我们接着来创建 MeshMaterial 结构体:

rust
pub struct Material {
+    pub name: String,
+    pub diffuse_texture: texture::Texture,
+    pub bind_group: wgpu::BindGroup,
+}
+
+pub struct Mesh {
+    pub name: String,
+    pub vertex_buffer: wgpu::Buffer,
+    pub index_buffer: wgpu::Buffer,
+    pub num_elements: u32,
+    pub material: usize,
+}

Material 很简单,它主要有一个名称字段和一个纹理字段。名称更多是被用于程序调试。我们的立方体模型实际上有 2 个纹理,但其中一个是法线贴图稍后 我们会介绍这些纹理。

说到纹理,我们还需在 resources.rs 中添加一个函数来加载 Texture

rust
pub async fn load_texture(
+    file_name: &str,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+) -> anyhow::Result<texture::Texture> {
+    let data = load_binary(file_name).await?;
+    texture::Texture::from_bytes(device, queue, &data, file_name)
+}

load_texture 函数在为模型加载纹理时会很有用,因为 include_bytes! 宏要求我们在编译阶段就指定文件名称并加载纹理数据到构建的程序包内,而我们希望模型纹理能根据需要动态加载。

Mesh 包含一个顶点缓冲区、一个索引缓冲区和网格中的索引数,material 字段被定义为 usize 类型,它将用于在绘制时索引 materials 列表。

完成上面这些后,我们就可以加载模型了:

rust
pub async fn load_model(
+    file_name: &str,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    layout: &wgpu::BindGroupLayout,
+) -> anyhow::Result<model::Model> {
+    let obj_text = load_string(file_name).await?;
+    let obj_cursor = Cursor::new(obj_text);
+    let mut obj_reader = BufReader::new(obj_cursor);
+
+    let (models, obj_materials) = tobj::load_obj_buf_async(
+        &mut obj_reader,
+        &tobj::LoadOptions {
+            triangulate: true,
+            single_index: true,
+            ..Default::default()
+        },
+        |p| async move {
+            let mat_text = load_string(&p).await.unwrap();
+            tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
+        },
+    )
+    .await?;
+
+    let mut materials = Vec::new();
+    for m in obj_materials? {
+        let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
+        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            layout,
+            entries: &[
+                wgpu::BindGroupEntry {
+                    binding: 0,
+                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 1,
+                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
+                },
+            ],
+            label: None,
+        });
+
+        materials.push(model::Material {
+            name: m.name,
+            diffuse_texture,
+            bind_group,
+        })
+    }
+
+    let meshes = models
+        .into_iter()
+        .map(|m| {
+            let vertices = (0..m.mesh.positions.len() / 3)
+                .map(|i| model::ModelVertex {
+                    position: [
+                        m.mesh.positions[i * 3],
+                        m.mesh.positions[i * 3 + 1],
+                        m.mesh.positions[i * 3 + 2],
+                    ],
+                    tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
+                    normal: [
+                        m.mesh.normals[i * 3],
+                        m.mesh.normals[i * 3 + 1],
+                        m.mesh.normals[i * 3 + 2],
+                    ],
+                })
+                .collect::<Vec<_>>();
+
+            let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+                label: Some(&format!("{:?} Vertex Buffer", file_name)),
+                contents: bytemuck::cast_slice(&vertices),
+                usage: wgpu::BufferUsages::VERTEX,
+            });
+            let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+                label: Some(&format!("{:?} Index Buffer", file_name)),
+                contents: bytemuck::cast_slice(&m.mesh.indices),
+                usage: wgpu::BufferUsages::INDEX,
+            });
+
+            model::Mesh {
+                name: file_name.to_string(),
+                vertex_buffer,
+                index_buffer,
+                num_elements: m.mesh.indices.len() as u32,
+                material: m.mesh.material_id.unwrap_or(0),
+            }
+        })
+        .collect::<Vec<_>>();
+
+    Ok(model::Model { meshes, materials })
+}

渲染网格

在能够绘制完整模型之前,需要能绘制单个网格对象。让我们创建一个名为 DrawModel 的 trait,并为 RenderPass 实现它:

rust
// model.rs
+pub trait DrawModel<'a> {
+    fn draw_mesh(&mut self, mesh: &'a Mesh);
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'a Mesh,
+        instances: Range<u32>,
+    );
+}
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    fn draw_mesh(&mut self, mesh: &'b Mesh) {
+        self.draw_mesh_instanced(mesh, 0..1);
+    }
+
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'b Mesh,
+        instances: Range<u32>,
+    ){
+        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
+        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        self.draw_indexed(0..mesh.num_elements, 0, instances);
+    }
+}

把这些函数放在 impl Model 中也是可以的,但我觉得让渲染通道做所有的渲染(准确地说,渲染通道只是编码所有的渲染命令)更加合理,因为这是它的工作。这也意味着在渲染时必须先导入 DrawModel trait:

rust
// lib.rs
+render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+render_pass.set_pipeline(&self.render_pipeline);
+render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
+render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
+
+use model::DrawModel;
+render_pass.draw_mesh_instanced(&self.obj_model.meshes[0], 0..self.instances.len() as u32);

在开始绘制之前,需要实际加载模型并将其保存到 State 实例。请在 State::new() 中加入以下代码:

rust
let obj_model = resources::load_model(
+    "cube.obj",
+    &device,
+    &queue,
+    &texture_bind_group_layout,
+).await.unwrap();

我们的新模型比之前的五角星要大一些,所以需要调整一下实例间的间距:

rust
const SPACE_BETWEEN: f32 = 3.0;
+let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
+    (0..NUM_INSTANCES_PER_ROW).map(move |x| {
+        let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
+        let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
+
+        let position = glam::Vec3 { x, y: 0.0, z };
+
+        let rotation = if position.length().abs() <= std::f32::EPSILON {
+             glam::Quat::from_axis_angle(glam::Vec3::Z, 0.0)
+        } else {
+            glam::Quat::from_axis_angle(position.normalize(), consts::FRAC_PI_4)
+        };
+
+        Instance {
+            position, rotation,
+        }
+    })
+}).collect::<Vec<_>>();

完成上面这些后,运行项目你就能看到如下渲染效果:

cubes.png

使用正确的纹理

我们目前看到的是还是之前的树纹理,它显然不是 obj 文件里的纹理。正确的纹理应该是下边这个:

cube-diffuse.jpg

这其中的原因很简单:尽管我们已经创建了纹理,但还没有创建一个绑定组把它提供给 RenderPass,渲染时使用的仍然是 diffuse_bind_group

如果想修正这一点,我们就需要使用材质绑定组--Material 结构体的 bind_group 字段。

现在,我们来给 DrawModel 添加一个材质参数:

rust
pub trait DrawModel<'a> {
+    fn draw_mesh(&mut self, mesh: &'a Mesh, material: &'a Material, camera_bind_group: &'a wgpu::BindGroup);
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'a Mesh,
+        material: &'a Material,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+    );
+
+}
+
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    fn draw_mesh(&mut self, mesh: &'b Mesh, material: &'b Material, camera_bind_group: &'b wgpu::BindGroup) {
+        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group);
+    }
+
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'b Mesh,
+        material: &'b Material,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
+        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        self.set_bind_group(0, &material.bind_group, &[]);
+        self.set_bind_group(1, camera_bind_group, &[]);
+        self.draw_indexed(0..mesh.num_elements, 0, instances);
+    }
+}

接下来修改渲染代码以使用正确的材质参数:

rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+
+render_pass.set_pipeline(&self.render_pipeline);
+
+let mesh = &self.obj_model.meshes[0];
+let material = &self.obj_model.materials[mesh.material];
+render_pass.draw_mesh_instanced(mesh, material, 0..self.instances.len() as u32, &self.camera_bind_group);

全部修改完毕,就能看到如下渲染效果:

cubes-correct.png

渲染完整模型

上边的代码直接指定了网格和对应的材质。这对使用不同的材质绘制网格很有用。

我们还没有渲染模型的其他部分,让我们为 DrawModel 新增一个函数,它将绘制模型的所有网格和对应的材质:

rust
pub trait DrawModel<'a> {
+    // ...
+    fn draw_model(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup);
+    fn draw_model_instanced(
+        &mut self,
+        model: &'a Model,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+    );
+}
+
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a, {
+    // ...
+    fn draw_model(&mut self, model: &'b Model, camera_bind_group: &'b wgpu::BindGroup) {
+        self.draw_model_instanced(model, 0..1, camera_bind_group);
+    }
+
+    fn draw_model_instanced(
+        &mut self,
+        model: &'b Model,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            let material = &model.materials[mesh.material];
+            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group);
+        }
+    }
+}

lib.rs 中的代码也相应地修改一下以调用新的 draw_model_instanced 函数:

rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+render_pass.set_pipeline(&self.render_pipeline);
+render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.camera_bind_group);
+ \ No newline at end of file diff --git a/beginner/wgsl.html b/beginner/wgsl.html index 115f89e2a..390c627e9 100644 --- a/beginner/wgsl.html +++ b/beginner/wgsl.html @@ -5,273 +5,275 @@ WGSL 着色器语言 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

WGSL 着色器语言

WGSL 的来由

WebGPU 的目标是要在各个现代底层图形 API 之上抽象出一套统一的图形 API,而每个底层图形 API 后端都有自己的着色语言:

  • DirectX 使用 HLSL(High Level Shading Language)
  • Metal 使用 MSL(Metal Shading Language)
  • OpenGL 使用 GLSL(OpenGL Shading Language)
  • Vulkan 使用的着色语言又跟之前的图形 API 都不同,它的着色器必须以 SPIR-V 这种二进制字节码的格式提供(有一些库能提供将其它语言编写的着色器编译为 SPIR-V 的能力,比如 shaderc )。

WGSL (WebGPU Shading Language) 出现之前,很多开发者或团队是通过宏及各种转译工具来将自己的着色器编译到不同目标平台的,他们自然是希望有一个标准化的统一语言。

WebGPU 成员花了 2 年半的时间来争论 WebGPU 是否应该有自己的着色语言。kvark 将这场争论中的核心论点组成了一张流图,它是 SVG 格式的,支持在网页中无损放大查看。

WGSL 的目标不是要与 GLSL 兼容,它是对现代着色器语言的重新设计。

2020 年 4 月 27 日,WGSL 标准有了第一次提交。自此开始,wgpu 和 dawn 都摆脱了对 shaderc 之类复杂繁重的着色器转译工具的依赖。wgpu 里使用的 WGSL 转译工具叫 naga, kvark 有一篇博客(Shader translation benchmark)对比了 naga 相比于其它转译工具的性能优化,总体来说,有 10 倍以上的性能优势。

2023 年之前,WGSL 的学习资源不多,唯一好的参考是 WGSL 规范,但它是对语言实现细节的规范,对普通用户来说有点难以理解。 我从 2018 年开始使用 wgpu(那时还是使用 GLSL 作为着色器语言),2021 年底完成了个人作品 字习 Pro 及其他几个练手作品从 GLSL 到 WGSL 的 100 多个着色器的移植工作,在这个过程中对这两个着色器语言有了比较深入的了解。这个增补章节旨在介绍 WGSL 的一些基础知识,希望这对从 OpenGL / WebGL 迁移到 WebGPU 的朋友带来一点有益的经验(下边的所有 GLSL 代码均是按照 GLSL450 标准编写的)。

增补两个网上新出现的学习资源:

Tour of WGSLcompute.toys

一个简单的绘制着色器:对比 GLSL

GLSL 的绘制着色器:

rust
// 顶点着色器文件
-layout(location = 0) in vec3 position;
-layout(location = 1) in vec2 texcoord;
-layout(location = 0) out vec2 uv;
-
-layout(set = 0, binding = 0) uniform UniformParams {
-    mat4 mvp_matrix;
-    vec3 tint_color;
-};
-
-void main() {
-    gl_Position = mvp_matrix * vec4(position, 1.0);
-    uv = texcoord;
-}
-
-// 片元着色器文件
-layout(location = 0) in vec2 uv;
-layout(location = 0) out vec4 frag_color;
-
-layout(set = 0, binding = 0) uniform UniformParams {
-    mat4 mvp_matrix;
-    vec3 tint_color;
-};
-layout(set = 0, binding = 1) uniform texture2D textureFront;
-layout(set = 0, binding = 2) uniform sampler samplerFront;
-
-void main(void) {
-  vec4 front = texture(sampler2D(textureFront, samplerFront), uv);
-  frag_color = front * vec4(tint_color.rgb, 1.0);;
-}

下边是使用 WGSL 的等价实现,在 WGSL 中,我们通常将顶点着色器与片元着色器写在同一个文件中:

rust
struct VertexOutput {
-    @location(0) uv: vec2f,
-    @builtin(position) position: vec4f,
-};
-
-struct UniformParams {
-    mvp: mat4x4f,
-	tint_color: vec3f,
-};
-
-@group(0) @binding(0) var<uniform> params: UniformParams;
-
-@vertex
-fn vs_main(@location(0) pos: vec3f, @location(1) uv: vec2f) -> VertexOutput {
-    var out: VertexOutput;
-    out.position = params.mvp * vec4f(pos, 1.0);
-    out.uv = uv;
-    return out;
-}
-
-@group(0) @binding(1) var texture_front: texture_2d<f32>;
-@group(0) @binding(2) var sampler_front: sampler;
-
-@fragment
-fn fs_main(input: VertexOutput) -> @location(0) vec4f {
-    let front = textureSample(texture_front, sampler_front, input.uv);
-    return front * vec4f(params.tint_color, 1.0);
-}

计算着色器:继续对比 GLSL

GLSL 的计算着色器, 实现在 x 轴上的高斯模糊:

rust
layout(local_size_x = 16, local_size_y = 16) in;
-
-layout(set = 0, binding = 0) uniform InfoParams {
-  ivec2 img_size;
-};
-layout(set = 0, binding = 1) uniform readonly image2D src_pic;
-layout(set = 0, binding = 2, rgba32f) uniform image2D swap_pic;
-
-const float WEIGHT[5] = float[](0.2, 0.1, 0.10, 0.1, 0.1);
-
-void main() {
-  ivec2 uv = ivec2(gl_GlobalInvocationID.xy);
-  if (uv.x > img_size.x || uv.y > img_size.y) {
-    return;
-  }
-
-  vec4 temp = imageLoad(src_pic, uv) * WEIGHT[0];
-  ivec2 uvMax = img_size - 1;
-  for (int i = 1; i < 5; i += 1) {
-    ivec2 offset_uv = ivec2(1.0, 0) * i;
-    temp += imageLoad(src_pic, clamp(uv + offset_uv, ivec2(0), uvMax)) * WEIGHT[i];
-    temp += imageLoad(src_pic, clamp(uv - offset_uv, ivec2(0), uvMax)) * WEIGHT[i];
-  }
-  imageStore(swap_pic, uv, temp);
-}
WebGL 2.0 并不支持计算着色器,所以上面的 GLSL 计算着色器只能在 Native 端使用。

WGSL 版本的对等实现:

rust
struct InfoParams {
-  img_size: vec2<i32>,
-};
-
-@group(0) @binding(0) var<uniform> params: InfoParams;
-@group(0) @binding(1) var src_pic: texture_2d<f32>;
-@group(0) @binding(2) var swap_pic: texture_storage_2d<rgba32float, write>;
-
-let WEIGHT: array<f32, 5> = array<f32, 5>(0.2, 0.1, 0.10, 0.1, 0.1);
-
-@compute @workgroup_size(16, 16)
-fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
-  let uv = vec2<i32>(global_id.xy);
-  if (uv.x >= params.img_size.x || uv.y >= params.img_size.y) {
-    return;
-  }
-
-  var temp = textureLoad(src_pic, uv, 0) * WEIGHT[0];
-  let uvMax: vec2<i32> = params.img_size - 1;
-  for (var i: i32 = 1; i <= 4; i += 1) {
-    var uvOffset = vec2<i32>(3, 0) * i;
-    temp += textureLoad(src_pic, clamp(uv + uvOffset, vec2<i32>(0), uvMax), 0) * WEIGHT[i];
-    temp += textureLoad(src_pic, clamp(uv - uvOffset, vec2<i32>(0), uvMax), 0) * WEIGHT[i];
-  }
-  textureStore(swap_pic, uv, temp);
-}

你应该注意到了很多差异,比如:

  • 顶点、片元、计算着色器的入口函数(WebGPU 中叫入口点 Entry Point)声明方式差异;
  • 计算着色器工作组(Workgroup)大小的声明方式差异;
  • 许多细节必须硬编码,例如输入和输出的特定位置;
  • 结构体的使用差异;
  • ...

总体上 WGSL 代码要比 GLSL 明晰得多。这是 WGSL 的一大优点,几乎所有内容都具有明确的自说明特性。 下边我们来深入了解一些关键区别。

入口点

WGSL 没有强制使用固定的 main() 函数作为入口点(Entry Point),它通过 @vertex、@fragment、@compute 三个着色器阶段(Shader Stage)标记提供了足够的灵活性,让开发人员能更好地组织着色器代码。你可以给入口点取任意函数名,只要不重名,还能将所有阶段(甚至是不同着色器的同一个阶段)的代码组织到同一个文件中:

rust
// 顶点着色器入口点
-@vertex
-fn vs_main() {}
-
-// 片元着色器入口点
-@fragment
-fn fs_main() -> @location(X) vec4f{}
-
-// 计算着色器入口点
-@compute
-fn cs_main() {}

工作组

计算着色器中,一个工作组(Workgroup)就是一组调用,它们同时执行一个计算着色器阶段入口点,并共享对工作组地址空间中着色器变量的访问。可以将工作组理解为一个三维网格,我们通过(x, y, z)三个维度来声明当前计算着色器的工作组大小,每个维度上的默认值都是 1。

WGSL 声明工作组大小的语法相比 GLSL 简洁明了:

rust
// GLSL
-layout(local_size_x = 16, local_size_y = 16) in;
-
-// WGSL
-@workgroup_size(16, 16) // x = 16, y = 16, z = 1
-@workgroup_size(16)     // x = 16, y = 1, z = 1

Group 与 Binding 属性

WGSL 中每个资源都使用了 @group(X)@binding(X) 属性标记,例如 @group(0) @binding(0) var<uniform> params: UniformParams 它表示的是 Uniform buffer 对应于哪个绑定组中的哪个绑定槽(对应于 wgpu API 调用)。这与 GLSL 中的 layout(set = X, binding = X) 布局标记类似。WGSL 的属性非常明晰,描述了着色器阶段到结构的精确二进制布局的所有内容。

变量声明

WGSL 对于基于显式类型的 var 的变量声明有不同的语法。

rust
// GLSL:
-lowp vec4 color;
-// 或者,也可以不使用精度说明符
-vec4 color;
-
-// WGSL:
-var color: vec4f;

WGSL 没有像 lowp 这样的精度说明符, 而是显式指定具体类型,例如 f32(32 位浮点数)。如果要使用 f16 类型,需要在你的 WebGPU 程序中开启 shader-f16 扩展(wgpu 中目前已经加入了此扩展,但是 naga 中还没有完全实现对 f16 的支持)。

WGSL 支持自动类型推断。因此,如果在声明变量的同时进行赋值,就不必指定类型:

rust
// 显式指定变量类型声明
-var color: vec4f = vec4f(1.0, 0.0, 0.0, 1.0);
-
-// 省略类型声明,变量类型将在编译时自动推断得出
-var color = vec4f(1.0, 0.0, 0.0, 1.0);

WGSL 中的 var let 关键字与 Swift 语言一样:

  • var 表示变量可变或可被重新赋值(与 Rust 中的 let mut 一样);
  • let 表示变量不可变,不能重新赋值;

结构体

在 WGSL 中,结构体(struct)用于表示 Uniform 及 Storage 缓冲区以及着色器的输入和输出。Uniform 缓冲区与 GLSL 类似,Storage 缓冲区虽然也在 GLSL 中存在等价物,但是 WebGL 2.0 并不支持。

WGSL 结构体字段对齐规则也与 GLSL 几乎一致,想要了解更多细节,可查看 WGSL 规范中的字节对齐规则示例

rust
// GLSL
-layout(set = 0, binding = 0) uniform UniformParams {
-    mat4 mvp_matrix;
-    vec3 tint_color;
-};
-// ...
-gl_Position = mvp_matrix * vec4(position, 1.0);
-
-
-// WGSL
-struct UniformParams {
-    mvp: mat4x4f,
-	tint_color: vec3f,
-};
-@group(0) @binding(0) var<uniform> params: UniformParams;
-// ...
-out.position = params.mvp * vec4f(pos, 1.0);

注意到上面 Uniform 缓冲区在声明及使用上的两个区别了吗?

  1. WGSL 需要先定义结构体然后才能声明绑定,而 GLSL 可以在声明绑定的同时定义(当然也支持先定义);
  2. WGSL 里需要用声明的变量来访问结构体字段,而 GLSL 里是直接使用结构体中的字段;

WGSL 的输入和输出结构体比较独特,在 GLSL 中没有对应物。入口函数接受输入结构,返回输出结构,并且结构体的所有字段都有 location(X) 属性注释。 如果只有单个输入或输出,那使用结构体就是可选的。

这种明确定义输入和输出的方式,使得 WGSL 的代码逻辑更加清晰,明显优于在 GLSL 中给魔法变量赋值的方式。

下边是一个顶点着色器的输出结构体(同时它也是对应的片元着色器的输入结构体):

rust
struct VertexOutput {
-    @location(0) uv: vec2f,
-    @builtin(position) position: vec4f,
-};
  • @builtin(position) 内建属性标记的字段对应着 GLSL 顶点着色器中的 gl_Position 内建字段。
  • @location(X) 属性标记的字段对应着 GLSL 顶点着色器中的 layout(location = X) out ... 以及片元着色中的 layout(location = X) in ...;

WGSL 不再需要像 GLSL 一样,在顶点着色器中定义完输出字段后,再到片元着色器中定义相应的输入字段。

函数语法

WGSL 函数语法与 Rust 一致, 而 GLSL 是类 C 语法。一个简单的 add 函数如下:

rust
// GLSL
-float add(float a, float b) {
-    return a + b;
-}
-
-// WGSL
-fn add(a: f32, b: f32) -> f32 {
-	return a + b;
-}

纹理

采样纹理

WGSL 中采样纹理总是要指定纹素(Texel)的数据类型 texture_2d<T>texture_3d<T>texture_cube<T>texture_cube_array<T>(T 必须是 f32、i32、u32 这三种类型之一),而 GLSL 中是没有纹素类型信息的,只有查看使用此着色器的程序源码才能知道:

rust
// GLSL
-layout(set = 0, binding = 1) uniform texture2D texture_front;
-
-// WGSL
-@group(0) @binding(1) var texture_front: texture_2d<f32>;

Storage 纹理

WGSL 中存储纹理的数据类型为 texture_storage_XX<T, access>, 而 GLSL 中没有明确的存储纹理类型,如果需要当做存储纹理使用,就需要在 layout(...) 中标记出纹素格式:

rust
// GLSL
-layout(set = 0, binding = 2, rgba32f) uniform image2D swap_pic;
-
-// WGSL
-@group(0) @binding(2) var swap_pic: texture_storage_2d<rgba32float, write>;

在目前的 WebGPU 标准中, 存储纹理的 access 只能为 write(只写), wgpu 能在 native 中支持 read_write(可读可写)。

更多 WGSL 语法细节

三元运算符

GLSL 支持三元运算符 ? : , WGSL 并不直接支持,但提供了内置函数 select(falseValue,trueValue,condition)

rust
// GLSL
-int n = isTrue ? 1 : 0;
-
-// WGSL
-let n: i32 = select(0, 1, isTrue);

花括号

WGSL 中的 if else 语法不能省略大括号(与 Rust 及 Swift 语言一样):

rust
// GLSL
-if (gray > 0.2) n = 65600;
-
-// WGSL
-if (gray > 0.2) { n = 65600; }

求模运算

GLSL 中我们使用 mod 函数做求模运算,WGSL 中有一个长得类似的函数 modf, 但它的功能是将传入参数分割为小数与整数两部分。在 WGSL 中需要使用 % 运算符来求模, 且 mod% 的工作方式还略有不同, mod 内部使用的是 floor (x - y * floor(x / y)), 而 % 内部使用的是 trunc (x - y * trunc(x / y)):

rust
// GLSL
-float n = mod(x, y);
-
-// WGSL
-let n = x % y;

着色器预处理

听到过很多人抱怨 WGSL 不提供预处理器,但其实所有的着色器语言都不自己提供预处理,只是我们可能已经习惯了使用已经封装好预处理逻辑的框架。

其实自己写一个预处理逻辑也是非常简单的事,有两种实现预处理的机制:

  1. 着色器被调用时实时预处理(对运行时性能会产生负影响);
  2. 利用 build.rs 在程序编译阶段预处理,并在磁盘上生成预处理后的文件;

这两种实现方式的代码逻辑其实是一样的,仅仅是预处理的时机不同。

下边是一个需要预处理的实现了边缘检测的片元着色器:

rust
///#include "common/group0+vs.wgsl"
-
-///#include "func/edge_detection.wgsl"
-
-@fragment
-fn fs_main(vertex: VertexOutput) -> @location(0) vec4f {
-    let color = textureSample(tex, tex_sampler, vertex.uv);
-    return vec4f(edge_detection(length(color.rgb), 0.125));
-}

///#include 后面的路径分别指向的是 common 与 func 目录下已经实现好的通用顶点着色器与边缘检测函数,我们现在按第 2 种机制实现一个简单的预处理来自动将顶点着色器及边缘检测函数包含进来:

rust
const WGSL_FOLDER: &'static str = "../wgsl_preprocessed";
-const INCLUDE: &'static str = "///#include ";
-
-fn main() -> Result<(), Box<dyn Error>> {
-    // 这一行告诉 cargo 如果 /wgsl/ 目录中的内容发生了变化,就重新运行脚本
-    println!("cargo:rerun-if-changed=/../wgsl/*");
-
-    // 需要预处理的着色器数组(当然,更好的方式是读取并遍历待处理文件夹)
-    let shader_files = vec!["edge_detection"];
-
-    // 创建预处理后着色器的存放目录
-    std::fs::create_dir_all(WGSL_FOLDER)?;
-    for name in shader_files {
-        let _ = regenerate_shader(name);
-    }
-    Ok(())
-}
-
-fn regenerate_shader(shader_name: &str) -> Result<(), Box<dyn Error>> {
-    let base_dir = env!("CARGO_MANIFEST_DIR");
-    let path = PathBuf::from(&base_dir)
-        .join("../wgsl")
-        .join(format!("{}.wgsl", shader_name));
-    let mut out_path = WGSL_FOLDER.to_string();
-    out_path += &format!("/{}.wgsl", shader_name.replace("/", "_"));
-
-    let code = match read_to_string(&path) {
-        Ok(code) => code,
-        Err(e) => {
-            panic!("无法读取 {:?}: {:?}", path, e)
-        }
-    };
-
-    let mut shader_source = String::new();
-    parse_shader_source(&code, &mut shader_source, &base_dir);
-
-    let mut f = std::fs::File::create(&std::path::Path::new(&base_dir).join(&out_path))?;
-    f.write_all(shader_source.as_bytes())?;
-
-    Ok(())
-}
-
-fn parse_shader_source(source: &str, output: &mut String, base_path: &str) {
-    for line in source.lines() {
-        if line.starts_with(INCLUDE) {
-            // 支持一次 include 多个外部着色器文件,文件路径之间用 , 号分割
-            let imports = line[INCLUDE.len()..].split(',');
-            // 遍历所有待导入的文件,递归处理导入的代码里还包括导入的情况
-            for import in imports {
-                if let Some(include) = get_include_code(import, base_path) {
-                    parse_shader_source(&include, output, base_path);
-                } else {
-                    println!("无法找到要导入的着色器文件: {}", import);
-                }
-            }
-        }
-    }
-}
-
-fn get_include_code(key: &str, base_path: &str) -> Option<String> {
-    let path = PathBuf::from(base_path)
-        .join("../wgsl")
-        .join(key.replace('"', ""));
-    let shader = match read_to_string(&path) {
-        Ok(code) => code,
-        Err(e) => panic!("无法读取 {:?}: {:?}", path, e),
-    };
-    Some(shader)
-}

上面的几十行代码就是一套完整的预处理逻辑,它在每次程序编译时自动检查 wgsl/ 目录下的待处理着色器有没有发生变化,如果有变化,就重新处理并在 wgsl_preprocessed/ 目录下写入一个同名的处理后的着色器。

- +
Skip to content

WGSL 着色器语言

WGSL 的来由

WebGPU 的目标是要在各个现代底层图形 API 之上抽象出一套统一的图形 API,而每个底层图形 API 后端都有自己的着色语言:

  • DirectX 使用 HLSL(High Level Shading Language)
  • Metal 使用 MSL(Metal Shading Language)
  • OpenGL 使用 GLSL(OpenGL Shading Language)
  • Vulkan 使用的着色语言又跟之前的图形 API 都不同,它的着色器必须以 SPIR-V 这种二进制字节码的格式提供(有一些库能提供将其它语言编写的着色器编译为 SPIR-V 的能力,比如 shaderc )。

WGSL (WebGPU Shading Language) 出现之前,很多开发者或团队是通过宏及各种转译工具来将自己的着色器编译到不同目标平台的,他们自然是希望有一个标准化的统一语言。

WebGPU 成员花了 2 年半的时间来争论 WebGPU 是否应该有自己的着色语言。kvark 将这场争论中的核心论点组成了一张流图,它是 SVG 格式的,支持在网页中无损放大查看。

WGSL 的目标不是要与 GLSL 兼容,它是对现代着色器语言的重新设计。

2020 年 4 月 27 日,WGSL 标准有了第一次提交。自此开始,wgpu 和 dawn 都摆脱了对 shaderc 之类复杂繁重的着色器转译工具的依赖。wgpu 里使用的 WGSL 转译工具叫 naga, kvark 有一篇博客(Shader translation benchmark)对比了 naga 相比于其它转译工具的性能优化,总体来说,有 10 倍以上的性能优势。

2023 年之前,WGSL 的学习资源不多,唯一好的参考是 WGSL 规范,但它是对语言实现细节的规范,对普通用户来说有点难以理解。 我从 2018 年开始使用 wgpu(那时还是使用 GLSL 作为着色器语言),2021 年底完成了个人作品 字习 Pro 及其他几个练手作品从 GLSL 到 WGSL 的 100 多个着色器的移植工作,在这个过程中对这两个着色器语言有了比较深入的了解。这个增补章节旨在介绍 WGSL 的一些基础知识,希望这对从 OpenGL / WebGL 迁移到 WebGPU 的朋友带来一点有益的经验(下边的所有 GLSL 代码均是按照 GLSL450 标准编写的)。

增补两个网上新出现的学习资源:

Tour of WGSLcompute.toys

一个简单的绘制着色器:对比 GLSL

GLSL 的绘制着色器:

rust
// 顶点着色器文件
+layout(location = 0) in vec3 position;
+layout(location = 1) in vec2 texcoord;
+layout(location = 0) out vec2 uv;
+
+layout(set = 0, binding = 0) uniform UniformParams {
+    mat4 mvp_matrix;
+    vec3 tint_color;
+};
+
+void main() {
+    gl_Position = mvp_matrix * vec4(position, 1.0);
+    uv = texcoord;
+}
+
+// 片元着色器文件
+layout(location = 0) in vec2 uv;
+layout(location = 0) out vec4 frag_color;
+
+layout(set = 0, binding = 0) uniform UniformParams {
+    mat4 mvp_matrix;
+    vec3 tint_color;
+};
+layout(set = 0, binding = 1) uniform texture2D textureFront;
+layout(set = 0, binding = 2) uniform sampler samplerFront;
+
+void main(void) {
+  vec4 front = texture(sampler2D(textureFront, samplerFront), uv);
+  frag_color = front * vec4(tint_color.rgb, 1.0);;
+}

下边是使用 WGSL 的等价实现,在 WGSL 中,我们通常将顶点着色器与片元着色器写在同一个文件中:

rust
struct VertexOutput {
+    @location(0) uv: vec2f,
+    @builtin(position) position: vec4f,
+};
+
+struct UniformParams {
+    mvp: mat4x4f,
+	tint_color: vec3f,
+};
+
+@group(0) @binding(0) var<uniform> params: UniformParams;
+
+@vertex
+fn vs_main(@location(0) pos: vec3f, @location(1) uv: vec2f) -> VertexOutput {
+    var out: VertexOutput;
+    out.position = params.mvp * vec4f(pos, 1.0);
+    out.uv = uv;
+    return out;
+}
+
+@group(0) @binding(1) var texture_front: texture_2d<f32>;
+@group(0) @binding(2) var sampler_front: sampler;
+
+@fragment
+fn fs_main(input: VertexOutput) -> @location(0) vec4f {
+    let front = textureSample(texture_front, sampler_front, input.uv);
+    return front * vec4f(params.tint_color, 1.0);
+}

计算着色器:继续对比 GLSL

GLSL 的计算着色器, 实现在 x 轴上的高斯模糊:

rust
layout(local_size_x = 16, local_size_y = 16) in;
+
+layout(set = 0, binding = 0) uniform InfoParams {
+  ivec2 img_size;
+};
+layout(set = 0, binding = 1) uniform readonly image2D src_pic;
+layout(set = 0, binding = 2, rgba32f) uniform image2D swap_pic;
+
+const float WEIGHT[5] = float[](0.2, 0.1, 0.10, 0.1, 0.1);
+
+void main() {
+  ivec2 uv = ivec2(gl_GlobalInvocationID.xy);
+  if (uv.x > img_size.x || uv.y > img_size.y) {
+    return;
+  }
+
+  vec4 temp = imageLoad(src_pic, uv) * WEIGHT[0];
+  ivec2 uvMax = img_size - 1;
+  for (int i = 1; i < 5; i += 1) {
+    ivec2 offset_uv = ivec2(1.0, 0) * i;
+    temp += imageLoad(src_pic, clamp(uv + offset_uv, ivec2(0), uvMax)) * WEIGHT[i];
+    temp += imageLoad(src_pic, clamp(uv - offset_uv, ivec2(0), uvMax)) * WEIGHT[i];
+  }
+  imageStore(swap_pic, uv, temp);
+}
WebGL 2.0 并不支持计算着色器,所以上面的 GLSL 计算着色器只能在 Native 端使用。

WGSL 版本的对等实现:

rust
struct InfoParams {
+  img_size: vec2<i32>,
+};
+
+@group(0) @binding(0) var<uniform> params: InfoParams;
+@group(0) @binding(1) var src_pic: texture_2d<f32>;
+@group(0) @binding(2) var swap_pic: texture_storage_2d<rgba32float, write>;
+
+let WEIGHT: array<f32, 5> = array<f32, 5>(0.2, 0.1, 0.10, 0.1, 0.1);
+
+@compute @workgroup_size(16, 16)
+fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
+  let uv = vec2<i32>(global_id.xy);
+  if (uv.x >= params.img_size.x || uv.y >= params.img_size.y) {
+    return;
+  }
+
+  var temp = textureLoad(src_pic, uv, 0) * WEIGHT[0];
+  let uvMax: vec2<i32> = params.img_size - 1;
+  for (var i: i32 = 1; i <= 4; i += 1) {
+    var uvOffset = vec2<i32>(3, 0) * i;
+    temp += textureLoad(src_pic, clamp(uv + uvOffset, vec2<i32>(0), uvMax), 0) * WEIGHT[i];
+    temp += textureLoad(src_pic, clamp(uv - uvOffset, vec2<i32>(0), uvMax), 0) * WEIGHT[i];
+  }
+  textureStore(swap_pic, uv, temp);
+}

你应该注意到了很多差异,比如:

  • 顶点、片元、计算着色器的入口函数(WebGPU 中叫入口点 Entry Point)声明方式差异;
  • 计算着色器工作组(Workgroup)大小的声明方式差异;
  • 许多细节必须硬编码,例如输入和输出的特定位置;
  • 结构体的使用差异;
  • ...

总体上 WGSL 代码要比 GLSL 明晰得多。这是 WGSL 的一大优点,几乎所有内容都具有明确的自说明特性。 下边我们来深入了解一些关键区别。

入口点

WGSL 没有强制使用固定的 main() 函数作为入口点(Entry Point),它通过 @vertex、@fragment、@compute 三个着色器阶段(Shader Stage)标记提供了足够的灵活性,让开发人员能更好地组织着色器代码。你可以给入口点取任意函数名,只要不重名,还能将所有阶段(甚至是不同着色器的同一个阶段)的代码组织到同一个文件中:

rust
// 顶点着色器入口点
+@vertex
+fn vs_main() {}
+
+// 片元着色器入口点
+@fragment
+fn fs_main() -> @location(X) vec4f{}
+
+// 计算着色器入口点
+@compute
+fn cs_main() {}

工作组

计算着色器中,一个工作组(Workgroup)就是一组调用,它们同时执行一个计算着色器阶段入口点,并共享对工作组地址空间中着色器变量的访问。可以将工作组理解为一个三维网格,我们通过(x, y, z)三个维度来声明当前计算着色器的工作组大小,每个维度上的默认值都是 1。

WGSL 声明工作组大小的语法相比 GLSL 简洁明了:

rust
// GLSL
+layout(local_size_x = 16, local_size_y = 16) in;
+
+// WGSL
+@workgroup_size(16, 16) // x = 16, y = 16, z = 1
+@workgroup_size(16)     // x = 16, y = 1, z = 1

Group 与 Binding 属性

WGSL 中每个资源都使用了 @group(X)@binding(X) 属性标记,例如 @group(0) @binding(0) var<uniform> params: UniformParams 它表示的是 Uniform buffer 对应于哪个绑定组中的哪个绑定槽(对应于 wgpu API 调用)。这与 GLSL 中的 layout(set = X, binding = X) 布局标记类似。WGSL 的属性非常明晰,描述了着色器阶段到结构的精确二进制布局的所有内容。

变量声明

WGSL 对于基于显式类型的 var 的变量声明有不同的语法。

rust
// GLSL:
+lowp vec4 color;
+// 或者,也可以不使用精度说明符
+vec4 color;
+
+// WGSL:
+var color: vec4f;

WGSL 没有像 lowp 这样的精度说明符, 而是显式指定具体类型,例如 f32(32 位浮点数)。如果要使用 f16 类型,需要在你的 WebGPU 程序中开启 shader-f16 扩展(wgpu 中目前已经加入了此扩展,但是 naga 中还没有完全实现对 f16 的支持)。

WGSL 支持自动类型推断。因此,如果在声明变量的同时进行赋值,就不必指定类型:

rust
// 显式指定变量类型声明
+var color: vec4f = vec4f(1.0, 0.0, 0.0, 1.0);
+
+// 省略类型声明,变量类型将在编译时自动推断得出
+var color = vec4f(1.0, 0.0, 0.0, 1.0);

WGSL 中的 var let 关键字与 Swift 语言一样:

  • var 表示变量可变或可被重新赋值(与 Rust 中的 let mut 一样);
  • let 表示变量不可变,不能重新赋值;

结构体

在 WGSL 中,结构体(struct)用于表示 Uniform 及 Storage 缓冲区以及着色器的输入和输出。Uniform 缓冲区与 GLSL 类似,Storage 缓冲区虽然也在 GLSL 中存在等价物,但是 WebGL 2.0 并不支持。

WGSL 结构体字段对齐规则也与 GLSL 几乎一致,想要了解更多细节,可查看 WGSL 规范中的字节对齐规则示例

rust
// GLSL
+layout(set = 0, binding = 0) uniform UniformParams {
+    mat4 mvp_matrix;
+    vec3 tint_color;
+};
+// ...
+gl_Position = mvp_matrix * vec4(position, 1.0);
+
+
+// WGSL
+struct UniformParams {
+    mvp: mat4x4f,
+	tint_color: vec3f,
+};
+@group(0) @binding(0) var<uniform> params: UniformParams;
+// ...
+out.position = params.mvp * vec4f(pos, 1.0);

注意到上面 Uniform 缓冲区在声明及使用上的两个区别了吗?

  1. WGSL 需要先定义结构体然后才能声明绑定,而 GLSL 可以在声明绑定的同时定义(当然也支持先定义);
  2. WGSL 里需要用声明的变量来访问结构体字段,而 GLSL 里是直接使用结构体中的字段;

WGSL 的输入和输出结构体比较独特,在 GLSL 中没有对应物。入口函数接受输入结构,返回输出结构,并且结构体的所有字段都有 location(X) 属性注释。 如果只有单个输入或输出,那使用结构体就是可选的。

这种明确定义输入和输出的方式,使得 WGSL 的代码逻辑更加清晰,明显优于在 GLSL 中给魔法变量赋值的方式。

下边是一个顶点着色器的输出结构体(同时它也是对应的片元着色器的输入结构体):

rust
struct VertexOutput {
+    @location(0) uv: vec2f,
+    @builtin(position) position: vec4f,
+};
  • @builtin(position) 内建属性标记的字段对应着 GLSL 顶点着色器中的 gl_Position 内建字段。
  • @location(X) 属性标记的字段对应着 GLSL 顶点着色器中的 layout(location = X) out ... 以及片元着色中的 layout(location = X) in ...;

WGSL 不再需要像 GLSL 一样,在顶点着色器中定义完输出字段后,再到片元着色器中定义相应的输入字段。

函数语法

WGSL 函数语法与 Rust 一致, 而 GLSL 是类 C 语法。一个简单的 add 函数如下:

rust
// GLSL
+float add(float a, float b) {
+    return a + b;
+}
+
+// WGSL
+fn add(a: f32, b: f32) -> f32 {
+	return a + b;
+}

纹理

采样纹理

WGSL 中采样纹理总是要指定纹素(Texel)的数据类型 texture_2d<T>texture_3d<T>texture_cube<T>texture_cube_array<T>(T 必须是 f32、i32、u32 这三种类型之一),而 GLSL 中是没有纹素类型信息的,只有查看使用此着色器的程序源码才能知道:

rust
// GLSL
+layout(set = 0, binding = 1) uniform texture2D texture_front;
+
+// WGSL
+@group(0) @binding(1) var texture_front: texture_2d<f32>;

Storage 纹理

WGSL 中存储纹理的数据类型为 texture_storage_XX<T, access>, 而 GLSL 中没有明确的存储纹理类型,如果需要当做存储纹理使用,就需要在 layout(...) 中标记出纹素格式:

rust
// GLSL
+layout(set = 0, binding = 2, rgba32f) uniform image2D swap_pic;
+
+// WGSL
+@group(0) @binding(2) var swap_pic: texture_storage_2d<rgba32float, write>;

在目前的 WebGPU 标准中, 存储纹理的 access 只能为 write(只写), wgpu 能在 native 中支持 read_write(可读可写)。

更多 WGSL 语法细节

三元运算符

GLSL 支持三元运算符 ? : , WGSL 并不直接支持,但提供了内置函数 select(falseValue,trueValue,condition)

rust
// GLSL
+int n = isTrue ? 1 : 0;
+
+// WGSL
+let n: i32 = select(0, 1, isTrue);

花括号

WGSL 中的 if else 语法不能省略大括号(与 Rust 及 Swift 语言一样):

rust
// GLSL
+if (gray > 0.2) n = 65600;
+
+// WGSL
+if (gray > 0.2) { n = 65600; }

求模运算

GLSL 中我们使用 mod 函数做求模运算,WGSL 中有一个长得类似的函数 modf, 但它的功能是将传入参数分割为小数与整数两部分。在 WGSL 中需要使用 % 运算符来求模, 且 mod% 的工作方式还略有不同, mod 内部使用的是 floor (x - y * floor(x / y)), 而 % 内部使用的是 trunc (x - y * trunc(x / y)):

rust
// GLSL
+float n = mod(x, y);
+
+// WGSL
+let n = x % y;

着色器预处理

听到过很多人抱怨 WGSL 不提供预处理器,但其实所有的着色器语言都不自己提供预处理,只是我们可能已经习惯了使用已经封装好预处理逻辑的框架。

其实自己写一个预处理逻辑也是非常简单的事,有两种实现预处理的机制:

  1. 着色器被调用时实时预处理(对运行时性能会产生负影响);
  2. 利用 build.rs 在程序编译阶段预处理,并在磁盘上生成预处理后的文件;

这两种实现方式的代码逻辑其实是一样的,仅仅是预处理的时机不同。

下边是一个需要预处理的实现了边缘检测的片元着色器:

rust
///#include "common/group0+vs.wgsl"
+
+///#include "func/edge_detection.wgsl"
+
+@fragment
+fn fs_main(vertex: VertexOutput) -> @location(0) vec4f {
+    let color = textureSample(tex, tex_sampler, vertex.uv);
+    return vec4f(edge_detection(length(color.rgb), 0.125));
+}

///#include 后面的路径分别指向的是 common 与 func 目录下已经实现好的通用顶点着色器与边缘检测函数,我们现在按第 2 种机制实现一个简单的预处理来自动将顶点着色器及边缘检测函数包含进来:

rust
const WGSL_FOLDER: &'static str = "../wgsl_preprocessed";
+const INCLUDE: &'static str = "///#include ";
+
+fn main() -> Result<(), Box<dyn Error>> {
+    // 这一行告诉 cargo 如果 /wgsl/ 目录中的内容发生了变化,就重新运行脚本
+    println!("cargo:rerun-if-changed=/../wgsl/*");
+
+    // 需要预处理的着色器数组(当然,更好的方式是读取并遍历待处理文件夹)
+    let shader_files = vec!["edge_detection"];
+
+    // 创建预处理后着色器的存放目录
+    std::fs::create_dir_all(WGSL_FOLDER)?;
+    for name in shader_files {
+        let _ = regenerate_shader(name);
+    }
+    Ok(())
+}
+
+fn regenerate_shader(shader_name: &str) -> Result<(), Box<dyn Error>> {
+    let base_dir = env!("CARGO_MANIFEST_DIR");
+    let path = PathBuf::from(&base_dir)
+        .join("../wgsl")
+        .join(format!("{}.wgsl", shader_name));
+    let mut out_path = WGSL_FOLDER.to_string();
+    out_path += &format!("/{}.wgsl", shader_name.replace("/", "_"));
+
+    let code = match read_to_string(&path) {
+        Ok(code) => code,
+        Err(e) => {
+            panic!("无法读取 {:?}: {:?}", path, e)
+        }
+    };
+
+    let mut shader_source = String::new();
+    parse_shader_source(&code, &mut shader_source, &base_dir);
+
+    let mut f = std::fs::File::create(&std::path::Path::new(&base_dir).join(&out_path))?;
+    f.write_all(shader_source.as_bytes())?;
+
+    Ok(())
+}
+
+fn parse_shader_source(source: &str, output: &mut String, base_path: &str) {
+    for line in source.lines() {
+        if line.starts_with(INCLUDE) {
+            // 支持一次 include 多个外部着色器文件,文件路径之间用 , 号分割
+            let imports = line[INCLUDE.len()..].split(',');
+            // 遍历所有待导入的文件,递归处理导入的代码里还包括导入的情况
+            for import in imports {
+                if let Some(include) = get_include_code(import, base_path) {
+                    parse_shader_source(&include, output, base_path);
+                } else {
+                    println!("无法找到要导入的着色器文件: {}", import);
+                }
+            }
+        }
+    }
+}
+
+fn get_include_code(key: &str, base_path: &str) -> Option<String> {
+    let path = PathBuf::from(base_path)
+        .join("../wgsl")
+        .join(key.replace('"', ""));
+    let shader = match read_to_string(&path) {
+        Ok(code) => code,
+        Err(e) => panic!("无法读取 {:?}: {:?}", path, e),
+    };
+    Some(shader)
+}

上面的几十行代码就是一套完整的预处理逻辑,它在每次程序编译时自动检查 wgsl/ 目录下的待处理着色器有没有发生变化,如果有变化,就重新处理并在 wgsl_preprocessed/ 目录下写入一个同名的处理后的着色器。

+ \ No newline at end of file diff --git a/hashmap.json b/hashmap.json index 9e2873c2d..ecde84316 100644 --- a/hashmap.json +++ b/hashmap.json @@ -1 +1 @@ -{"glossary_of_terms.md":"239b8b10","integration-and-debugging_index.md":"531e86b4","intermediate_compute-pipeline_index.md":"96e3a5dc","index.md":"e2543842","beginner_tutorial6-uniforms_index.md":"1fa0ff41","beginner_tutorial8-depth_index.md":"3d09ac63","integration-and-debugging_ios_index.md":"b6b47109","integration-and-debugging_snapdragon-profiler_index.md":"13fbbd81","intermediate_tutorial10-lighting_index.md":"bf469bcb","integration-and-debugging_xcode_index.md":"8dd60914","beginner_tutorial5-textures_index.md":"dd7befe7","beginner_tutorial1-window.md":"16634366","beginner_tutorial2-surface_index.md":"74c14ab6","beginner_tutorial3-pipeline_index.md":"de2ce2ed","intermediate_tutorial11-normals_index.md":"0360ba19","integration-and-debugging_bevy_index.md":"7becd25a","showcase_gifs_index.md":"af0d950b","intermediate_tutorial12-camera_index.md":"bb97ed34","beginner_tutorial7-instancing_index.md":"442b964d","intermediate_tutorial13-terrain_index.md":"cf92cfdc","intermediate_vertex-animation_index.md":"45cd4c66","beginner_wgsl.md":"a349b4fd","integration-and-debugging_android_index.md":"c7a02422","showcase_compute_index.md":"f0a051d7","beginner_tutorial9-models_index.md":"cc4e4508","intermediate_vertex-animation_universal-animation-formula.md":"e078e665","showcase_alignment.md":"7bc0b30c","showcase_threading.md":"a67a1647","showcase_windowless_index.md":"ad5627a0","intermediate_pbr-notes.md":"fc667d08","simuverse.md":"54864002","showcase_pong_index.md":"1eac932a","beginner_tutorial4-buffer_index.md":"41a21df0"} 
+{"glossary_of_terms.md":"UkTnpW7D","showcase_gifs_index.md":"7LiwhyX_","integration-and-debugging_android_index.md":"t6gE4WOH","beginner_tutorial1-window.md":"Phs9gXri","showcase_windowless_index.md":"La71QnH2","beginner_tutorial4-buffer_index.md":"Afffgpgg","integration-and-debugging_bevy_index.md":"kfueqURQ","beginner_tutorial7-instancing_index.md":"VCbHZtRg","showcase_pong_index.md":"oLV3LpsH","integration-and-debugging_index.md":"AdQIXBZG","intermediate_vertex-animation_universal-animation-formula.md":"nYbvkNKu","beginner_tutorial3-pipeline_index.md":"QN_XsHwE","showcase_threading.md":"yFNXGQkQ","intermediate_tutorial12-camera_index.md":"A0Ys8PDv","intermediate_compute-pipeline_index.md":"I_Ricl1V","intermediate_tutorial11-normals_index.md":"JKRpqUo4","integration-and-debugging_snapdragon-profiler_index.md":"UnTr8zla","intermediate_tutorial10-lighting_index.md":"sSfFtIbh","beginner_tutorial8-depth_index.md":"0c_yzyfM","intermediate_pbr-notes.md":"TZH05UCG","integration-and-debugging_ios_index.md":"qvFn1Gez","beginner_tutorial2-surface_index.md":"YCJw9OGa","simuverse.md":"A_tJLgNI","index.md":"UCsFBABa","showcase_alignment.md":"WbW77ARR","showcase_compute_index.md":"toRpHUeP","intermediate_tutorial13-terrain_index.md":"hp3fmBWu","intermediate_vertex-animation_index.md":"19WOMCj-","beginner_tutorial6-uniforms_index.md":"_uEvsrye","integration-and-debugging_xcode_index.md":"jb-X6rcJ","beginner_tutorial9-models_index.md":"gSp6U_hw","beginner_tutorial5-textures_index.md":"DkmoMANr","beginner_wgsl.md":"44PkYt5K"} diff --git a/index.html b/index.html index 9f8d5d289..278f17f28 100644 --- a/index.html +++ b/index.html @@ -5,29 +5,31 @@ 介绍 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

介绍

为了便于读者的理解,译者选择性的添加了一些内容,并对原文中有歧义或错误的地方进行重新表述。所有的添加与修改均不会做单独标记。

翻译时采用了第一人称视角,故,除了带 🆕 标记的章节,教程中的主要指的是原作者 @sotrh

另外,专有名词在一个段落中第一次出现时做了加粗处理,同一段落里反复出现时就不再加粗。

WebGPU 是啥?

WebGPU 是由 W3C GPU for the Web 社区组所发布的规范,目标是允许网页代码以高性能且安全可靠的方式访问 GPU 功能。它通过借鉴 Vulkan API,并将其转换为宿主硬件上使用的各式 API(如 DirectX、Metal、Vulkan)来实现这一目标。

wgpu 又是啥?

wgpu 是基于 WebGPU API 规范的、跨平台的、安全的、纯 Rust 图形 API。它是 Firefox、Servo 和 Deno 中 WebGPU 整合的核心。

wgpu 不仅可以在 Web 环境运行,还可以在 macOS / iOS、Android、Window 和 Linux 等系统上原生运行。

为什么要使用 Rust?

wgpu 实际上提供了 C 语言绑定 (wgpu-native),你可以写 C/C++ 或其他能与 C 互通的语言来使用它。尽管如此,wgpu 本身是用 Rust 实现的,它便利的 Rust 绑定能减少你使用中的阻碍。更重要的是,Rust 是一门高性能,内存和线程安全且极具生产力的现代底层语言。

在学习本教程之前你需要先熟悉 Rust,因为这里不会详细介绍 Rust 的语法知识。如果对 Rust 还不太熟悉,可以回顾一下 Rust 教程Rust 语言圣经。另外还需要熟悉 Rust 包管理工具 Cargo

为什么要学习 wgpu,直接用 JS/TS 搞 WebGPU 开发不香吗?

从 wgpu 及 dawn 这两个主要的 WebGPU 标准的实现库的开发动向可以看出,大量的扩展特性目前只有在 Native 端(Windows、macOS、Linux、iOS、Android)原生运行才能支持。wgpu 更是将 Native 端运行做为首要目标,WebGPU 是做为最低支持的特性集而存在。

使用 wgpu 在桌面及移动端做跨平台原生应用开发的体验极好,甚至我偏向于认为:WebGPU 更容易在 Native 端得到普及。因为不用受限于 1.0 标准啥时候发布,用户的浏览器是否支持等问题,现在就可以发布采用了 wgpu 的商业应用。

学习 wgpu 还有另一个重要的优势,那就是可以利用各种强大的桌面端 GPU 调试工具。在开发大型 2D/3D 应用时,通过使用命令记录/回放、帧捕捉、Buffer 视图等功能,可以快速定位 GPU 层代码/数据的性能瓶颈和程序缺陷。相较于仅依靠浏览器提供的有限调试能力,这些工具能够事半功倍,帮助开发者更快地解决问题。

wgpu/WebGPU 的学习资料是不是很少?

其实不用纠结于 WebGPU 方面的直接学习资料的多少。

WebGPU 就是一套图形接口,绝大部分概念都是各图形接口里通用的,任何一本经典图形学书籍都是我们的学习资料。 要利用好这些经典资料,前提仅仅就是要先学习一套图形接口。因为图形学的书不是使用统一的特定图形接口所写,先学会一个图形接口及常见的概念,然后再去深入学习某个方面的资料就会事半功倍。

现在学习 wgpu 是不是为时尚早?

WebGPU 1.0 API 已经稳定,Google 已经在 2023/4/6 宣布从 Chrome 113 版本开始正式支持 WebGPU

WebGPU Spec 1.0 补充一下 @Kangz 的话: Web 规范有点滑稽,因为“草案”或“推荐”之类的名称在很大程度上是一个管理细节,实际上对规范是否稳定可用没有任何影响。事实上,W3C 程序建议至少有两个浏览器在规范通过“草案”之前已经发布了兼容的实现,但显然这些浏览器会认为规范相当稳定,然后才愿意发布实现。然而,这确实令开发人员感到困惑,我们对此深表歉意。

如何运行示例代码

本教程的示例代码大部分放在 code/ 目录下,且示例程序的名称与程序目录同名。 比如,第一章 依赖与窗口 所在的目录是 code/beginner/tutorial1-window, 此示例程序的名称也叫 tutorial1-window:

sh
# 在桌面环境本地运行
-cargo run --bin tutorial3-pipeline
+    
Skip to content

介绍

为了便于读者的理解,译者选择性的添加了一些内容,并对原文中有歧义或错误的地方进行重新表述。所有的添加与修改均不会做单独标记。

翻译时采用了第一人称视角,故,除了带 🆕 标记的章节,教程中的主要指的是原作者 @sotrh

另外,专有名词在一个段落中第一次出现时做了加粗处理,同一段落里反复出现时就不再加粗。

WebGPU 是啥?

WebGPU 是由 W3C GPU for the Web 社区组所发布的规范,目标是允许网页代码以高性能且安全可靠的方式访问 GPU 功能。它通过借鉴 Vulkan API,并将其转换为宿主硬件上使用的各式 API(如 DirectX、Metal、Vulkan)来实现这一目标。

wgpu 又是啥?

wgpu 是基于 WebGPU API 规范的、跨平台的、安全的、纯 Rust 图形 API。它是 Firefox、Servo 和 Deno 中 WebGPU 整合的核心。

wgpu 不仅可以在 Web 环境运行,还可以在 macOS / iOS、Android、Window 和 Linux 等系统上原生运行。

为什么要使用 Rust?

wgpu 实际上提供了 C 语言绑定 (wgpu-native),你可以写 C/C++ 或其他能与 C 互通的语言来使用它。尽管如此,wgpu 本身是用 Rust 实现的,它便利的 Rust 绑定能减少你使用中的阻碍。更重要的是,Rust 是一门高性能,内存和线程安全且极具生产力的现代底层语言。

在学习本教程之前你需要先熟悉 Rust,因为这里不会详细介绍 Rust 的语法知识。如果对 Rust 还不太熟悉,可以回顾一下 Rust 教程Rust 语言圣经。另外还需要熟悉 Rust 包管理工具 Cargo

为什么要学习 wgpu,直接用 JS/TS 搞 WebGPU 开发不香吗?

从 wgpu 及 dawn 这两个主要的 WebGPU 标准的实现库的开发动向可以看出,大量的扩展特性目前只有在 Native 端(Windows、macOS、Linux、iOS、Android)原生运行才能支持。wgpu 更是将 Native 端运行做为首要目标,WebGPU 是做为最低支持的特性集而存在。

使用 wgpu 在桌面及移动端做跨平台原生应用开发的体验极好,甚至我偏向于认为:WebGPU 更容易在 Native 端得到普及。因为不用受限于 1.0 标准啥时候发布,用户的浏览器是否支持等问题,现在就可以发布采用了 wgpu 的商业应用。

学习 wgpu 还有另一个重要的优势,那就是可以利用各种强大的桌面端 GPU 调试工具。在开发大型 2D/3D 应用时,通过使用命令记录/回放、帧捕捉、Buffer 视图等功能,可以快速定位 GPU 层代码/数据的性能瓶颈和程序缺陷。相较于仅依靠浏览器提供的有限调试能力,这些工具能够事半功倍,帮助开发者更快地解决问题。

wgpu/WebGPU 的学习资料是不是很少?

其实不用纠结于 WebGPU 方面的直接学习资料的多少。

WebGPU 就是一套图形接口,绝大部分概念都是各图形接口里通用的,任何一本经典图形学书籍都是我们的学习资料。 要利用好这些经典资料,前提仅仅就是要先学习一套图形接口。因为图形学的书不是使用统一的特定图形接口所写,先学会一个图形接口及常见的概念,然后再去深入学习某个方面的资料就会事半功倍。

现在学习 wgpu 是不是为时尚早?

WebGPU 1.0 API 已经稳定,Google 已经在 2023/4/6 宣布从 Chrome 113 版本开始正式支持 WebGPU

WebGPU Spec 1.0 补充一下 @Kangz 的话: Web 规范有点滑稽,因为“草案”或“推荐”之类的名称在很大程度上是一个管理细节,实际上对规范是否稳定可用没有任何影响。事实上,W3C 程序建议至少有两个浏览器在规范通过“草案”之前已经发布了兼容的实现,但显然这些浏览器会认为规范相当稳定,然后才愿意发布实现。然而,这确实令开发人员感到困惑,我们对此深表歉意。

如何运行示例代码

本教程的示例代码大部分放在 code/ 目录下,且示例程序的名称与程序目录同名。 比如,第一章 依赖与窗口 所在的目录是 code/beginner/tutorial1-window, 此示例程序的名称也叫 tutorial1-window:

sh
# 在桌面环境本地运行
+cargo run --bin tutorial3-pipeline
 
-# 在浏览器中运行
-# 需要先安装 Rust WebAssembly target
-rustup target add wasm32-unknown-unknown
-# 使用 WebGPU(需要使用 Chrome/Edge 113+ 或 Chrome/Edge Canary,Canary 需手动开启 WebGPU 试验功能)
-# compute-pipeline, vertex-animation 及 hilbert-curve 示例只能在桌面端与浏览器端 WebGPU 环境运行
-cargo run-wasm --bin vertex-animation
-# 使用 WebGL 2.0
-cargo run-wasm --bin tutorial2-surface --features webgl

调试与集成 部分的代码是 2 个独立的项目: wgpu-in-appbevy-in-app

simuverse 是基于 wgpu + egui 的扩展示例,提供了粒子矢量场,流体场及 GPU 程序化纹理的实现。

如何开启浏览器 WebGPU 试验功能

FireFox

安装 Nightly 版本,在地址栏中输入 about:config , 将 dom.webgpu.enabled 设置为 true: FireFox Nightly

Chrome

Chrome 113+ 已经默认开启了 WebGPU 支持。如果安装的是 Canary 版,在地址栏中输入 chrome://flags , 将 Unsafe WebGPU 设置为 Enabled: Chrome Canary

Microsoft Edge

Microsoft Edge 113+ 也已默认开启了 WebGPU 支持。如果安装的是 Canary 版,在地址栏中输入 edge://flags , 将 Unsafe WebGPU Support 设置为 Enabled: Edge Canary

关于译者

我是一名移动端架构师,有多年使用 OpenGL ES / WebGL, Metal 的实践经验。2018 年开始接触 WebGPU,目前正积极地参与到 wgpu 开源项目的开发与完善之中,并且已于两年前在 AppStore 上架了基于 wgpu 实现的毛笔书法模拟 App 字习 Pro

加入 wgpu 微信学习交流群

- +# 在浏览器中运行 +# 需要先安装 Rust WebAssembly target +rustup target add wasm32-unknown-unknown +# 使用 WebGPU(需要使用 Chrome/Edge 113+ 或 Chrome/Edge Canary,Canary 需手动开启 WebGPU 试验功能) +# compute-pipeline, vertex-animation 及 hilbert-curve 示例只能在桌面端与浏览器端 WebGPU 环境运行 +cargo run-wasm --bin vertex-animation +# 使用 WebGL 2.0 +cargo run-wasm --bin tutorial2-surface --features webgl

调试与集成 部分的代码是 2 个独立的项目: wgpu-in-appbevy-in-app

simuverse 是基于 wgpu + egui 的扩展示例,提供了粒子矢量场,流体场及 GPU 程序化纹理的实现。

如何开启浏览器 WebGPU 功能支持

Chrome

Chrome 113+、Microsoft Edge 113+ 均已经默认开启了 WebGPU 支持。

FireFox

安装 Nightly 版本,在地址栏中输入 about:config , 将 dom.webgpu.enabled 设置为 true: FireFox Nightly

Safari

安装 Safari Technology Preview 185+,从顶部菜单栏选择 开发 -> 功能标志 , 搜索并勾选 WebGPU: Edge Canary

关于译者

我是一名移动端架构师,有多年使用 OpenGL / WebGL, Metal 的实践经验。2018 年开始接触 WebGPU,目前正积极地参与到 wgpu 开源项目的开发与完善之中,并且已于两年前在 AppStore 上架了基于 wgpu 实现的毛笔书法模拟 App 字习 Pro

加入 wgpu 微信学习交流群

+ \ No newline at end of file diff --git a/integration-and-debugging/android/index.html b/integration-and-debugging/android/index.html index 0e9f42d15..ea6937c7b 100644 --- a/integration-and-debugging/android/index.html +++ b/integration-and-debugging/android/index.html @@ -5,227 +5,229 @@ 与 Android App 集成 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

与 Android App 集成

开发环境配置

假设你的电脑上已经安装了 Android Studio,从菜单栏打开 SDK 管理器(Tools > SDK Manager > Android SDK > SDK Tools),勾选以下 3 个选项后点击 OK 按钮确认:

  • Android SDK Build-Tools
  • Android SDK Command-line Tools
  • NDK(Side by side)

然后,设置如下两个系统环境变量:

sh
export ANDROID_SDK_ROOT=$HOME/Library/Android/sdk
-# 注意,此处需要替换为你电脑上安装的 NDK 的版本号
-export NDK_HOME=$ANDROID_SDK_ROOT/ndk/23.1.7779620

添加安卓构建目标支持

到目前为止,Android 模拟器和虚拟设备还不支持 Vulkan 图形 API(仅支持 OpenGL ES),所以开发或调试 wgpu 程序在 Android 系统上的运行时,建议使用真机(各种云测平台的云真机也行)。

如果需要支持模拟器运行,还得加上 x86_64-linux-android 与 i686-linux-android 这两个构建目标的支持。需要注意的是,如果指定了 wgpu 项目使用 Vulkan 图形后端(Instance::new(wgpu::Backends::VULKAN)),则在模拟器内运行时会崩溃:

sh
rustup target add aarch64-linux-android armv7-linux-androideabi

自定义窗口对象

要实现一个 wgpu 里能使用的窗口对象,就必须实现 raw-window-handle 中 raw_window_handle() raw_display_handle() 这两个分别定义在 HasRawWindowHandle HasRawDisplayHandle trait 里的抽象接口。

实现 raw_display_handle() 最为简单, 只需要实例化一个空的 AndroidDisplayHandle 对象做为参数。查看 raw-window-handle 的源码就会发现,实现 raw_window_handle() 抽象接口需要用到 AndroidNdkWindowHandle 对象,此对象有一个叫 a_native_window 的字段,用来指向安卓 App 的 ANativeWindow 实例。 下面我们来一步步实现它。

先给项目添加必要的依赖:

toml
[target.'cfg(target_os = "android")'.dependencies]
-jni = "0.19"
-# 星号表示不锁定特定版本,在项目构建及运行时始终保持使用最新版本
-ndk-sys = "*"
-raw-window-handle = "0.5"

然后定义一个 NativeWindow 结构体,它只有一个叫 a_native_window 的字段:

rust
struct NativeWindow {
-    a_native_window: *mut ndk_sys::ANativeWindow,
-}
-impl NativeWindow {
-    // env 和 surface 都是安卓端传递过来的参数
-    fn new(env: *mut JNIEnv, surface: jobject) -> Self {
-        let a_native_window = unsafe {
-            // 获取与安卓端 surface 对象关联的 ANativeWindow,以便能通过 Rust 与之交互。
-            // 此函数在返回 ANativeWindow 的同时会自动将其引用计数 +1,以防止该对象在安卓端被意外释放。
-            ndk_sys::ANativeWindow_fromSurface(env as *mut _, surface as *mut _)
-        };
-        Self { a_native_window }
-    }
-}

最后给 NativeWindow 实现 raw-window-handle 抽象接口:

rust
unsafe impl HasRawWindowHandle for NativeWindow {
-    fn raw_window_handle(&self) -> RawWindowHandle {
-        let mut handle = AndroidNdkWindowHandle::empty();
-        handle.a_native_window = self.a_native_window as *mut _ as *mut c_void;
-        RawWindowHandle::AndroidNdk(handle)
-    }
-}
+    
Skip to content

与 Android App 集成

开发环境配置

假设你的电脑上已经安装了 Android Studio,从菜单栏打开 SDK 管理器(Tools > SDK Manager > Android SDK > SDK Tools),勾选以下 3 个选项后点击 OK 按钮确认:

  • Android SDK Build-Tools
  • Android SDK Command-line Tools
  • NDK(Side by side)

然后,设置如下两个系统环境变量:

sh
export ANDROID_SDK_ROOT=$HOME/Library/Android/sdk
+# 注意,此处需要替换为你电脑上安装的 NDK 的版本号
+export NDK_HOME=$ANDROID_SDK_ROOT/ndk/23.1.7779620

添加安卓构建目标支持

到目前为止,Android 模拟器和虚拟设备还不支持 Vulkan 图形 API(仅支持 OpenGL ES),所以开发或调试 wgpu 程序在 Android 系统上的运行时,建议使用真机(各种云测平台的云真机也行)。

如果需要支持模拟器运行,还得加上 x86_64-linux-android 与 i686-linux-android 这两个构建目标的支持。需要注意的是,如果指定了 wgpu 项目使用 Vulkan 图形后端(Instance::new(wgpu::Backends::VULKAN)),则在模拟器内运行时会崩溃:

sh
rustup target add aarch64-linux-android armv7-linux-androideabi

自定义窗口对象

要实现一个 wgpu 里能使用的窗口对象,就必须实现 raw-window-handle 中 raw_window_handle() raw_display_handle() 这两个分别定义在 HasRawWindowHandle HasRawDisplayHandle trait 里的抽象接口。

实现 raw_display_handle() 最为简单, 只需要实例化一个空的 AndroidDisplayHandle 对象做为参数。查看 raw-window-handle 的源码就会发现,实现 raw_window_handle() 抽象接口需要用到 AndroidNdkWindowHandle 对象,此对象有一个叫 a_native_window 的字段,用来指向安卓 App 的 ANativeWindow 实例。 下面我们来一步步实现它。

先给项目添加必要的依赖:

toml
[target.'cfg(target_os = "android")'.dependencies]
+jni = "0.19"
+# 星号表示不锁定特定版本,在项目构建及运行时始终保持使用最新版本
+ndk-sys = "*"
+raw-window-handle = "0.5"

然后定义一个 NativeWindow 结构体,它只有一个叫 a_native_window 的字段:

rust
struct NativeWindow {
+    a_native_window: *mut ndk_sys::ANativeWindow,
+}
+impl NativeWindow {
+    // env 和 surface 都是安卓端传递过来的参数
+    fn new(env: *mut JNIEnv, surface: jobject) -> Self {
+        let a_native_window = unsafe {
+            // 获取与安卓端 surface 对象关联的 ANativeWindow,以便能通过 Rust 与之交互。
+            // 此函数在返回 ANativeWindow 的同时会自动将其引用计数 +1,以防止该对象在安卓端被意外释放。
+            ndk_sys::ANativeWindow_fromSurface(env as *mut _, surface as *mut _)
+        };
+        Self { a_native_window }
+    }
+}

最后给 NativeWindow 实现 raw-window-handle 抽象接口:

rust
unsafe impl HasRawWindowHandle for NativeWindow {
+    fn raw_window_handle(&self) -> RawWindowHandle {
+        let mut handle = AndroidNdkWindowHandle::empty();
+        handle.a_native_window = self.a_native_window as *mut _ as *mut c_void;
+        RawWindowHandle::AndroidNdk(handle)
+    }
+}
 
-unsafe impl HasRawDisplayHandle for NativeWindow {
-    fn raw_display_handle(&self) -> RawDisplayHandle {
-        RawDisplayHandle::Android(AndroidDisplayHandle::empty())
-    }
-}

定义 FFI

Rust 有一个关键字 extern(kotlin 中定义 JNI 函数时也有一个对应的关键字叫 external, 我们接下来会用到),当需要与其他语言编写的代码进行交互时,用于创建和使用外部函数接口(FFI,Foreign Function Interface)。FFI 是一种编程语言定义函数的方式,可以让不同的(外部)编程语言调用这些函数。

在 Rust 这一端,我们通过给公开函数添加 #[no_mangle] 属性来允许安卓端调用此函数:

rust
#[no_mangle]
-#[jni_fn("name.jinleili.wgpu.RustBridge")]
-pub fn createWgpuCanvas(env: *mut JNIEnv, _: JClass, surface: jobject, idx: jint) -> jlong {
-    android_logger::init_once(Config::default().with_min_level(Level::Trace));
-    let canvas = WgpuCanvas::new(AppSurface::new(env as *mut _, surface), idx as i32);
-    info!("WgpuCanvas created!");
-    // 使用 Box 对 Rust 对象进行装箱操作。
-    // 我们无法将 Rust 对象直接传递给外部语言,通过装箱来传递此对象的裸指针 
-    // into_raw 返回指针的同时,也将此对象的内存管理权转交给调用方
-    Box::into_raw(Box::new(canvas)) as jlong
-}
+unsafe impl HasRawDisplayHandle for NativeWindow {
+    fn raw_display_handle(&self) -> RawDisplayHandle {
+        RawDisplayHandle::Android(AndroidDisplayHandle::empty())
+    }
+}

定义 FFI

Rust 有一个关键字 extern(kotlin 中定义 JNI 函数时也有一个对应的关键字叫 external, 我们接下来会用到),当需要与其他语言编写的代码进行交互时,用于创建和使用外部函数接口(FFI,Foreign Function Interface)。FFI 是一种编程语言定义函数的方式,可以让不同的(外部)编程语言调用这些函数。

在 Rust 这一端,我们通过给公开函数添加 #[no_mangle] 属性来允许安卓端调用此函数:

rust
#[no_mangle]
+#[jni_fn("name.jinleili.wgpu.RustBridge")]
+pub fn createWgpuCanvas(env: *mut JNIEnv, _: JClass, surface: jobject, idx: jint) -> jlong {
+    android_logger::init_once(Config::default().with_min_level(Level::Trace));
+    let canvas = WgpuCanvas::new(AppSurface::new(env as *mut _, surface), idx as i32);
+    info!("WgpuCanvas created!");
+    // 使用 Box 对 Rust 对象进行装箱操作。
+    // 我们无法将 Rust 对象直接传递给外部语言,通过装箱来传递此对象的裸指针 
+    // into_raw 返回指针的同时,也将此对象的内存管理权转交给调用方
+    Box::into_raw(Box::new(canvas)) as jlong
+}
 
-#[no_mangle]
-#[jni_fn("name.jinleili.wgpu.RustBridge")]
-pub fn enterFrame(_env: *mut JNIEnv, _: JClass, obj: jlong) {
-    // 直接获取到指针指代的 Rust 对象的可变借用
-    let obj = unsafe { &mut *(obj as *mut WgpuCanvas) };
-    obj.enter_frame();
-}

#[no_mangle] 属性告诉 Rust 关闭函数名称修改功能。如果不加这个属性,Rust 编译器就会修改函数名,这是现代编译器为了解决唯⼀名称解析引起的各种问题所引⼊的技术。如果函数名被修改了,外部编程语言就⽆法按原名称调⽤,开发者也没办法知道修改后的函数名。

#[jni_fn("XXX")] 这个函数签名属性需要重点介绍一下,做过安卓 JNI 开发的都知道,JNI 函数的签名是又臭又长,比如上面的 createWgpuCanvas 函数,手写符合 JNI 规范的函数签名就会是 Java_name_jinleili_wgpu_RustBridge_createWgpuCanvas 这样,难写且难维护 #[jni_fn("name.jinleili.wgpu.RustBridge")] 这个属性能自动帮我们生成兼容 JNI 的函数签名,使正确编写函数签名变得更加容易。为此,我们需要 jni_fn 依赖项:

toml
[target.'cfg(target_os = "android")'.dependencies]
-jni_fn = "0.1"
-# 其它依赖项

在安卓端,我们定义一个命名空间为 name.jinleili.wgpuRustBridge 类来加载 Rust 程序,并使用 external 关键字标记好具体实现在 Rust 端的外部函数声明:

kotlin
package name.jinleili.wgpu
+#[no_mangle]
+#[jni_fn("name.jinleili.wgpu.RustBridge")]
+pub fn enterFrame(_env: *mut JNIEnv, _: JClass, obj: jlong) {
+    // 直接获取到指针指代的 Rust 对象的可变借用
+    let obj = unsafe { &mut *(obj as *mut WgpuCanvas) };
+    obj.enter_frame();
+}

#[no_mangle] 属性告诉 Rust 关闭函数名称修改功能。如果不加这个属性,Rust 编译器就会修改函数名,这是现代编译器为了解决唯⼀名称解析引起的各种问题所引⼊的技术。如果函数名被修改了,外部编程语言就⽆法按原名称调⽤,开发者也没办法知道修改后的函数名。

#[jni_fn("XXX")] 这个函数签名属性需要重点介绍一下,做过安卓 JNI 开发的都知道,JNI 函数的签名是又臭又长,比如上面的 createWgpuCanvas 函数,手写符合 JNI 规范的函数签名就会是 Java_name_jinleili_wgpu_RustBridge_createWgpuCanvas 这样,难写且难维护 #[jni_fn("name.jinleili.wgpu.RustBridge")] 这个属性能自动帮我们生成兼容 JNI 的函数签名,使正确编写函数签名变得更加容易。为此,我们需要 jni_fn 依赖项:

toml
[target.'cfg(target_os = "android")'.dependencies]
+jni_fn = "0.1"
+# 其它依赖项

在安卓端,我们定义一个命名空间为 name.jinleili.wgpuRustBridge 类来加载 Rust 程序,并使用 external 关键字标记好具体实现在 Rust 端的外部函数声明:

kotlin
package name.jinleili.wgpu
 
-import android.view.Surface
+import android.view.Surface
 
-class RustBridge {
-    init {
-        System.loadLibrary("wgpu_on_app")
-    }
+class RustBridge {
+    init {
+        System.loadLibrary("wgpu_on_app")
+    }
 
-    external fun createWgpuCanvas(surface: Surface, idx: Int): Long
-    external fun enterFrame(rustObj: Long)
-    // ...
-}

你可以使用任意符合安卓规范的命名空间,只需要记得让 Rust 端 #[jni_fn("")] 属性里的字符串与安卓端代码里的命名空间一致。

实现 cargo so 子命令

实现 so 子命令的目的是为了一劳永逸地解决 Rust 项目配置 Android NDK 链接的问题。如果你对如何给 wgpu 项目手动配置 NDK 感兴趣,Mozilla 的这篇文章有详细的步骤。 so 子命令的代码非常简单,而且我已经将它发布到了 Rust 的包注册网站 crates.io, 可以直接安装使用:

rust
let args = std::env::args();
-match Subcommand::new(args, "so", |_, _| Ok(false)) {
-    Ok(cmd) => match cmd.cmd() {
-        "build" | "b" => {
-            let ndk = Ndk::from_env().unwrap();
-            let build_targets = if let Some(target) = cmd.target() {
-                vec![Target::from_rust_triple(target).ok().unwrap()]
-            } else {
-                vec![
-                    Target::Arm64V8a,
-                    Target::ArmV7a,
-                    Target::X86,
-                    Target::X86_64,
-                ]
-            };
-            for target in build_targets {
-                let triple = target.rust_triple();
-                // setting ar, linker value
-                let mut cargo = cargo_ndk(&ndk, target, 24).unwrap();
-                cargo.arg("rustc");
-                if cmd.target().is_none() {
-                    cargo.arg("--target").arg(triple);
-                }
-                cargo.args(cmd.args());
-                if ndk.build_tag() > 7272597 {
-                    if !cmd.args().contains(&"--".to_owned()) {
-                        cargo.arg("--");
-                    }
-                    let gcc_link_dir = cmd.target_dir().join("gcc-temp-extra-link-libraries");
-                    let _ = std::fs::create_dir_all(&gcc_link_dir);
-                    std::fs::write(gcc_link_dir.join("libgcc.a"), "INPUT(-lunwind)")
-                        .expect("Failed to write");
-                    cargo.arg("-L").arg(gcc_link_dir);
-                }
+    external fun createWgpuCanvas(surface: Surface, idx: Int): Long
+    external fun enterFrame(rustObj: Long)
+    // ...
+}

你可以使用任意符合安卓规范的命名空间,只需要记得让 Rust 端 #[jni_fn("")] 属性里的字符串与安卓端代码里的命名空间一致。

实现 cargo so 子命令

实现 so 子命令的目的是为了一劳永逸地解决 Rust 项目配置 Android NDK 链接的问题。如果你对如何给 wgpu 项目手动配置 NDK 感兴趣,Mozilla 的这篇文章有详细的步骤。 so 子命令的代码非常简单,而且我已经将它发布到了 Rust 的包注册网站 crates.io, 可以直接安装使用:

rust
let args = std::env::args();
+match Subcommand::new(args, "so", |_, _| Ok(false)) {
+    Ok(cmd) => match cmd.cmd() {
+        "build" | "b" => {
+            let ndk = Ndk::from_env().unwrap();
+            let build_targets = if let Some(target) = cmd.target() {
+                vec![Target::from_rust_triple(target).ok().unwrap()]
+            } else {
+                vec![
+                    Target::Arm64V8a,
+                    Target::ArmV7a,
+                    Target::X86,
+                    Target::X86_64,
+                ]
+            };
+            for target in build_targets {
+                let triple = target.rust_triple();
+                // setting ar, linker value
+                let mut cargo = cargo_ndk(&ndk, target, 24).unwrap();
+                cargo.arg("rustc");
+                if cmd.target().is_none() {
+                    cargo.arg("--target").arg(triple);
+                }
+                cargo.args(cmd.args());
+                if ndk.build_tag() > 7272597 {
+                    if !cmd.args().contains(&"--".to_owned()) {
+                        cargo.arg("--");
+                    }
+                    let gcc_link_dir = cmd.target_dir().join("gcc-temp-extra-link-libraries");
+                    let _ = std::fs::create_dir_all(&gcc_link_dir);
+                    std::fs::write(gcc_link_dir.join("libgcc.a"), "INPUT(-lunwind)")
+                        .expect("Failed to write");
+                    cargo.arg("-L").arg(gcc_link_dir);
+                }
 
-                if !cargo.status().unwrap().success() {
-                    println!("{:?}", NdkError::CmdFailed(cargo));
-                }
-            }
-        }
-        _ => {}
-    },
-    Err(_) => {}
-};

编译为 .so 库文件

首先,安装我们上面实现 so 子命令:

sh
# 从 crates.io 安装
-cargo install cargo-so
-# 或者
-# 也可以从源码安装
-cargo install --path ./cargo-so

然后,使用 so 子命令来构建 wgpu 项目:

sh
# 将 wgpu 程序构建为 Android .so 库文件
-cargo so b --lib --target aarch64-linux-android --release
-cargo so b --lib --target armv7-linux-androideabi --release
+                if !cargo.status().unwrap().success() {
+                    println!("{:?}", NdkError::CmdFailed(cargo));
+                }
+            }
+        }
+        _ => {}
+    },
+    Err(_) => {}
+};

编译为 .so 库文件

首先,安装我们上面实现 so 子命令:

sh
# 从 crates.io 安装
+cargo install cargo-so
+# 或者
+# 也可以从源码安装
+cargo install --path ./cargo-so

然后,使用 so 子命令来构建 wgpu 项目:

sh
# 将 wgpu 程序构建为 Android .so 库文件
+cargo so b --lib --target aarch64-linux-android --release
+cargo so b --lib --target armv7-linux-androideabi --release
 
-# 将 .so 复制到安卓项目的 jniLibs/ 目录下
-cp target/aarch64-linux-android/release/libwgpu_on_app.so android/app/libs/arm64-v8a/libwgpu_on_app.so
-cp target/armv7-linux-androideabi/release/libwgpu_on_app.so android/app/libs/armeabi-v7a/libwgpu_on_app.so

我们还可以把上面的构建与复制命令放进一个 .sh 命令行文件内,之后编译项目时只需要执行此命令行文件就可以了:

sh
sh ./release.sh

自定义 WGPUSurfaceView

安卓视图组件 SurfaceView 提供了一个可嵌入在视图层级结构中的专用于绘制的视图。它负责绘制表面(Surface)在屏幕上的正确位置,还控制着绘制表面的像素格式及分辨率大小。 SurfaceView 持有的绘制表面是独立于 App 窗口的,可以在单独的线程中进行绘制而不占用主线程资源。所以使用 SurfaceView 可以实现复杂而高效的渲染(比如,游戏、视频播放、相机预览等),且不会阻塞用户交互(触摸、键盘输入等)的响应。

安卓系统上的绘制表面是纵深排序(Z-Ordered)的,它默认处在 App 窗口的后面, SurfaceView 通过在 App 窗口上面设置透明区域来展示处在后面的绘制表面。 如果想将绘制表面放置到窗口的最上层,可以通过 setZOrderOnTop() 函数来实现:

kotlin
mySurfaceView.setZOrderOnTop(true)

这里有必要多解释一句:wgpu 里的 Surface 对象虽然最终指向的就是 SurfaceView 持有的绘制表面,但它是一个经过统一封装的结构体,所以并不是同一个对象:

rust
pub struct Surface {
-    pub(crate) presentation: Option<Presentation>,
-    #[cfg(vulkan)]
-    pub vulkan: Option<HalSurface<hal::api::Vulkan>>,
-    #[cfg(metal)]
-    pub metal: Option<HalSurface<hal::api::Metal>>,
-    #[cfg(dx12)]
-    pub dx12: Option<HalSurface<hal::api::Dx12>>,
-    #[cfg(dx11)]
-    pub dx11: Option<HalSurface<hal::api::Dx11>>,
-    #[cfg(gl)]
-    pub gl: Option<HalSurface<hal::api::Gles>>,
-}

窗口的视图层级结构决定了与绘制表面的正确合成,也就是说,绘制表面的展示会受到视图层级关系的影响,在 SurfaceView 所处层级之上的视图会覆盖(遮挡)在合成后的绘制表面之上。 需要注意的是,如果覆盖内容存在透明度,则每次绘制表面渲染完成后,都会进行一次完整的 alpha 混合合成,这会对性能产生不利影响。

我们只能通过 SurfaceHolder 接口来访问绘制表面。当 SurfaceView 在窗口中可见时,绘制表面就会被创建,而不可见时(比如,App 被切换到后台运行)绘制表面会被销毁,所以需要实现 SurfaceHolder 的回调接口 surfaceCreatedsurfaceDestroyed 来发现绘制表面的创建和销毁。 下边的代码实现了一个继承自 SurfaceViewWGPUSurfaceView

kotlin
// 为当前类实现 SurfaceHolder 的回调接口
-class WGPUSurfaceView : SurfaceView, SurfaceHolder.Callback2 {
-    private var rustBrige = RustBridge()
-    // Rust 对象的指针
-    private var wgpuObj: Long = Long.MAX_VALUE
-    private var idx: Int = 0
+# 将 .so 复制到安卓项目的 jniLibs/ 目录下
+cp target/aarch64-linux-android/release/libwgpu_on_app.so android/app/libs/arm64-v8a/libwgpu_on_app.so
+cp target/armv7-linux-androideabi/release/libwgpu_on_app.so android/app/libs/armeabi-v7a/libwgpu_on_app.so

我们还可以把上面的构建与复制命令放进一个 .sh 命令行文件内,之后编译项目时只需要执行此命令行文件就可以了:

sh
sh ./release.sh

自定义 WGPUSurfaceView

安卓视图组件 SurfaceView 提供了一个可嵌入在视图层级结构中的专用于绘制的视图。它负责绘制表面(Surface)在屏幕上的正确位置,还控制着绘制表面的像素格式及分辨率大小。 SurfaceView 持有的绘制表面是独立于 App 窗口的,可以在单独的线程中进行绘制而不占用主线程资源。所以使用 SurfaceView 可以实现复杂而高效的渲染(比如,游戏、视频播放、相机预览等),且不会阻塞用户交互(触摸、键盘输入等)的响应。

安卓系统上的绘制表面是纵深排序(Z-Ordered)的,它默认处在 App 窗口的后面, SurfaceView 通过在 App 窗口上面设置透明区域来展示处在后面的绘制表面。 如果想将绘制表面放置到窗口的最上层,可以通过 setZOrderOnTop() 函数来实现:

kotlin
mySurfaceView.setZOrderOnTop(true)

这里有必要多解释一句:wgpu 里的 Surface 对象虽然最终指向的就是 SurfaceView 持有的绘制表面,但它是一个经过统一封装的结构体,所以并不是同一个对象:

rust
pub struct Surface {
+    pub(crate) presentation: Option<Presentation>,
+    #[cfg(vulkan)]
+    pub vulkan: Option<HalSurface<hal::api::Vulkan>>,
+    #[cfg(metal)]
+    pub metal: Option<HalSurface<hal::api::Metal>>,
+    #[cfg(dx12)]
+    pub dx12: Option<HalSurface<hal::api::Dx12>>,
+    #[cfg(dx11)]
+    pub dx11: Option<HalSurface<hal::api::Dx11>>,
+    #[cfg(gl)]
+    pub gl: Option<HalSurface<hal::api::Gles>>,
+}

窗口的视图层级结构决定了与绘制表面的正确合成,也就是说,绘制表面的展示会受到视图层级关系的影响,在 SurfaceView 所处层级之上的视图会覆盖(遮挡)在合成后的绘制表面之上。 需要注意的是,如果覆盖内容存在透明度,则每次绘制表面渲染完成后,都会进行一次完整的 alpha 混合合成,这会对性能产生不利影响。

我们只能通过 SurfaceHolder 接口来访问绘制表面。当 SurfaceView 在窗口中可见时,绘制表面就会被创建,而不可见时(比如,App 被切换到后台运行)绘制表面会被销毁,所以需要实现 SurfaceHolder 的回调接口 surfaceCreatedsurfaceDestroyed 来发现绘制表面的创建和销毁。 下边的代码实现了一个继承自 SurfaceViewWGPUSurfaceView

kotlin
// 为当前类实现 SurfaceHolder 的回调接口
+class WGPUSurfaceView : SurfaceView, SurfaceHolder.Callback2 {
+    private var rustBrige = RustBridge()
+    // Rust 对象的指针
+    private var wgpuObj: Long = Long.MAX_VALUE
+    private var idx: Int = 0
 
-    //...
+    //...
 
-    init {
-        // 将当前类设置为 SurfaceHolder 的回调接口代理
-        holder.addCallback(this)
-    }
+    init {
+        // 将当前类设置为 SurfaceHolder 的回调接口代理
+        holder.addCallback(this)
+    }
 
-    // 绘制表面被创建后,创建/重新创建 wgpu 对象
-    override fun surfaceCreated(holder: SurfaceHolder) {
-        holder.let { h ->
-            wgpuObj = rustBrige.createWgpuCanvas(h.surface, this.idx)
-            // SurfaceView 默认不会自动开始绘制,setWillNotDraw(false) 用于通知 App 已经准备好开始绘制了。
-            setWillNotDraw(false)
-        }
-    }
+    // 绘制表面被创建后,创建/重新创建 wgpu 对象
+    override fun surfaceCreated(holder: SurfaceHolder) {
+        holder.let { h ->
+            wgpuObj = rustBrige.createWgpuCanvas(h.surface, this.idx)
+            // SurfaceView 默认不会自动开始绘制,setWillNotDraw(false) 用于通知 App 已经准备好开始绘制了。
+            setWillNotDraw(false)
+        }
+    }
 
-    // 绘制表面被销毁后,也销毁 wgpu 对象
-    override fun surfaceDestroyed(holder: SurfaceHolder) {
-        if (wgpuObj != Long.MAX_VALUE) {
-            rustBrige.dropWgpuCanvas(wgpuObj)
-            wgpuObj = Long.MAX_VALUE
-        }
-    }
+    // 绘制表面被销毁后,也销毁 wgpu 对象
+    override fun surfaceDestroyed(holder: SurfaceHolder) {
+        if (wgpuObj != Long.MAX_VALUE) {
+            rustBrige.dropWgpuCanvas(wgpuObj)
+            wgpuObj = Long.MAX_VALUE
+        }
+    }
 
-    override fun draw(canvas: Canvas?) {
-        super.draw(canvas)
-        // 考虑到边界情况,这个条件判断不能省略
-        if (wgpuObj == Long.MAX_VALUE) {
-            return
-        }
-        rustBrige.enterFrame(wgpuObj)
-        // invalidate() 函数通知 App,在下一个 UI 刷新周期重新调用 draw() 函数 
-        invalidate()
-    }
-}

App 中加载 WGPUSurfaceView

现在可以在 Activity 或 Fragment(此处仅指安卓 Fragment,与着色器里的片元无关)里加载 WGPUSurfaceView 实例了,通过 XML 或者 Java/Kotlin 代码来加载很常见,下面我们来看看在安卓上的新一代 UI 开发框架 Jetpack Compose 中如何加载:

kotlin
class MainActivity : ComponentActivity() {
-    override fun onCreate(savedInstanceState: Bundle?) {
-        super.onCreate(savedInstanceState)
+    override fun draw(canvas: Canvas?) {
+        super.draw(canvas)
+        // 考虑到边界情况,这个条件判断不能省略
+        if (wgpuObj == Long.MAX_VALUE) {
+            return
+        }
+        rustBrige.enterFrame(wgpuObj)
+        // invalidate() 函数通知 App,在下一个 UI 刷新周期重新调用 draw() 函数 
+        invalidate()
+    }
+}

App 中加载 WGPUSurfaceView

现在可以在 Activity 或 Fragment(此处仅指安卓 Fragment,与着色器里的片元无关)里加载 WGPUSurfaceView 实例了,通过 XML 或者 Java/Kotlin 代码来加载很常见,下面我们来看看在安卓上的新一代 UI 开发框架 Jetpack Compose 中如何加载:

kotlin
class MainActivity : ComponentActivity() {
+    override fun onCreate(savedInstanceState: Bundle?) {
+        super.onCreate(savedInstanceState)
 
-        setContent {
-            MyApplicationTheme {
-                Surface(
-                    modifier = Modifier.fillMaxSize(),
-                    color = colorResource(id = R.color.white)
-                ) {
-                    SurfaceCard()
-                }
-            }
-        }
-    }
-}
+        setContent {
+            MyApplicationTheme {
+                Surface(
+                    modifier = Modifier.fillMaxSize(),
+                    color = colorResource(id = R.color.white)
+                ) {
+                    SurfaceCard()
+                }
+            }
+        }
+    }
+}
 
-@Composable
-fun SurfaceCard() {
-    val screenWidth = LocalConfiguration.current.screenWidthDp.dp
-    Column(modifier = Modifier.fillMaxSize()) {
-        Row(
-            verticalAlignment = Alignment.CenterVertically,
-            horizontalArrangement = Arrangement.Center
-        ) {
-            Text(text = "wgpu on Android", fontSize = 20.sp, fontWeight = FontWeight.Bold)
-        }
-        // ...
+@Composable
+fun SurfaceCard() {
+    val screenWidth = LocalConfiguration.current.screenWidthDp.dp
+    Column(modifier = Modifier.fillMaxSize()) {
+        Row(
+            verticalAlignment = Alignment.CenterVertically,
+            horizontalArrangement = Arrangement.Center
+        ) {
+            Text(text = "wgpu on Android", fontSize = 20.sp, fontWeight = FontWeight.Bold)
+        }
+        // ...
 
-        // 通过 AndroidView 容器来加载我们的 WGPUSurfaceView
-        AndroidView(
-            factory = { ctx ->
-                WGPUSurfaceView(context = ctx)
-            },
-            modifier = Modifier
-                .fillMaxWidth()
-                .height(screenWidth),
-        )
-    }
-}

基于以上代码,我写了一个叫 wgpu-in-app 的示例程序,效果如下:

- + // 通过 AndroidView 容器来加载我们的 WGPUSurfaceView + AndroidView( + factory = { ctx -> + WGPUSurfaceView(context = ctx) + }, + modifier = Modifier + .fillMaxWidth() + .height(screenWidth), + ) + } +}

基于以上代码,我写了一个叫 wgpu-in-app 的示例程序,效果如下:

+ \ No newline at end of file diff --git a/integration-and-debugging/bevy/index.html b/integration-and-debugging/bevy/index.html index a2ddaabc1..fb46eb574 100644 --- a/integration-and-debugging/bevy/index.html +++ b/integration-and-debugging/bevy/index.html @@ -5,280 +5,282 @@ 在 iOS Android App 中集成 Bevy 游戏引擎 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

在 iOS Android App 中集成 Bevy 游戏引擎

认识 Bevy

Bevy 是一个开源、跨平台的 Rust 游戏引擎,设计目的是提供一个简单、高效且易于使用的游戏开发框架。它的特点包括:

  • 模块化设计:游戏引擎的各个组件皆为单独的模块,方便选择需要的组件并扩展。
  • 灵活的插件系统:支持自定义插件,可以创建并集成自己的插件。
  • 易于使用的 API:API 简单易懂,帮助快速开始游戏开发。
  • 强大的渲染系统:使用了 wgpu 作为渲染后端,以提供强大的图形渲染能力。
  • 跨平台:除了支持在 Windows、MacOS 和 Linux 桌面系统及 iOS、Android 移动设备上运行,还能在支持 WebGPU 的浏览器上运行。

Bevy 是一个适合新手的游戏引擎,其简单性和灵活性使得能够轻松地开始游戏开发,随着经验的增加,它也能满足更高级的定制需求。

需求场景

如果需要给已有的 App 添加一个开屏小游戏,或者实现一些动态 UI 组件、图表...,又或者只是想充分利用手机上的 Motion Sensors 来实现令人惊艳的游戏体验,那么就不能使用 Bevy 默认的 WinitPlugin 了。因为 winit 会完全控制 App 的初始化过程和窗口,而我们需要的是在已有的 App 实例中创建 bevy::App, 并且我们可能还希望 bevy::App 能在任意大小的 iOS UIView 或 Android SurfaceView 中运行。

本章我们将逐步实现一个此类场景,并且利用手机的 Motion Sensor 来玩 breakout 小游戏。

Bevy 中的窗口插件

Bevy 中有两个窗口插件:WindowPluginWinitPlugin

WindowPlugin 是 Bevy 中构建游戏或应用程序的基础插件, 它提供了一套简单易用的接口来管理窗口的属性(标题、是否可见...)、行为(获取焦点,缩放约束,窗口显示层级...)、与事件(鼠标、键盘、触摸事件...)。WindowPlugin 并不负责实际的窗口创建,它需要与其他插件配合使用

WinitPlugin 为 Bevy 提供实际的窗口及事件循环的创建与管理。顾名思义,它依赖了 winit 窗口管理库。

下面通过源码来看看 WinitPlugin 是如何完成工作的:

rust
// crates/bevy_winit/src/lib.rs
+    
Skip to content

在 iOS Android App 中集成 Bevy 游戏引擎

认识 Bevy

Bevy 是一个开源、跨平台的 Rust 游戏引擎,设计目的是提供一个简单、高效且易于使用的游戏开发框架。它的特点包括:

  • 模块化设计:游戏引擎的各个组件皆为单独的模块,方便选择需要的组件并扩展。
  • 灵活的插件系统:支持自定义插件,可以创建并集成自己的插件。
  • 易于使用的 API:API 简单易懂,帮助快速开始游戏开发。
  • 强大的渲染系统:使用了 wgpu 作为渲染后端,以提供强大的图形渲染能力。
  • 跨平台:除了支持在 Windows、MacOS 和 Linux 桌面系统及 iOS、Android 移动设备上运行,还能在支持 WebGPU 的浏览器上运行。

Bevy 是一个适合新手的游戏引擎,其简单性和灵活性使得能够轻松地开始游戏开发,随着经验的增加,它也能满足更高级的定制需求。

需求场景

如果需要给已有的 App 添加一个开屏小游戏,或者实现一些动态 UI 组件、图表...,又或者只是想充分利用手机上的 Motion Sensors 来实现令人惊艳的游戏体验,那么就不能使用 Bevy 默认的 WinitPlugin 了。因为 winit 会完全控制 App 的初始化过程和窗口,而我们需要的是在已有的 App 实例中创建 bevy::App, 并且我们可能还希望 bevy::App 能在任意大小的 iOS UIView 或 Android SurfaceView 中运行。

本章我们将逐步实现一个此类场景,并且利用手机的 Motion Sensor 来玩 breakout 小游戏。

Bevy 中的窗口插件

Bevy 中有两个窗口插件:WindowPluginWinitPlugin

WindowPlugin 是 Bevy 中构建游戏或应用程序的基础插件, 它提供了一套简单易用的接口来管理窗口的属性(标题、是否可见...)、行为(获取焦点,缩放约束,窗口显示层级...)、与事件(鼠标、键盘、触摸事件...)。WindowPlugin 并不负责实际的窗口创建,它需要与其他插件配合使用

WinitPlugin 为 Bevy 提供实际的窗口及事件循环的创建与管理。顾名思义,它依赖了 winit 窗口管理库。

下面通过源码来看看 WinitPlugin 是如何完成工作的:

rust
// crates/bevy_winit/src/lib.rs
 
-#[derive(Default)]
-pub struct WinitPlugin;
+#[derive(Default)]
+pub struct WinitPlugin;
 
-impl Plugin for WinitPlugin {
-    fn build(&self, app: &mut App) {
-        let mut event_loop_builder = EventLoopBuilder::<()>::with_user_event();
-        let event_loop = event_loop_builder.build();
-        app.insert_non_send_resource(event_loop);
+impl Plugin for WinitPlugin {
+    fn build(&self, app: &mut App) {
+        let mut event_loop_builder = EventLoopBuilder::<()>::with_user_event();
+        let event_loop = event_loop_builder.build();
+        app.insert_non_send_resource(event_loop);
 
-        app.init_non_send_resource::<WinitWindows>()
-            .init_resource::<WinitSettings>()
-            .set_runner(winit_runner)
-            .add_systems(...);
-        // ...
-    }
-}

WinitPlugin 是个实现了 Plugin trait 的空结构体,在 build 函数内向 app World 内分别添加了 EventLoopWinitWindowsWinitSettings 3 项资源,WinitWindows 用于随后创建并保存窗口实例:

rust
// crates/bevy_winit/src/winit_windows.rs
+        app.init_non_send_resource::<WinitWindows>()
+            .init_resource::<WinitSettings>()
+            .set_runner(winit_runner)
+            .add_systems(...);
+        // ...
+    }
+}

WinitPlugin 是个实现了 Plugin trait 的空结构体,在 build 函数内向 app World 内分别添加了 EventLoopWinitWindowsWinitSettings 3 项资源,WinitWindows 用于随后创建并保存窗口实例:

rust
// crates/bevy_winit/src/winit_windows.rs
 
-#[derive(Debug, Default)]
-pub struct WinitWindows {
-    pub windows: HashMap<winit::window::WindowId, winit::window::Window>,
-    // ...
-}

然后, 设置了一个叫 winit_runner 的 runner 函数,这个函数在用户调用 app.run() 时会被自动执行。

winit_runner() 内调用了 create_window() 来完成最终的窗口创建:

rust
// crates/bevy_winit/src/lib.rs
+#[derive(Debug, Default)]
+pub struct WinitWindows {
+    pub windows: HashMap<winit::window::WindowId, winit::window::Window>,
+    // ...
+}

然后, 设置了一个叫 winit_runner 的 runner 函数,这个函数在用户调用 app.run() 时会被自动执行。

winit_runner() 内调用了 create_window() 来完成最终的窗口创建:

rust
// crates/bevy_winit/src/lib.rs
 
-pub fn winit_runner(mut app: App) {
-    // 取出在 build() 中创建的事件循环实例
-    let mut event_loop = app
-        .world
-        .remove_non_send_resource::<EventLoop<()>>()
-        .unwrap();
+pub fn winit_runner(mut app: App) {
+    // 取出在 build() 中创建的事件循环实例
+    let mut event_loop = app
+        .world
+        .remove_non_send_resource::<EventLoop<()>>()
+        .unwrap();
 
-    // 创建一个新的系统状态
-    let mut create_window_system_state: SystemState<(Commands, Query<(Entity, &mut Window),
-    Added<Window>>, EventWriter<WindowCreated>,NonSendMut<WinitWindows>)>
-        = SystemState::from_world(&mut app.world);
+    // 创建一个新的系统状态
+    let mut create_window_system_state: SystemState<(Commands, Query<(Entity, &mut Window),
+    Added<Window>>, EventWriter<WindowCreated>,NonSendMut<WinitWindows>)>
+        = SystemState::from_world(&mut app.world);
 
-    let event_handler = move |event: Event<()>,
-                              event_loop: &EventLoopWindowTarget<()>,
-                              control_flow: &mut ControlFlow| {
-        // ...
-        // 创建新窗口
-        let (commands, mut new_windows, created_window_writer, winit_windows) =
-                create_window_system_state.get_mut(&mut app.world);
-        create_window(
-            commands,
-            event_loop,
-            new_windows.iter_mut(),
-            created_window_writer,
-            winit_windows,
-            ...
-        );
-        // ...
-
-}
+    let event_handler = move |event: Event<()>,
+                              event_loop: &EventLoopWindowTarget<()>,
+                              control_flow: &mut ControlFlow| {
+        // ...
+        // 创建新窗口
+        let (commands, mut new_windows, created_window_writer, winit_windows) =
+                create_window_system_state.get_mut(&mut app.world);
+        create_window(
+            commands,
+            event_loop,
+            new_windows.iter_mut(),
+            created_window_writer,
+            winit_windows,
+            ...
+        );
+        // ...
+
+}
 
-// crates/bevy_winit/src/system.rs
+// crates/bevy_winit/src/system.rs
 
-pub(crate) fn create_window<'a>(
-    mut commands: Commands,
-    event_loop: &EventLoopWindowTarget<()>,
-    created_windows: impl Iterator<Item = (Entity, Mut<'a, Window>)>,
-    mut event_writer: EventWriter<WindowCreated>,
-    mut winit_windows: NonSendMut<WinitWindows>,
-) {
-    for (entity, mut window) in created_windows {
-        // ...
-        // 创建 winit 窗口
-        let winit_window = winit_windows.create_window(event_loop, entity, &window);
-        // 更新 bevy 窗口的状态
-        window.resolution.XXX;
-        // ...
-    }
-}

实现 AppViewPlugin

接下来要做的就是使用自定义的窗口插件来替代 WinitPlugin。具体怎么做呢?简单模仿 WinitPlugin

首先,需要实现一个创建与保存窗口实例的结构体。由于我们的宿主 App 已经有完整的事件循环了,创建及管理事件循环的步骤都可以免了:

rust
// bevy_in_app/src/app_view/app_views.rs
+pub(crate) fn create_window<'a>(
+    mut commands: Commands,
+    event_loop: &EventLoopWindowTarget<()>,
+    created_windows: impl Iterator<Item = (Entity, Mut<'a, Window>)>,
+    mut event_writer: EventWriter<WindowCreated>,
+    mut winit_windows: NonSendMut<WinitWindows>,
+) {
+    for (entity, mut window) in created_windows {
+        // ...
+        // 创建 winit 窗口
+        let winit_window = winit_windows.create_window(event_loop, entity, &window);
+        // 更新 bevy 窗口的状态
+        window.resolution.XXX;
+        // ...
+    }
+}

实现 AppViewPlugin

接下来要做的就是使用自定义的窗口插件来替代 WinitPlugin。具体怎么做呢?简单模仿 WinitPlugin

首先,需要实现一个创建与保存窗口实例的结构体。由于我们的宿主 App 已经有完整的事件循环了,创建及管理事件循环的步骤都可以免了:

rust
// bevy_in_app/src/app_view/app_views.rs
 
-#[derive(Debug, Default)]
-pub struct AppViews {
-    views: HashMap<WindowId, AppView>,
-    entity_to_window_id: HashMap<Entity, super::WindowId>,
-}
+#[derive(Debug, Default)]
+pub struct AppViews {
+    views: HashMap<WindowId, AppView>,
+    entity_to_window_id: HashMap<Entity, super::WindowId>,
+}
 
-impl AppViews {
-    pub fn create_window(
-        &mut self,
-        #[cfg(target_os = "ios")] view_obj: super::IOSViewObj,
-        #[cfg(target_os = "android")] view_obj: super::AndroidViewObj,
-        entity: Entity,
-    ) -> Window { ... }
-}

AppViews 里的 AppViewIOSViewObjAndroidViewObj 在前面的与 iOS App 集成与 Android App 集成分别有详细介绍,简单来讲,IOSViewObj 封装了 iOS UIView 实例,AndroidViewObj 封装了 Android SurfaceView 所持有的 ANativeWindow 实例,AppView 实现了 HasRawWindowHandleHasRawDisplayHandle trait。

rust
// bevy-in-app/src/app_view/mod.rs
+impl AppViews {
+    pub fn create_window(
+        &mut self,
+        #[cfg(target_os = "ios")] view_obj: super::IOSViewObj,
+        #[cfg(target_os = "android")] view_obj: super::AndroidViewObj,
+        entity: Entity,
+    ) -> Window { ... }
+}

AppViews 里的 AppViewIOSViewObjAndroidViewObj 在前面的与 iOS App 集成与 Android App 集成分别有详细介绍,简单来讲,IOSViewObj 封装了 iOS UIView 实例,AndroidViewObj 封装了 Android SurfaceView 所持有的 ANativeWindow 实例,AppView 实现了 HasRawWindowHandleHasRawDisplayHandle trait。

rust
// bevy-in-app/src/app_view/mod.rs
 
-pub struct AppViewPlugin;
+pub struct AppViewPlugin;
 
-impl Plugin for AppViewPlugin {
-    fn build(&self, app: &mut App) {
-        app.init_non_send_resource::<AppViews>().add_systems(
-            (
-                changed_window.ambiguous_with(exit_on_all_closed),
-                despawn_window.after(changed_window),
-            )
-                .in_base_set(CoreSet::Last),
-        );
-    }
-}

上面就是 AppViewPlugin 的完整代码,就这么简单。

值得注意的是,在 build() 中没有像 WinitPlugin 一样设置 runner 函数,这是怎么回事?

前面已经提到,设置的 runner 函数会在调用 app.run() 时被自动执行。查看源码可以看到此函数会将 app 实例从内存中移出并传递给 runner,用户端的 app 被替换成了空实例:

rust
// crates/bevy_app/src/app.rs
+impl Plugin for AppViewPlugin {
+    fn build(&self, app: &mut App) {
+        app.init_non_send_resource::<AppViews>().add_systems(
+            (
+                changed_window.ambiguous_with(exit_on_all_closed),
+                despawn_window.after(changed_window),
+            )
+                .in_base_set(CoreSet::Last),
+        );
+    }
+}

上面就是 AppViewPlugin 的完整代码,就这么简单。

值得注意的是,在 build() 中没有像 WinitPlugin 一样设置 runner 函数,这是怎么回事?

前面已经提到,设置的 runner 函数会在调用 app.run() 时被自动执行。查看源码可以看到此函数会将 app 实例从内存中移出并传递给 runner,用户端的 app 被替换成了空实例:

rust
// crates/bevy_app/src/app.rs
 
-pub fn run(&mut self) {
-	// ...
-	let mut app = std::mem::replace(self, App::empty());
-	let runner = std::mem::replace(&mut app.runner, Box::new(run_once));
+pub fn run(&mut self) {
+	// ...
+	let mut app = std::mem::replace(self, App::empty());
+	let runner = std::mem::replace(&mut app.runner, Box::new(run_once));
 
-	(runner)(app);
-}

我们需要从宿主 App 事件循环中调用 Bevy App 实例,所以不能让 runner 拿走它,改由从 src/ffi/create_bevy_app() 函数中手动调用:

rust
// bevy-in-app/src/ffi/iOS.rs
+	(runner)(app);
+}

我们需要从宿主 App 事件循环中调用 Bevy App 实例,所以不能让 runner 拿走它,改由从 src/ffi/create_bevy_app() 函数中手动调用:

rust
// bevy-in-app/src/ffi/iOS.rs
 
-#[no_mangle]
-pub fn create_bevy_app(view: *mut objc::runtime::Object, scale_factor: f32) -> *mut libc::c_void {
-    let mut bevy_app = crate::create_breakout_app();
-    let ios_obj = IOSViewObj { view, scale_factor };
-    bevy_app.insert_non_send_resource(ios_obj);
+#[no_mangle]
+pub fn create_bevy_app(view: *mut objc::runtime::Object, scale_factor: f32) -> *mut libc::c_void {
+    let mut bevy_app = crate::create_breakout_app();
+    let ios_obj = IOSViewObj { view, scale_factor };
+    bevy_app.insert_non_send_resource(ios_obj);
 
-    create_bevy_window(&mut bevy_app);
-    // ...
-}
+    create_bevy_window(&mut bevy_app);
+    // ...
+}
 
-// bevy-in-app/src/app_view/mod.rs
+// bevy-in-app/src/app_view/mod.rs
 
-pub fn create_bevy_window(app: &mut App) {
-    #[cfg(target_os = "ios")]
-    let view_obj = app.world.remove_non_send_resource::<IOSViewObj>().unwrap();
-    #[cfg(target_os = "android")]
-    let view_obj = app.world.remove_non_send_resource::<AndroidViewObj>().unwrap();
+pub fn create_bevy_window(app: &mut App) {
+    #[cfg(target_os = "ios")]
+    let view_obj = app.world.remove_non_send_resource::<IOSViewObj>().unwrap();
+    #[cfg(target_os = "android")]
+    let view_obj = app.world.remove_non_send_resource::<AndroidViewObj>().unwrap();
 
-    let mut create_window_system_state: SystemState<(
-        Commands,
-        Query<(Entity, &mut Window), Added<Window>>,
-        EventWriter<WindowCreated>,
-        NonSendMut<AppViews>,
-    )> = SystemState::from_world(&mut app.world);
-    let (mut commands, mut new_windows, mut created_window_writer, mut app_views) =
-        create_window_system_state.get_mut(&mut app.world);
+    let mut create_window_system_state: SystemState<(
+        Commands,
+        Query<(Entity, &mut Window), Added<Window>>,
+        EventWriter<WindowCreated>,
+        NonSendMut<AppViews>,
+    )> = SystemState::from_world(&mut app.world);
+    let (mut commands, mut new_windows, mut created_window_writer, mut app_views) =
+        create_window_system_state.get_mut(&mut app.world);
 
-    for (entity, mut bevy_window) in new_windows.iter_mut() {
-        if app_views.get_view(entity).is_some() {
-            continue;
-        }
-        let app_view = app_views.create_window(view_obj, entity);
-        let logical_res = app_view.logical_resolution();
+    for (entity, mut bevy_window) in new_windows.iter_mut() {
+        if app_views.get_view(entity).is_some() {
+            continue;
+        }
+        let app_view = app_views.create_window(view_obj, entity);
+        let logical_res = app_view.logical_resolution();
 
-        bevy_window
-            .resolution
-            .set_scale_factor(app_view.scale_factor as f64);
-        bevy_window.resolution.set(logical_res.0, logical_res.1);
+        bevy_window
+            .resolution
+            .set_scale_factor(app_view.scale_factor as f64);
+        bevy_window.resolution.set(logical_res.0, logical_res.1);
 
-        commands.entity(entity).insert(RawHandleWrapper {
-            window_handle: app_view.raw_window_handle(),
-            display_handle: app_view.raw_display_handle(),
-        });
+        commands.entity(entity).insert(RawHandleWrapper {
+            window_handle: app_view.raw_window_handle(),
+            display_handle: app_view.raw_display_handle(),
+        });
 
-        created_window_writer.send(WindowCreated { window: entity });
-    }
-    create_window_system_state.apply(&mut app.world);
-}

create_bevy_window 函数的完整执行逻辑如下: 0. 从 World 中取出 IOSViewObjAndroidViewObj 目标平台的视图对象资源;

  1. 创建一个新的系统状态,并获取所需的命令队列,窗口实体列表,窗口创建写入器(EventWriter<WindowCreated>)和 AppViews 的可变借用;
  2. 遍历窗口实体列表,检查窗口是否已经被创建;
  3. 调用 AppViewscreate_window() 创建一个新窗口 app_view(也就是实现了 raw-window-handle traits 的 AppView);
  4. 调用 app_view 的相关函数与字段更新 Bevy window 的物理分辨率缩放因子及逻辑分辨率;
  5. 通过命令队列 commandsapp_view 中实现的窗口句柄插入到实体中;
  6. 窗口创建写入器发送一个包含了新窗口的实体的 WindowCreated;
  7. 最后,调用 apply() 函数应用系统状态;

IOSViewObjAndroidViewObj 遵循了 Bevy 中资源传递的惯例,在 create_bevy_app() FFI 函数中调用 insert_non_send_resource() 将其插入到 World。 此处没使用 insert_resource() 有两个原因:

  • IOSViewObj 不是线程安全的,iOS UIView 中的函数只能在主线程中使用;
shell
error[E0277]: `*mut Object` cannot be sent between threads safely
-  --> src/app_view/ios.rs:21:26
-   |
-21 | #[derive(Debug, Default, Resource)]
-   |                          ^^^^^^^^ `*mut Object` cannot be sent between threads safely
-   |
-   = help: the trait `Send` is not implemented for `*mut Object`
-   = note: required for `Option<*mut Object>` to implement `Sync`
  • 事实上也并不需要在多线程环境中来创建窗口;

链接 libc++

实现了 AppViewPlugin 后运行 cargo so b --lib --target aarch64-linux-android 将 crate 编译为 .so 库文件,在 Android 项目中加载时将得到如下错误:

shell
dlopen failed: cannot locate symbol "__gxx_personality_v0" referenced by ...

我们知道 Bevy 项目通过 cargo-apk 命令编译为 .apk 是可以在 Android 上安装运行的,于是查看 cargo-apk 的源码:

rust
self.add_lib(&artifact, target)?;
-for need in list_needed_libs(&readelf_path, &artifact)? {
-	// c++_shared is available in the NDK but not on-device.
-	// Must be bundled with the apk if used:
-	// https://developer.android.com/ndk/guides/cpp-support#libc
-	let search_paths = if need == "libc++_shared.so" {
-		// ...
-	}
-	// ...
-}

根据注释里的相关链接libc++_shared 链接库名称,就知道如何在我们的项目里链接 Android libc++ 共享库了, 在 android.rs 中添加如下代码:

rust
// bevy-in-app/src/ffi/android.rs
+        created_window_writer.send(WindowCreated { window: entity });
+    }
+    create_window_system_state.apply(&mut app.world);
+}

create_bevy_window 函数的完整执行逻辑如下: 0. 从 World 中取出 IOSViewObjAndroidViewObj 目标平台的视图对象资源;

  1. 创建一个新的系统状态,并获取所需的命令队列,窗口实体列表,窗口创建写入器(EventWriter<WindowCreated>)和 AppViews 的可变借用;
  2. 遍历窗口实体列表,检查窗口是否已经被创建;
  3. 调用 AppViewscreate_window() 创建一个新窗口 app_view(也就是实现了 raw-window-handle traits 的 AppView);
  4. 调用 app_view 的相关函数与字段更新 Bevy window 的物理分辨率缩放因子及逻辑分辨率;
  5. 通过命令队列 commandsapp_view 中实现的窗口句柄插入到实体中;
  6. 窗口创建写入器发送一个包含了新窗口的实体的 WindowCreated;
  7. 最后,调用 apply() 函数应用系统状态;

IOSViewObjAndroidViewObj 遵循了 Bevy 中资源传递的惯例,在 create_bevy_app() FFI 函数中调用 insert_non_send_resource() 将其插入到 World。 此处没使用 insert_resource() 有两个原因:

  • IOSViewObj 不是线程安全的,iOS UIView 中的函数只能在主线程中使用;
shell
error[E0277]: `*mut Object` cannot be sent between threads safely
+  --> src/app_view/ios.rs:21:26
+   |
+21 | #[derive(Debug, Default, Resource)]
+   |                          ^^^^^^^^ `*mut Object` cannot be sent between threads safely
+   |
+   = help: the trait `Send` is not implemented for `*mut Object`
+   = note: required for `Option<*mut Object>` to implement `Sync`
  • 事实上也并不需要在多线程环境中来创建窗口;

链接 libc++

实现了 AppViewPlugin 后运行 cargo so b --lib --target aarch64-linux-android 将 crate 编译为 .so 库文件,在 Android 项目中加载时将得到如下错误:

shell
dlopen failed: cannot locate symbol "__gxx_personality_v0" referenced by ...

我们知道 Bevy 项目通过 cargo-apk 命令编译为 .apk 是可以在 Android 上安装运行的,于是查看 cargo-apk 的源码:

rust
self.add_lib(&artifact, target)?;
+for need in list_needed_libs(&readelf_path, &artifact)? {
+	// c++_shared is available in the NDK but not on-device.
+	// Must be bundled with the apk if used:
+	// https://developer.android.com/ndk/guides/cpp-support#libc
+	let search_paths = if need == "libc++_shared.so" {
+		// ...
+	}
+	// ...
+}

根据注释里的相关链接libc++_shared 链接库名称,就知道如何在我们的项目里链接 Android libc++ 共享库了, 在 android.rs 中添加如下代码:

rust
// bevy-in-app/src/ffi/android.rs
 
-#[link(name = "c++_shared")]
-extern "C" {}

编译后运行, 现在出现了新的情况:

sh
dlopen failed: library "libc++_shared.so" not found

有两个解决途径:

  • 从 GitHub 下载编译好的 libc++_shared.so 放到 jniLibs/XX 目录下;
  • 使用 externalNativeBuild 配置一个空的 c++ 原生库的构建, Gradle 会自动将该库添加到 App 中;

bevy-in-app 使用了第二种方式,仅需一点模板化的配置,可以直接查看项目源码,这里就不贴出来了。

调用 Motion Sensors

以 Android 为例:

kotlin
class BevySurfaceView : SurfaceView, SurfaceHolder.Callback2 {
-    private var sensorManager: SensorManager? = null
-    private var mSensor: Sensor? = null
-    private var sensorValues: FloatArray = FloatArray(3)
+#[link(name = "c++_shared")]
+extern "C" {}

编译后运行, 现在出现了新的情况:

sh
dlopen failed: library "libc++_shared.so" not found

有两个解决途径:

  • 从 GitHub 下载编译好的 libc++_shared.so 放到 jniLibs/XX 目录下;
  • 使用 externalNativeBuild 配置一个空的 c++ 原生库的构建, Gradle 会自动将该库添加到 App 中;

bevy-in-app 使用了第二种方式,仅需一点模板化的配置,可以直接查看项目源码,这里就不贴出来了。

调用 Motion Sensors

以 Android 为例:

kotlin
class BevySurfaceView : SurfaceView, SurfaceHolder.Callback2 {
+    private var sensorManager: SensorManager? = null
+    private var mSensor: Sensor? = null
+    private var sensorValues: FloatArray = FloatArray(3)
 
-    constructor(context: Context) : super(context) {
-        // 获取传感器服务
-        sensorManager = context.getSystemService(Context.SENSOR_SERVICE) as SensorManager
-        // 获取重力传感器,用于检测设备的重力变化
-        mSensor = sensorManager?.getDefaultSensor(Sensor.TYPE_GRAVITY)
-    }
+    constructor(context: Context) : super(context) {
+        // 获取传感器服务
+        sensorManager = context.getSystemService(Context.SENSOR_SERVICE) as SensorManager
+        // 获取重力传感器,用于检测设备的重力变化
+        mSensor = sensorManager?.getDefaultSensor(Sensor.TYPE_GRAVITY)
+    }
 
-    override fun surfaceCreated(holder: SurfaceHolder) {
-        holder.let { h ->
-            // ...
-            // 创建了一个传感器事件监听器
-            var sensorEventListener = object : SensorEventListener {
-                // 当传感器的值改变时,更新 sensorValues 变量的值
-                override fun onSensorChanged(event: SensorEvent?) {
-                    if (event != null) {
-                        sensorValues = event.values
-                    }
-                }
-                override fun onAccuracyChanged(sensor: Sensor?, accuracy: Int) {
-                }
-            }
-            mSensor?.also { sensor ->
-                // 注册上边创建的事件监听器,以便开始监听传感器事件
-                sensorManager?.registerListener(sensorEventListener, sensor, SensorManager.SENSOR_DELAY_GAME)
-            }
-        }
-    }
-}

现在已经有了实时变化的传感器数据,仅需调用 Rust 端实现的 FFI 函数来发送此输入事件

Android 端:

kotlin
override fun draw(canvas: Canvas?) {
-    // ...
-    bevyApp.device_motion(bevy_app, sensorValues[0], sensorValues[1], sensorValues[2])
-    bevyApp.enter_frame(bevy_app)
-    // invalidate() 函数通知通知 App,在下一个 UI 刷新周期重新调用 draw() 函数
-    invalidate()
-}

Rust 端:

rust
// bevy-in-app/src/ffi/android.rs
+    override fun surfaceCreated(holder: SurfaceHolder) {
+        holder.let { h ->
+            // ...
+            // 创建了一个传感器事件监听器
+            var sensorEventListener = object : SensorEventListener {
+                // 当传感器的值改变时,更新 sensorValues 变量的值
+                override fun onSensorChanged(event: SensorEvent?) {
+                    if (event != null) {
+                        sensorValues = event.values
+                    }
+                }
+                override fun onAccuracyChanged(sensor: Sensor?, accuracy: Int) {
+                }
+            }
+            mSensor?.also { sensor ->
+                // 注册上边创建的事件监听器,以便开始监听传感器事件
+                sensorManager?.registerListener(sensorEventListener, sensor, SensorManager.SENSOR_DELAY_GAME)
+            }
+        }
+    }
+}

现在已经有了实时变化的传感器数据,仅需调用 Rust 端实现的 FFI 函数来发送此输入事件

Android 端:

kotlin
override fun draw(canvas: Canvas?) {
+    // ...
+    bevyApp.device_motion(bevy_app, sensorValues[0], sensorValues[1], sensorValues[2])
+    bevyApp.enter_frame(bevy_app)
+    // invalidate() 函数通知 App,在下一个 UI 刷新周期重新调用 draw() 函数
+    invalidate()
+}

Rust 端:

rust
// bevy-in-app/src/ffi/android.rs
 
-#[no_mangle]
-#[jni_fn("name.jinleili.bevy.RustBridge")]
-pub fn device_motion(_env: *mut JNIEnv, _: jobject, obj: jlong, x: jfloat, _y: jfloat, _z: jfloat) {
-    let app = unsafe { &mut *(obj as *mut App) };
-    let x: f32 = x as _;
-    if x < -0.2 {
-        crate::change_input(app, KeyCode::Left, ButtonState::Released);
-        crate::change_input(app, KeyCode::Right, ButtonState::Pressed);
-    } else if x > 0.2 {
-        crate::change_input(app, KeyCode::Right, ButtonState::Released);
-        crate::change_input(app, KeyCode::Left, ButtonState::Pressed);
-    } else {
-        crate::change_input(app, KeyCode::Left, ButtonState::Released);
-        crate::change_input(app, KeyCode::Right, ButtonState::Released);
-    }
-}
+#[no_mangle]
+#[jni_fn("name.jinleili.bevy.RustBridge")]
+pub fn device_motion(_env: *mut JNIEnv, _: jobject, obj: jlong, x: jfloat, _y: jfloat, _z: jfloat) {
+    let app = unsafe { &mut *(obj as *mut App) };
+    let x: f32 = x as _;
+    if x < -0.2 {
+        crate::change_input(app, KeyCode::Left, ButtonState::Released);
+        crate::change_input(app, KeyCode::Right, ButtonState::Pressed);
+    } else if x > 0.2 {
+        crate::change_input(app, KeyCode::Right, ButtonState::Released);
+        crate::change_input(app, KeyCode::Left, ButtonState::Pressed);
+    } else {
+        crate::change_input(app, KeyCode::Left, ButtonState::Released);
+        crate::change_input(app, KeyCode::Right, ButtonState::Released);
+    }
+}
 
-// bevy-in-app/src/lib.rs
+// bevy-in-app/src/lib.rs
 
-#[cfg(any(target_os = "android", target_os = "ios"))]
-pub(crate) fn change_input(app: &mut App, key_code: KeyCode, state: ButtonState) {
-    let input = KeyboardInput {
-        scan_code: if key_code == KeyCode::Left { 123 } else { 124 },
-        state,
-        key_code: Some(key_code),
-    };
-    app.world.cell().send_event(input);
-}

如何重启/退出 Bevy 引擎

Bevy 引擎默认的行为是,当所有窗口关闭时自动退出。显然,这不能满足当前的场景,因为我们需要在不销毁 iOS UIView 或 Android SurfaceView 的情况下原地重启 Bevy App。 为此,需要自定义一个退出函数,以确保在需要的时候可以手动退出:

rust
// bevy-in-app/src/lib.rs
+#[cfg(any(target_os = "android", target_os = "ios"))]
+pub(crate) fn change_input(app: &mut App, key_code: KeyCode, state: ButtonState) {
+    let input = KeyboardInput {
+        scan_code: if key_code == KeyCode::Left { 123 } else { 124 },
+        state,
+        key_code: Some(key_code),
+    };
+    app.world.cell().send_event(input);
+}

如何重启/退出 Bevy 引擎

Bevy 引擎默认的行为是,当所有窗口关闭时自动退出。显然,这不能满足当前的场景,因为我们需要在不销毁 iOS UIView 或 Android SurfaceView 的情况下原地重启 Bevy App。 为此,需要自定义一个退出函数,以确保在需要的时候可以手动退出:

rust
// bevy-in-app/src/lib.rs
 
-#[cfg(any(target_os = "android", target_os = "ios"))]
-pub(crate) fn exit_bevy_app(mut app: Box<App>) {
-    // 创建一个查询所有窗口的状态变量
-    let mut windows_state: SystemState<(Commands, Query<(Entity, &mut Window)>)> =
-        SystemState::from_world(&mut app.world);
-    // 获取命令列表与窗口列表的可变借用
-    let (mut commands, windows) = windows_state.get_mut(&mut app.world);
-    // 遍历并提交所有窗口实体的销毁命令
-    for (window, _focus) in windows.iter() {
-        commands.entity(window).despawn();
-    }
-    windows_state.apply(&mut app.world);
-    // 由于我们没有使用 Bevy App 的 runner 函数,需要手动 update
-    app.update();
-}

运行

Bevy in Android App
Bevy in iOS App
- +#[cfg(any(target_os = "android", target_os = "ios"))] +pub(crate) fn exit_bevy_app(mut app: Box<App>) { + // 创建一个查询所有窗口的状态变量 + let mut windows_state: SystemState<(Commands, Query<(Entity, &mut Window)>)> = + SystemState::from_world(&mut app.world); + // 获取命令列表与窗口列表的可变借用 + let (mut commands, windows) = windows_state.get_mut(&mut app.world); + // 遍历并提交所有窗口实体的销毁命令 + for (window, _focus) in windows.iter() { + commands.entity(window).despawn(); + } + windows_state.apply(&mut app.world); + // 由于我们没有使用 Bevy App 的 runner 函数,需要手动 update + app.update(); +}

运行

Bevy in Android App
Bevy in iOS App
+ \ No newline at end of file diff --git a/integration-and-debugging/index.html b/integration-and-debugging/index.html index 94de4cd9b..4d9563bfb 100644 --- a/integration-and-debugging/index.html +++ b/integration-and-debugging/index.html @@ -5,19 +5,21 @@ 楔子 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

楔子

调试工具集

教程的开篇我们就已提到:wgpu 是基于 WebGPU 规范的跨平台图形 API。也就是说,wgpu 不光能运行在 Web 及桌面环境里,更是能运行在 iOS、Android 两大移动操作系统上。

wgpu 的运行并不依赖于任何窗口程序,所以也不提供窗口的创建及管理功能,只有在创建基于窗口的绘制表面(Surface)时,才可能需要一个实现了 raw-window-handle 抽象接口的实参(之所以说是可能需要,是因为在 iOS/macOS 上,使用 CAMetalLayer 也能创建绘制表面的实例)。 winit 是一个实现了 raw-window-handle 抽象接口的、跨平台的窗口创建及管理(crate)。 在桌面端(macOS、Windows、Linux)及移动端(iOS、Android),winit 会接管整个 App (应用程序)的窗口管理(包括事件循环(Events loop))。

毫无疑问,对于游戏类 App, 使用 wgpu + winit 的组合是非常合适的。但是,大量非游戏类 App 也经常有使用图形 API 的需求(比如,图表、图片滤镜等),这些 App 需要用到大量的系统 UI 组件及交互,winit 这种接管整个 App 窗口的方式是不合适的。所以,将 wgpu 集成到现有的 iOS、Android App 且不使用 winit 将非常有用。

我们都知道,调试和分析是优化程序性能的必备技能。

虽然 wgpu 会在运行时验证 API 调用及参数设置来保证只有有效的工作负载才能提交给 GPU 执行,但是,这并不能保证渲染计算着色(Compute Shading)的正确性。本章中我们还会学习到如何利用调试工具来分析 wgpu 程序的性能及查找难以发现的错误!

加入 wgpu 微信学习交流群

- +
Skip to content

楔子

调试工具集

教程的开篇我们就已提到:wgpu 是基于 WebGPU 规范的跨平台图形 API。也就是说,wgpu 不光能运行在 Web 及桌面环境里,更是能运行在 iOS、Android 两大移动操作系统上。

wgpu 的运行并不依赖于任何窗口程序,所以也不提供窗口的创建及管理功能,只有在创建基于窗口的绘制表面(Surface)时,才可能需要一个实现了 raw-window-handle 抽象接口的实参(之所以说是可能需要,是因为在 iOS/macOS 上,使用 CAMetalLayer 也能创建绘制表面的实例)。 winit 是一个实现了 raw-window-handle 抽象接口的、跨平台的窗口创建及管理(crate)。 在桌面端(macOS、Windows、Linux)及移动端(iOS、Android),winit 会接管整个 App (应用程序)的窗口管理(包括事件循环(Events loop))。

毫无疑问,对于游戏类 App, 使用 wgpu + winit 的组合是非常合适的。但是,大量非游戏类 App 也经常有使用图形 API 的需求(比如,图表、图片滤镜等),这些 App 需要用到大量的系统 UI 组件及交互,winit 这种接管整个 App 窗口的方式是不合适的。所以,将 wgpu 集成到现有的 iOS、Android App 且不使用 winit 将非常有用。

我们都知道,调试和分析是优化程序性能的必备技能。

虽然 wgpu 会在运行时验证 API 调用及参数设置来保证只有有效的工作负载才能提交给 GPU 执行,但是,这并不能保证渲染计算着色(Compute Shading)的正确性。本章中我们还会学习到如何利用调试工具来分析 wgpu 程序的性能及查找难以发现的错误!

加入 wgpu 微信学习交流群

+ \ No newline at end of file diff --git a/integration-and-debugging/ios/index.html b/integration-and-debugging/ios/index.html index 3653af432..2dd4313b3 100644 --- a/integration-and-debugging/ios/index.html +++ b/integration-and-debugging/ios/index.html @@ -5,162 +5,165 @@ 与 iOS App 集成 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

与 iOS App 集成

与 iOS App 集成相比于 Android 要简单一些。

添加 iOS 构建目标支持

sh
# 添加 iOS 构建目标支持
-rustup target add aarch64-apple-ios 
+    
Skip to content

与 iOS App 集成

与 iOS App 集成相比于 Android 要简单一些。

添加 iOS 构建目标支持

sh
# 添加 iOS 构建目标支持
+rustup target add aarch64-apple-ios 
 
-# 添加 iOS 模拟器构建目标支持
-# Intel CPU Mac
-rustup target add x86_64-apple-ios
-# M1+ Mac
-rustup target add aarch64-apple-ios-sim

由于从 A7 芯片(iPhone 5S,iPad Mini 2) 开始,iPhone iPad 都是 64 位的设备,所以我们不需要 armv7s-apple-iosarmv7-apple-ios 这两个构建目标。

iOS 模拟器相比于真机设备的特殊之处

当运行 WebGPU 程序时,模拟器并不会试图完全模拟你正在模拟的 iOS 设备的 GPU。例如,如果选择 iPhone 14 Pro 模拟器,它不会试图模拟 A16 GPU 的能力。相反,模拟器会翻译你的任何调用,并将它们引导到 Mac 主机上的选定 GPU。

苹果为模拟器单独提供了一个设备对象,其功能被限制为苹果 GPU 家族的 Apple2 型号(也就是古早的 A8 芯片),这意味着模拟器往往比实际的 GPU 支持更少的功能或更多的限制。从这篇文档 可以查看到功能限制的详情。

开发调试 GPU 应用,使用真机永远是最好的选择。

定义 FFI

在 iOS/macOS 上,使用 CAMetalLayer 也能创建绘制表面的实例,所以我们无须去实现 raw-window-handle 抽象接口。

先给项目添加上必要的依赖:

toml
[target.'cfg(target_os = "ios")'.dependencies]
-libc = "*"
-objc = "0.2.7"

然后定义一个 IOSViewObj 结构体:

rust
#[repr(C)]
-pub struct IOSViewObj {
-    // metal_layer 所在的 UIView 容器
-    // UIView 有一系列方便的函数可供我们在 Rust 端来调用
-    pub view: *mut Object,
-    // 指向 iOS 端 CAMetalLayer 的指针
-    pub metal_layer: *mut c_void,
-    // 不同的 iOS 设备支持不同的屏幕刷新率,有时我们的 GPU 程序需要用到这类信息
-    pub maximum_frames: i32,
-    // 外部函数接口,用于给 iOS 端传递状态码
-    pub callback_to_swift: extern "C" fn(arg: i32),
-}

#[repr(C)] 属性标注 IOSViewObj 的内存布局兼容 C-ABI。

什么是 ABI?

ABI 是⼀个规范,它涵盖以下内容: · 调⽤约定。⼀个函数的调⽤过程本质就是参数、函数、返回值如何传递。编译器按照调⽤规则去编译,把数据放到相应的堆栈中,函数的调⽤⽅和被调⽤⽅(函数本⾝)都需要遵循这个统⼀的约定。 · 内存布局。主要是⼤⼩和对齐⽅式。 · 处理器指令集。 · ⽬标⽂件和库的⼆进制格式。

为什么使用 C-ABI?

不同的操作系统、编程语⾔、每种编程语⾔的不同编译器 实现基本都有⾃⼰规定或者遵循的 ABI 和调⽤规范。⽬前只能通过 FFI 技术遵循 C 语⾔ ABI 才可以做到编程语⾔的相互调⽤。也就是说,C-ABI 是唯⼀通⽤的稳定的标准 ABI。这是由历史原因决定的,C 语⾔伴随着操作系 统⼀路发展⽽来,导致其成为事实上的标准 ABI。

假设我们已经实现好了一个 wgpu 程序叫 WgpuCanvas, 现在来实现两个供 iOS 端调用的、控制 WgpuCanvas 初始化及帧渲染的函数:

rust
#[no_mangle]
-pub fn create_wgpu_canvas(ios_obj: IOSViewObj) -> *mut libc::c_void {
-    let obj = WgpuCanvas::new(AppSurface::new(ios_obj), 0_i32);
-    // 使用 Box 对 Rust 对象进行装箱操作。
-    // 我们无法将 Rust 对象直接传递给外部语言,通过装箱来传递此对象的裸指针 
-    let box_obj = Box::new(obj);
-    Box::into_raw(box_obj) as *mut libc::c_void
-}
+# 添加 iOS 模拟器构建目标支持
+# Intel CPU Mac
+rustup target add x86_64-apple-ios
+# M1+ Mac
+rustup target add aarch64-apple-ios-sim

由于从 A7 芯片(iPhone 5S,iPad Mini 2) 开始,iPhone iPad 都是 64 位的设备,所以我们不需要 armv7s-apple-iosarmv7-apple-ios 这两个构建目标。

iOS 模拟器相比于真机设备的特殊之处

当运行 WebGPU 程序时,模拟器并不会试图完全模拟你正在模拟的 iOS 设备的 GPU。例如,如果选择 iPhone 14 Pro 模拟器,它不会试图模拟 A16 GPU 的能力。相反,模拟器会翻译你的任何调用,并将它们引导到 Mac 主机上的选定 GPU。

苹果为模拟器单独提供了一个设备对象,其功能被限制为苹果 GPU 家族的 Apple2 型号(也就是古早的 A8 芯片),这意味着模拟器往往比实际的 GPU 支持更少的功能或更多的限制。从这篇文档 可以查看到功能限制的详情。

开发调试 GPU 应用,使用真机永远是最好的选择。

定义 FFI

在 iOS/macOS 上,使用 CAMetalLayer 也能创建绘制表面的实例,所以我们无须去实现 raw-window-handle 抽象接口。

先给项目添加上必要的依赖:

toml
[target.'cfg(target_os = "ios")'.dependencies]
+libc = "*"
+objc = "0.2.7"

然后定义一个 IOSViewObj 结构体:

rust
#[repr(C)]
+pub struct IOSViewObj {
+    // metal_layer 所在的 UIView 容器
+    // UIView 有一系列方便的函数可供我们在 Rust 端来调用
+    pub view: *mut Object,
+    // 指向 iOS 端 CAMetalLayer 的指针
+    pub metal_layer: *mut c_void,
+    // 不同的 iOS 设备支持不同的屏幕刷新率,有时我们的 GPU 程序需要用到这类信息
+    pub maximum_frames: i32,
+    // 外部函数接口,用于给 iOS 端传递状态码
+    pub callback_to_swift: extern "C" fn(arg: i32),
+}

#[repr(C)] 属性标注 IOSViewObj 的内存布局兼容 C-ABI。

什么是 ABI?

ABI 是⼀个规范,它涵盖以下内容: · 调⽤约定。⼀个函数的调⽤过程本质就是参数、函数、返回值如何传递。编译器按照调⽤规则去编译,把数据放到相应的堆栈中,函数的调⽤⽅和被调⽤⽅(函数本⾝)都需要遵循这个统⼀的约定。 · 内存布局。主要是⼤⼩和对齐⽅式。 · 处理器指令集。 · ⽬标⽂件和库的⼆进制格式。

为什么使用 C-ABI?

不同的操作系统、编程语⾔、每种编程语⾔的不同编译器 实现基本都有⾃⼰规定或者遵循的 ABI 和调⽤规范。⽬前只能通过 FFI 技术遵循 C 语⾔ ABI 才可以做到编程语⾔的相互调⽤。也就是说,C-ABI 是唯⼀通⽤的稳定的标准 ABI。这是由历史原因决定的,C 语⾔伴随着操作系 统⼀路发展⽽来,导致其成为事实上的标准 ABI。

假设我们已经实现好了一个 wgpu 程序叫 WgpuCanvas, 现在来实现两个供 iOS 端调用的、控制 WgpuCanvas 初始化及帧渲染的函数:

rust
#[no_mangle]
+pub fn create_wgpu_canvas(ios_obj: IOSViewObj) -> *mut libc::c_void {
+    let obj = WgpuCanvas::new(AppSurface::new(ios_obj), 0_i32);
+    // 使用 Box 对 Rust 对象进行装箱操作。
+    // 我们无法将 Rust 对象直接传递给外部语言,通过装箱来传递此对象的裸指针 
+    let box_obj = Box::new(obj);
+    Box::into_raw(box_obj) as *mut libc::c_void
+}
 
-#[no_mangle]
-pub fn enter_frame(obj: *mut libc::c_void) {
-    // 将指针转换为其指代的实际 Rust 对象,同时也拿回此对象的内存管理权
-    // from_raw 是 unsafe 函数,它的调用需要放在 unsafe {} 块中
-    let mut obj: Box<WgpuCanvas> = unsafe { Box::from_raw(obj as *mut _) };
-    obj.enter_frame();
-    // 将 obj 对象的内存管理权重新转交给调用方
-    Box::into_raw(obj);
-}

#[no_mangle] 属性告诉 Rust 关闭函数名称修改功能。如果不加这个属性,Rust 编译器就会修改函数名,这是现代编译器为了解决唯⼀名称解析引起的各种问题所引⼊的技术。如果函数名被修改了,外部编程语言就⽆法按原名称调⽤,开发者也没办法知道修改后的函数名。

你应该已注意到了,上面的 enter_frame(obj: *mut libc::c_void) 函数里,我们做了两次内存管理权的转移,先是取回了内存管理权,后又再次转交给调用方。有没有办法避免这两次转移来提升性能呢?可以,直接从裸指针获取到对象的可变借用:

rust
#[no_mangle]
-pub fn enter_frame(obj: *mut libc::c_void) {
-    // 直接获取到指针指代的 Rust 对象的可变借用
-    let obj = unsafe { &mut *(obj as *mut WgpuCanvas) };
-    obj.enter_frame();
-}

Unsafe Rust

Unsafe Rust 是 Safe Rust 的⼀个超集。也就是说,在 unsafe {} 块中,并不会禁⽤ Safe Rust 中的任何安全检查。它仅在进⾏以下五类操作时,不提供安全检查:

  • 裸指针的解引⽤或类型转换;
  • 调⽤ unsafe 的函数;
  • 访问或修改可变静态变量;
  • 实现 unsafe trait;
  • 读写 Union 联合体中的字段;

&mut *(obj as *mut WgpuCanvas) 之所以要放在 unsafe {} 块中,不仅仅是由于 obj 参数是裸指针,还因为 Rust 在编译阶段的静态安全检查此时完全没有⽤武之地,所以也就没必要提供安全检查了。

还需要写一个简单的 C 语言的头文件来对应上面定义的结构体与函数。 让我们按照惯例,使用项目编译出来的 .a 库文件名称为此头文件命名:

c
#ifndef libwgpu_on_app_h
-#define libwgpu_on_app_h
+#[no_mangle]
+pub fn enter_frame(obj: *mut libc::c_void) {
+    // 将指针转换为其指代的实际 Rust 对象,同时也拿回此对象的内存管理权
+    // from_raw 是 unsafe 函数,它的调用需要放在 unsafe {} 块中
+    let mut obj: Box<WgpuCanvas> = unsafe { Box::from_raw(obj as *mut _) };
+    obj.enter_frame();
+    // 将 obj 对象的内存管理权重新转交给调用方
+    Box::into_raw(obj);
+}

#[no_mangle] 属性告诉 Rust 关闭函数名称修改功能。如果不加这个属性,Rust 编译器就会修改函数名,这是现代编译器为了解决唯⼀名称解析引起的各种问题所引⼊的技术。如果函数名被修改了,外部编程语言就⽆法按原名称调⽤,开发者也没办法知道修改后的函数名。

你应该已注意到了,上面的 enter_frame(obj: *mut libc::c_void) 函数里,我们做了两次内存管理权的转移,先是取回了内存管理权,后又再次转交给调用方。有没有办法避免这两次转移来提升性能呢?可以,直接从裸指针获取到对象的可变借用:

rust
#[no_mangle]
+pub fn enter_frame(obj: *mut libc::c_void) {
+    // 直接获取到指针指代的 Rust 对象的可变借用
+    let obj = unsafe { &mut *(obj as *mut WgpuCanvas) };
+    obj.enter_frame();
+}

Unsafe Rust

Unsafe Rust 是 Safe Rust 的⼀个超集。也就是说,在 unsafe {} 块中,并不会禁⽤ Safe Rust 中的任何安全检查。它仅在进⾏以下五类操作时,不提供安全检查:

  • 裸指针的解引⽤或类型转换;
  • 调⽤ unsafe 的函数;
  • 访问或修改可变静态变量;
  • 实现 unsafe trait;
  • 读写 Union 联合体中的字段;

&mut *(obj as *mut WgpuCanvas) 之所以要放在 unsafe {} 块中,不仅仅是由于 obj 参数是裸指针,还因为 Rust 在编译阶段的静态安全检查此时完全没有⽤武之地,所以也就没必要提供安全检查了。

还需要写一个简单的 C 语言的头文件来对应上面定义的结构体与函数。 让我们按照惯例,使用项目编译出来的 .a 库文件名称为此头文件命名:

c

+#ifndef libwgpu_on_app_h
+#define libwgpu_on_app_h
 
-#include <stdint.h>
+#include <stdint.h>
 
-// 这个不透明结构体用来指代 Rust 端的 WgpuCanvas 对象
-struct wgpu_canvas;
+// 这个不透明结构体用来指代 Rust 端的 WgpuCanvas 对象
+struct wgpu_canvas;
 
-// 对应 Rust 端的 IOSViewObj 对象
-struct ios_view_obj {
-    void *view;
-    // CAMetalLayer
-    void *metal_layer;
-    int maximum_frames;
-    void (*callback_to_swift)(int32_t arg);
-};
+// 对应 Rust 端的 IOSViewObj 对象
+struct ios_view_obj {
+    void *view;
+    // CAMetalLayer
+    void *metal_layer;
+    int maximum_frames;
+    void (*callback_to_swift)(int32_t arg);
+};
 
-struct wgpu_canvas *create_wgpu_canvas(struct ios_view_obj object);
-void enter_frame(struct wgpu_canvas *data);
+struct wgpu_canvas *create_wgpu_canvas(struct ios_view_obj object);
+void enter_frame(struct wgpu_canvas *data);
 
-#endif /* libwgpu_on_app_h */

将上面的头文件放置到 iOS 项目中。如果你的 iOS 项目是使用 Swift 创建的,则还需要将头文件引入到桥接文件(XXX-Bridging-Header.h)中:

c
#ifndef wgpu_test_Bridging_Header_h
-#define wgpu_test_Bridging_Header_h
+#endif /* libwgpu_on_app_h */

将上面的头文件放置到 iOS 项目中。如果你的 iOS 项目是使用 Swift 创建的,则还需要将头文件引入到桥接文件(XXX-Bridging-Header.h)中:

c
#ifndef wgpu_test_Bridging_Header_h
+#define wgpu_test_Bridging_Header_h
 
-#import "libwgpu_on_app.h"
+#import "libwgpu_on_app.h"
 
-#endif /* wgpu_test_Bridging_Header_h */

App 中加载 WgpuCanvas 对象

先在 iOS 项目中自定义一个继承自 UIView 的 MetalView,代码很简单:

swift
class MetalView: UIView {
-    // 这里将 View 的默认 Layer 指定为 CAMetalLayer
-    override class var layerClass: AnyClass {
-        return CAMetalLayer.self
-    }
-    
-    override func awakeFromNib() {
-        super.awakeFromNib()
-        configLayer()
-    }
-    
-    private func configLayer() {
-        guard let layer = self.layer as? CAMetalLayer else {
-            return
-        }
-        layer.presentsWithTransaction = false
-        layer.framebufferOnly = true
-        // nativeScale is real physical pixel scale
-        // https://tomisacat.xyz/tech/2017/06/17/scale-nativescale-contentsscale.html
-        self.contentScaleFactor = UIScreen.main.nativeScale
-    }
-}

然后在 ViewController 中实例化 WgpuCanvas:

swift
// ...
-// 我是通过 StoryBoard 绑定的 MetalView,当然,你也可以手动创建
-@IBOutlet var metalV: MetalView!
-// 指向 Rust 端 WgpuCanvas 的指针
-var wgpuCanvas: OpaquePointer?
-lazy var displayLink: CADisplayLink = {
-    CADisplayLink.init(target: self, selector: #selector(enterFrame))
-}()
-// ...
-override func viewDidAppear(_ animated: Bool) {
-    super.viewDidAppear(animated)
-    // 我们需要保证 WgpuCanvas 只被实例化一次
-    if wgpuCanvas == nil {
-        // 将 Swift 对象转换为裸指针
-        let viewPointer = Unmanaged.passRetained(self.metalV).toOpaque()
-        let metalLayer = Unmanaged.passRetained(self.metalV.layer).toOpaque()
-        let maximumFrames = UIScreen.main.maximumFramesPerSecond
-        
-        // 创建 IOSViewObj 实例
-        let viewObj = ios_view_obj(view: viewPointer, metal_layer: metalLayer,maximum_frames: Int32(maximumFrames), callback_to_swift: callback_to_swift)
-        // 创建 WgpuCanvas 实例
-        wgpuCanvas = create_wgpu_canvas(viewObj)
-    }
-    self.displayLink.isPaused = false
-}
+#endif /* wgpu_test_Bridging_Header_h */

App 中加载 WgpuCanvas 对象

先在 iOS 项目中自定义一个继承自 UIView 的 MetalView,代码很简单:

swift
class MetalView: UIView {
+    // 这里将 View 的默认 Layer 指定为 CAMetalLayer
+    override class var layerClass: AnyClass {
+        return CAMetalLayer.self
+    }
+    
+    override func awakeFromNib() {
+        super.awakeFromNib()
+        configLayer()
+    }
+    
+    private func configLayer() {
+        guard let layer = self.layer as? CAMetalLayer else {
+            return
+        }
+        layer.presentsWithTransaction = false
+        layer.framebufferOnly = true
+        // nativeScale is real physical pixel scale
+        // https://tomisacat.xyz/tech/2017/06/17/scale-nativescale-contentsscale.html
+        self.contentScaleFactor = UIScreen.main.nativeScale
+    }
+}

然后在 ViewController 中实例化 WgpuCanvas:

swift
// ...
+// 我是通过 StoryBoard 绑定的 MetalView,当然,你也可以手动创建
+@IBOutlet var metalV: MetalView!
+// 指向 Rust 端 WgpuCanvas 的指针
+var wgpuCanvas: OpaquePointer?
+lazy var displayLink: CADisplayLink = {
+    CADisplayLink.init(target: self, selector: #selector(enterFrame))
+}()
+// ...
+override func viewDidAppear(_ animated: Bool) {
+    super.viewDidAppear(animated)
+    // 我们需要保证 WgpuCanvas 只被实例化一次
+    if wgpuCanvas == nil {
+        // 将 Swift 对象转换为裸指针
+        let viewPointer = Unmanaged.passRetained(self.metalV).toOpaque()
+        let metalLayer = Unmanaged.passRetained(self.metalV.layer).toOpaque()
+        let maximumFrames = UIScreen.main.maximumFramesPerSecond
+        
+        // 创建 IOSViewObj 实例
+        let viewObj = ios_view_obj(view: viewPointer, metal_layer: metalLayer,maximum_frames: Int32(maximumFrames), callback_to_swift: callback_to_swift)
+        // 创建 WgpuCanvas 实例
+        wgpuCanvas = create_wgpu_canvas(viewObj)
+    }
+    self.displayLink.isPaused = false
+}
 
-@objc func enterFrame() {
-    guard let canvas = self.wgpuCanvas else {
-        return
-    }
-    // 执行 WgpuCanvas 帧渲染
-    enter_frame(canvas)
-}
+@objc func enterFrame() {
+    guard let canvas = self.wgpuCanvas else {
+        return
+    }
+    // 执行 WgpuCanvas 帧渲染
+    enter_frame(canvas)
+}
 
-func callback_to_swift(arg: Int32) {
-    // callback_to_swift 函数是在 WgpuCanvas 中被调用的,WgpuCanvas 的代码很可能没有运行在 iOS 的 UI 线程,
-    // 如果此处涉及到 UI 操作,就必须切换到 UI 线程。
-    DispatchQueue.main.async {
-        switch arg {
-        // ...
-        }
-    }
-}

编译与运行

sh
# 编译为 iOS 真机支持的库
-# debug 库
-cargo build --target aarch64-apple-ios
-# release 库
-cargo build --target aarch64-apple-ios --release
+func callback_to_swift(arg: Int32) {
+    // callback_to_swift 函数是在 WgpuCanvas 中被调用的,WgpuCanvas 的代码很可能没有运行在 iOS 的 UI 线程,
+    // 如果此处涉及到 UI 操作,就必须切换到 UI 线程。
+    DispatchQueue.main.async {
+        switch arg {
+        // ...
+        }
+    }
+}

编译与运行

sh
# 编译为 iOS 真机支持的库
+# debug 库
+cargo build --target aarch64-apple-ios
+# release 库
+cargo build --target aarch64-apple-ios --release
 
-# 编译为 iOS 模拟器支持的库
-# M1+ Mac 上执行:
-cargo build --target aarch64-apple-ios-sim 
-# Intel 芯片的 Mac 上执行:
-cargo build --target x86_64-apple-ios
+# 编译为 iOS 模拟器支持的库
+# M1+ Mac 上执行:
+cargo build --target aarch64-apple-ios-sim 
+# Intel 芯片的 Mac 上执行:
+cargo build --target x86_64-apple-ios
 
-# 编译成功后需复制文件libwgpu_in_app.a至项目目录下
-# cp target/${TARGET}/${LIB_FOLDER}/libwgpu_in_app.a Apple/libs/${LIB_FOLDER}/libwgpu_in_app.a

打开 iOS 项目,在项目的 General 选项卡下找到 Frameworks, Libraries, and Embedded Content 栏, 导入系统的 libresolv.tbd 及我们刚编译的 .a 库,此导入只需要操作一次:

然后在 Build Settings 选项卡下找到 Search Paths -> Library Search Paths 栏, 将 .a 库的 debug 和 release 路径填到对应的字段中:

最后,还是在 Build Settings 选项卡下,找到 Linking -> Other Linker Flags 栏,添加 -ObjC-lc++ 两个链接标记:

当 Xcode 版本 >= 13 且 iOS Deployment Target >= 12.0 时,Other Linker Flags 栏的设置可以省略。

以上就是所有的关键代码和步骤了,我写了一个叫 wgpu-in-app 的示例程序,效果如下:

- +# 编译成功后需复制文件libwgpu_in_app.a至项目目录下 +# cp target/${TARGET}/${LIB_FOLDER}/libwgpu_in_app.a Apple/libs/${LIB_FOLDER}/libwgpu_in_app.a

打开 iOS 项目,在项目的 General 选项卡下找到 Frameworks, Libraries, and Embedded Content 栏, 导入系统的 libresolv.tbd 及我们刚编译的 .a 库,此导入只需要操作一次:

然后在 Build Settings 选项卡下找到 Search Paths -> Library Search Paths 栏, 将 .a 库的 debug 和 release 路径填到对应的字段中:

最后,还是在 Build Settings 选项卡下,找到 Linking -> Other Linker Flags 栏,添加 -ObjC-lc++ 两个链接标记:

当 Xcode 版本 >= 13 且 iOS Deployment Target >= 12.0 时,Other Linker Flags 栏的设置可以省略。

以上就是所有的关键代码和步骤了,我写了一个叫 wgpu-in-app 的示例程序,效果如下:

+ \ No newline at end of file diff --git a/integration-and-debugging/snapdragon-profiler/index.html b/integration-and-debugging/snapdragon-profiler/index.html index 823a2d8a7..832c5e6c9 100644 --- a/integration-and-debugging/snapdragon-profiler/index.html +++ b/integration-and-debugging/snapdragon-profiler/index.html @@ -5,23 +5,25 @@ 使用 Snapdragon Profiler 调试 wgpu 程序 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

使用 Snapdragon Profiler 调试 wgpu 程序

与 Android App 集成章节我们已经学习了 wgpu 与 Android App 的集成,现在来看看集成后的调试。

Snapdragon Profiler 工具介绍

Snapdragon Profiler 是高通公司开发的一款可运行在 Windows、Mac 和 Linux 平台上的性能分析和帧调试工具。 它通过 USB 与安卓设备连接,允许开发人员分析 CPU、GPU、内存等数据,以便我们发现并修复性能瓶颈。

Snapdragon Profiler 工具的功能特点:

  • 实时监测 GPU 性能;
  • 查看 CPU 调度和 GPU 阶段数据,了解应用程序将时间花在哪里;
  • GPU 捕获;
  • 单步调试绘制;
  • 查看和编辑着色器并在设备上预览结果;
  • 查看和调试像素历史记录;
  • 捕获和查看每次绘制调用的 GPU 指标;

上面的官网链接提供了对应平台安装包的免费下载。如果是 Mac 和 Linux 平台, 在安装 Snapdragon Profiler 之前需要先安装 mono 框架(mono 是 Windows .Net 框架的跨平台开源实现)。 在运行 Snapdragon Profiler 之前需要确保系统上安装了 Android Studio 或者 AndroidSDK,并且已将 ADB 路径添加到系统环境变量中。

实时模式查看 GPU 统计数据

USB 连接要调试的 Android 手机后打开 Snapdragon Profiler,点击窗口左边栏的 Start a Session, 此时右边出现的小弹窗里会列出当前与电脑连接的所有可调试设备,我们选中列表中的设备,勾选上弹窗左下角的 Auto connect 再点击右下角的 Connect,这样,下回再次调试同一台设备时就能自动连接到 Snapdragon Profiler 了:

连接后,有四种调试模式供我们选择:实时、追踪、帧捕获及 CPU 采样,现在选择实时(左图),在实时窗口的左边栏展示了实时指标列表,我们可以选择相应的指标项来收集 CPU、GPU、内存、网络、电源和散热的实时指标(右图):

实时模式
实时预览

上面的右图中,我选择了 GPU GeneralGPU Stalls 两个指标类别,窗口右边展示了每个细分指标的实时数据图表,要添加新的指标图表,只需双击类别(以添加类别中的所有指标)或单个指标,或者将类别或指标拖放到右侧的“图表”窗格中。

追踪模式检查片上内存装载

片上内存(on-chip memory)装载是影响移动应用中 GPU 性能的常见问题之一。在本节中,我们来学习如何使用 Snapdragon Profiler 查找和定位引起片上内存装载的应用程序代码。

Snapdragon Profiler 里将片上内存称之为图形内存(GMEM,全称 Graphic Memory),但是这里的图形内存跟显存容易混淆,它俩并不是一回事。故,下边统一使用片上内存来指代 GMEM。

什么是片上内存装载?

移动 GPU 的 Tiling 架构管线包括一个渲染通道。在渲染过程中,每个 Tile 都是先被渲染到片上内存中。按照驱动程序的默认行为,先前的帧缓冲区数据被从设备内存加载到每个 Tile 的片上内存中,即发生片上内存装载。

所谓 Tiling,本质上就是管理 GPU 内存的技术。Tiling 利用片上内存(on-chip memory)去降低设备内存的访问次数,从而降低 GPU 内存带宽的消耗及访问延迟。 正确理解并利用 Tiling 架构的内存管理特性,可以有效的提高 GPU 程序的性能。

为什么要尽可能地减少或避免片上内存装载?

因为每一次片上内存的加载都会减慢 GPU 的处理速度。
如果在 begin_render_pass 时通过设置 Clear() 来清理片上内存,驱动程序就可以避免在片上内存中装载帧缓冲区数据。虽然这涉及到一个额外的图形指令调用及其相关的开销,但它比为每个正在渲染的 Tile 将帧缓冲区数据加载回片上内存的开销要低得多。

导致片上内存装载的最主要原因是: 对驱动程序的不恰当提示。 应用程序代码使驱动程序认为需要帧缓冲区的先前内容。

检测片上内存装载

在 Snapdragon Profiler 的追踪模式下,我们可以让渲染阶段(Rendering Stages) 指标突出显示其自身通道中的片上内存装载(GMEM Loads)。

GPU 应用必须在项目的 AndroidManifest.xml 文件中包含 INTERNET 权限以启用图形 API 及 GPU 指标的追踪:

toml
<uses-permission android:name="android.permission.INTERNET" />

另外,Snapdragon Profiler 的追踪模式不允许追踪捕获超过 10 秒。也就是说,从点击 Start Capture 开始到点击 Stop Capture 结束,时长不得超过 10 秒。

启用追踪模式的操作步骤:

  • 连接好 Android 设备后,从 Start Page 界面单击左边栏的 System Trace Analysis,此时,就创建了一个新的 Trace 选项卡。
  • 选择刚创建的 Trace 选项卡,进入一个类似于实时模式的视图,然后在 Data Sources 边栏上端的应用列表中选中要追踪的应用(如果列表中找不到,就通过列表右上角的 Launch 按钮去启动要追踪的应用)。
  • Data Sources 边栏下端,选中 Process -> Vulkan -> Rendering Stages 项。

点击 Start Capture 开始追踪,在 10 秒内的任意段点击 Stop Capture,在等待 N 秒(取决于电脑性能)后就会展示出如下图表:

上图渲染阶段的设置对话框显示,这些片上内存装载消耗了总渲染时间的 23% 左右。

我们来看看源码帧渲染中的这条 begin_render_pass() 命令,颜色附件的片上操作使用了 Load:

rust
ops: wgpu::Operations {
-    // load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
-    load: wgpu::LoadOp::Load,
-    store: wgpu::StoreOp::Store
-},

但此处实际上没有装载之前的帧缓冲区数据的必要,我们改为使用 Clear() 改善性能之后,就回收了之前片上内存装载消耗的性能,下图可以看到 GMEM Load 统计项消失了(没有发生片上内存装载时就不会显示):

帧捕获模式

帧捕获模式允许捕获 GPU 应用程序的单一帧, 可以详细显示一个场景在 GPU 上的渲染情况。

启用帧捕获模式的操作与追踪模式几乎一样,唯一不同之处就是帧捕获模式在点击 Take Snapshot 捕获一帧数据后会自动结束捕获:

左侧红框区域是当前帧的着色器代码,它们是由 WGSL 自动转换而来的 SPIR-V 代码(当然,此处的着色器代码还取决于 GPU 应用所使用的图形后端,我使用的是 Vulkan 后端,如果使用 OpenGL 后端,此处就会显示 GLSL 代码)。红框下方的区域可以显示着色器的错误信息。说到这里就不得不提 WebGPU 的 WGSL 着色器语言的优势了:WGSL 在编译阶段时就得到了很好的验证,运行时的验证更是能方便地指出着色器与管线不一致的地方。所以,我们不需要依赖 Snapdragon Profiler 的着色器调试功能。

中间绿框区域是命令队列(Queue)提交给当前帧的所有 Vulkan 命令。选中某一条命令,右侧资源面板将展示出此命令涉及的所有资源:图形|计算管线,纹理,着色器等等。

右侧蓝框区域是资源面板。选中某一项资源,下方的面板将能展示出资源详情。
比如,选择纹理资源后,下方的 Image Preview 选项卡会展示可缩放的大图预览,鼠标在纹理图片上滑动可显示对应像素的 RGB 色值,Inspector 选项卡会展示纹理的格式及层次细节参数等(左图); 选择布局描述符资源后,Inspector 选项卡会展示出绑定组布局描述符(BindGroupLayoutDescriptor)详情(右图):

- +
Skip to content

使用 Snapdragon Profiler 调试 wgpu 程序

与 Android App 集成章节我们已经学习了 wgpu 与 Android App 的集成,现在来看看集成后的调试。

Snapdragon Profiler 工具介绍

Snapdragon Profiler 是高通公司开发的一款可运行在 Windows、Mac 和 Linux 平台上的性能分析和帧调试工具。 它通过 USB 与安卓设备连接,允许开发人员分析 CPU、GPU、内存等数据,以便我们发现并修复性能瓶颈。

Snapdragon Profiler 工具的功能特点:

  • 实时监测 GPU 性能;
  • 查看 CPU 调度和 GPU 阶段数据,了解应用程序将时间花在哪里;
  • GPU 捕获;
  • 单步调试绘制;
  • 查看和编辑着色器并在设备上预览结果;
  • 查看和调试像素历史记录;
  • 捕获和查看每次绘制调用的 GPU 指标;

上面的官网链接提供了对应平台安装包的免费下载。如果是 Mac 和 Linux 平台, 在安装 Snapdragon Profiler 之前需要先安装 mono 框架(mono 是 Windows .Net 框架的跨平台开源实现)。 在运行 Snapdragon Profiler 之前需要确保系统上安装了 Android Studio 或者 AndroidSDK,并且已将 ADB 路径添加到系统环境变量中。

实时模式查看 GPU 统计数据

USB 连接要调试的 Android 手机后打开 Snapdragon Profiler,点击窗口左边栏的 Start a Session, 此时右边出现的小弹窗里会列出当前与电脑连接的所有可调试设备,我们选中列表中的设备,勾选上弹窗左下角的 Auto connect 再点击右下角的 Connect,这样,下回再次调试同一台设备时就能自动连接到 Snapdragon Profiler 了:

连接后,有四种调试模式供我们选择:实时、追踪、帧捕获及 CPU 采样,现在选择实时(左图),在实时窗口的左边栏展示了实时指标列表,我们可以选择相应的指标项来收集 CPU、GPU、内存、网络、电源和散热的实时指标(右图):

实时模式
实时预览

上面的右图中,我选择了 GPU GeneralGPU Stalls 两个指标类别,窗口右边展示了每个细分指标的实时数据图表,要添加新的指标图表,只需双击类别(以添加类别中的所有指标)或单个指标,或者将类别或指标拖放到右侧的“图表”窗格中。

追踪模式检查片上内存装载

片上内存(on-chip memory)装载是影响移动应用中 GPU 性能的常见问题之一。在本节中,我们来学习如何使用 Snapdragon Profiler 查找和定位引起片上内存装载的应用程序代码。

Snapdragon Profiler 里将片上内存称之为图形内存(GMEM,全称 Graphic Memory),但是这里的图形内存跟显存容易混淆,它俩并不是一回事。故,下边统一使用片上内存来指代 GMEM。

什么是片上内存装载?

移动 GPU 的 Tiling 架构管线包括一个渲染通道。在渲染过程中,每个 Tile 都是先被渲染到片上内存中。按照驱动程序的默认行为,先前的帧缓冲区数据被从设备内存加载到每个 Tile 的片上内存中,即发生片上内存装载。

所谓 Tiling,本质上就是管理 GPU 内存的技术。Tiling 利用片上内存(on-chip memory)去降低设备内存的访问次数,从而降低 GPU 内存带宽的消耗及访问延迟。 正确理解并利用 Tiling 架构的内存管理特性,可以有效的提高 GPU 程序的性能。

为什么要尽可能地减少或避免片上内存装载?

因为每一次片上内存的加载都会减慢 GPU 的处理速度。
如果在 begin_render_pass 时通过设置 Clear() 来清理片上内存,驱动程序就可以避免在片上内存中装载帧缓冲区数据。虽然这涉及到一个额外的图形指令调用及其相关的开销,但它比为每个正在渲染的 Tile 将帧缓冲区数据加载回片上内存的开销要低得多。

导致片上内存装载的最主要原因是: 对驱动程序的不恰当提示。 应用程序代码使驱动程序认为需要帧缓冲区的先前内容。

检测片上内存装载

在 Snapdragon Profiler 的追踪模式下,我们可以让渲染阶段(Rendering Stages) 指标突出显示其自身通道中的片上内存装载(GMEM Loads)。

GPU 应用必须在项目的 AndroidManifest.xml 文件中包含 INTERNET 权限以启用图形 API 及 GPU 指标的追踪:

toml
<uses-permission android:name="android.permission.INTERNET" />

另外,Snapdragon Profiler 的追踪模式不允许追踪捕获超过 10 秒。也就是说,从点击 Start Capture 开始到点击 Stop Capture 结束,时长不得超过 10 秒。

启用追踪模式的操作步骤:

  • 连接好 Android 设备后,从 Start Page 界面单击左边栏的 System Trace Analysis,此时,就创建了一个新的 Trace 选项卡。
  • 选择刚创建的 Trace 选项卡,进入一个类似于实时模式的视图,然后在 Data Sources 边栏上端的应用列表中选中要追踪的应用(如果列表中找不到,就通过列表右上角的 Launch 按钮去启动要追踪的应用)。
  • Data Sources 边栏下端,选中 Process -> Vulkan -> Rendering Stages 项。

点击 Start Capture 开始追踪,在 10 秒内的任意段点击 Stop Capture,在等待 N 秒(取决于电脑性能)后就会展示出如下图表:

上图渲染阶段的设置对话框显示,这些片上内存装载消耗了总渲染时间的 23% 左右。

我们来看看源码帧渲染中的这条 begin_render_pass() 命令,颜色附件的片上操作使用了 Load:

rust
ops: wgpu::Operations {
+    // load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
+    load: wgpu::LoadOp::Load,
+    store: wgpu::StoreOp::Store
+},

但此处实际上没有装载之前的帧缓冲区数据的必要,我们改为使用 Clear() 改善性能之后,就回收了之前片上内存装载消耗的性能,下图可以看到 GMEM Load 统计项消失了(没有发生片上内存装载时就不会显示):

帧捕获模式

帧捕获模式允许捕获 GPU 应用程序的单一帧, 可以详细显示一个场景在 GPU 上的渲染情况。

启用帧捕获模式的操作与追踪模式几乎一样,唯一不同之处就是帧捕获模式在点击 Take Snapshot 捕获一帧数据后会自动结束捕获:

左侧红框区域是当前帧的着色器代码,它们是由 WGSL 自动转换而来的 SPIR-V 代码(当然,此处的着色器代码还取决于 GPU 应用所使用的图形后端,我使用的是 Vulkan 后端,如果使用 OpenGL 后端,此处就会显示 GLSL 代码)。红框下方的区域可以显示着色器的错误信息。说到这里就不得不提 WebGPU 的 WGSL 着色器语言的优势了:WGSL 在编译阶段时就得到了很好的验证,运行时的验证更是能方便地指出着色器与管线不一致的地方。所以,我们不需要依赖 Snapdragon Profiler 的着色器调试功能。

中间绿框区域是命令队列(Queue)提交给当前帧的所有 Vulkan 命令。选中某一条命令,右侧资源面板将展示出此命令涉及的所有资源:图形|计算管线,纹理,着色器等等。

右侧蓝框区域是资源面板。选中某一项资源,下方的面板将能展示出资源详情。
比如,选择纹理资源后,下方的 Image Preview 选项卡会展示可缩放的大图预览,鼠标在纹理图片上滑动可显示对应像素的 RGB 色值,Inspector 选项卡会展示纹理的格式及层次细节参数等(左图); 选择布局描述符资源后,Inspector 选项卡会展示出绑定组布局描述符(BindGroupLayoutDescriptor)详情(右图):

+ \ No newline at end of file diff --git a/integration-and-debugging/xcode/index.html b/integration-and-debugging/xcode/index.html index be2a8b85a..e667c491b 100644 --- a/integration-and-debugging/xcode/index.html +++ b/integration-and-debugging/xcode/index.html @@ -5,27 +5,29 @@ 使用 Xcode 调试 wgpu 程序 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

使用 Xcode 调试 wgpu 程序

Xcode 与 Metal

Xcode 是苹果官方提供的运行在 macOS 系统上的 IDE,它提供了一整套工具来方便为苹果设备(比如,iPad、iPhone、Mac 等等)创建、测试、优化 App 并最终发布到 App Store。Xcode 是免费的,如果你使用的是 macOS 系统,就可以从 App Store 上下载安装。

Metal 是 wgpu 在苹果平台上使用的图形后端,Metal 框架(Framework)通过提供低开销的底层 GPU 编程 API、图形和计算管线之间的紧密集成以及一套强大的 GPU 分析和调试工具,为苹果平台上的图形硬件加速提供动力。

2014 年,苹果在全球开发者大会 (WWDC) 上宣布为 iOS 推出全新的 Metal 框架, 一年后的 WWDC 2015,苹果宣布了 macOS 上也将支持 Metal,随后是 watchOS 和 tvOS。 随着 Metal API 的不断发展,在 WWDC 2017 上推出了新版本,Metal 2。它增加了对虚拟现实 (VR)、增强现实 (AR) 和机器学习 (ML) 的支持,以及其它许多新功能.

今年(WWDC 2022)推出的 Metal 3,引入了更为强大的功能,能帮助游戏和专业应用程序充分挖掘苹果芯片的潜力:借助高性能放大和抗锯齿(anti-aliasing)功能,能在更短的时间内渲染高分辨率的图形; 使用异步 I/O 将资源数据直接从存储优化流式传输到 Metal 纹理和缓冲区,能更快地加载资源; 新的光线追踪(Metal Ray Tracing)构建加速结构所花费的 GPU 时间更少,可以通过将剔除(Culling)工作转移到 GPU 以减少 CPU 开销,并且通过直接访问原始数据来优化光线求交和着色; 还有机器学习加速及新的网格着色器(Mesh Shader)等等。

创建调试项目

首先,我们打开 Xcode,使用菜单或启动对话框中的 Create a new Xcode project 来创建一个新项目(左图),然后单击 Other 选项卡, 选中外部构建系统(External Build System)作为项目模板(右图):

创建一个新的 Xcode 项目
选择项目模板

然后,在构建工具(Build Tool)字段中填入要使用的构建工具,Xcode 在运行项目时,将会调用此处设置的构建工具。 如果希望 Xcode 运行 cargo 构建命令,就可以填写 cargo 在你的 macOS 上的安装路径(默认的路径是 ${HOME}/.cargo/bin/cargo),也可以留空来表示跳过构建步骤,以避免 Xcode 为我们构建项目。

其余的字段实际上对我们来说并不重要,因此可以随意填,但产品名称(Product Name)字段还是尽量填一个有意义的名称吧:

构建工具设置

编辑 Scheme

接着编辑项目的方案(Scheme)来启用 Metal 的 GPU 帧捕获(Frame Capture)及 接口验证(API Validation)工具集。 通常,如果 Xcode 项目里的代码调用了 Metal 框架或任何其他使用了 Metal API 的框架,Xcode 会自动启用 GPU 帧捕获 和 Metal 接口验证,但我们的这个项目使用的是外部构建系统(External Build System),只能手动设置。

手动设置其实非常简单:

1,单击项目名称,然后单击 Edit Scheme(左图)(或者,从顶部菜单栏上选择 Product -> Scheme -> Edit Scheme);

2,在弹出的方案功能面板中选中左边栏的 Run,将右边的功能配置表切换至 Options 选项卡,设置 GPU Frame Capture 栏为 Metal 来启用 GPU 帧捕获(右图);

GPU Frame Capture 栏三个选项的详细说明:
  • Automatically:自动捕获项目中的 Metal 或 OpenGL ES API 使用情况。如果项目未链接到 Metal 或 OpenGL ES 框架,则 Capture GPU Frame 按钮不会显示在调试栏中。如果项目同时使用 Metal 和 OpenGL ES API,则可以单击并按住 Capture GPU Frame 按钮来选择要捕获的 API;
  • Metal:仅捕获项目中的 Metal API 使用情况;
  • Disabled:禁用 GPU 帧捕获功能;
Edit Scheme
GPU frame capture

3,在 Info 选项卡下的 executable 栏(左图),我们来指定要运行的可执行文件:单击可选项里的 Other,然后在目标目录中找到由 cargo 创建的二进制文件(右图)。

Info 选项卡
如何找到 cargo 创建的二进制可执行文件?

我们以管线教程的示例代码为例,先在项目根目录(learn-wgpu-zh/)运行管线示例:

cargo run --bin tutorial3-pipeline

然后在 learn-wgpu-zh/target/debug/ 路径下你就能找到一个名为 tutorial3-pipeline 的可执行文件。

接下来,点击 Start 按钮,Xcode 就能运行我们指定的二进制文件了:

你应该能看三角形绘制程序正在运行,并且 Xcode 控制台的一些输出告诉我们已启用了 Metal 接口验证:

查看实时 GPU 统计数据

仅需点击 Start 按钮运行我们要调试的程序,然后将 Xcode 左边栏切换至调试导航栏(Debug Navigator,通常 Xcode 会在调试项目启动时自动跳转到调试导航栏), 就能查看到实时的内存、CPU 占用及帧率(FPS)等,选中每一栏还可查看详情,以帧率栏为例,详情内还提供了 CPU 与 GPU 的每帧耗时,GPU 顶点片元运算单元的利用率等信息,方便我们诊断出程序的性能瓶颈之所在:

GPU 帧捕获

要启动 Metal 的调试器(Debugger),在点击 Start 按钮运行程序之后,需再点击 Xcode 调试区(Debug Area)工具栏的 Capture GPU Frame 按钮(上面有个 Metal 图标的按钮)。 捕获一帧之后,我们就能够使用所有常规的 Metal 调试工具(例如 GPU 统计、着色器及缓冲区调试等)了:

调试帧数据

我们以 Uniform 缓冲区 教程的示例为例来调试 Uniform 缓冲区中的数据: 捕获一帧之后,在调试导航栏选择 Render Pass 下的 All Resources 项,右边的列表里将会列出当前程序使用到的各种资源(纹理,缓冲区等)(左图),双击 Camera Buffer 就能格式化展示此缓冲区的数据了,同时数据展示区的下边会多出来一栏工具,方便我们切换数据的格式化类型及展示列数等(右图)。

Debug 导航栏
Camera 缓冲区的数据
不知你有没有注意到

左侧的调试导航栏中的 Render Pass 及右侧资源列表里展示的名称(如,Camera Buffer)都是我们在代码里设置的 label 参数:

rust
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-    label: Some("Render Pass"),
-    // ...
-};
-// ...
-let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-    label: Some("Camera Buffer"),
-    // ...
-});
- +
Skip to content

使用 Xcode 调试 wgpu 程序

Xcode 与 Metal

Xcode 是苹果官方提供的运行在 macOS 系统上的 IDE,它提供了一整套工具来方便为苹果设备(比如,iPad、iPhone、Mac 等等)创建、测试、优化 App 并最终发布到 App Store。Xcode 是免费的,如果你使用的是 macOS 系统,就可以从 App Store 上下载安装。

Metal 是 wgpu 在苹果平台上使用的图形后端,Metal 框架(Framework)通过提供低开销的底层 GPU 编程 API、图形和计算管线之间的紧密集成以及一套强大的 GPU 分析和调试工具,为苹果平台上的图形硬件加速提供动力。

2014 年,苹果在全球开发者大会 (WWDC) 上宣布为 iOS 推出全新的 Metal 框架, 一年后的 WWDC 2015,苹果宣布了 macOS 上也将支持 Metal,随后是 watchOS 和 tvOS。 随着 Metal API 的不断发展,在 WWDC 2017 上推出了新版本,Metal 2。它增加了对虚拟现实 (VR)、增强现实 (AR) 和机器学习 (ML) 的支持,以及其它许多新功能.

今年(WWDC 2022)推出的 Metal 3,引入了更为强大的功能,能帮助游戏和专业应用程序充分挖掘苹果芯片的潜力:借助高性能放大和抗锯齿(anti-aliasing)功能,能在更短的时间内渲染高分辨率的图形; 使用异步 I/O 将资源数据直接从存储优化流式传输到 Metal 纹理和缓冲区,能更快地加载资源; 新的光线追踪(Metal Ray Tracing)构建加速结构所花费的 GPU 时间更少,可以通过将剔除(Culling)工作转移到 GPU 以减少 CPU 开销,并且通过直接访问原始数据来优化光线求交和着色; 还有机器学习加速及新的网格着色器(Mesh Shader)等等。

创建调试项目

首先,我们打开 Xcode,使用菜单或启动对话框中的 Create a new Xcode project 来创建一个新项目(左图),然后单击 Other 选项卡, 选中外部构建系统(External Build System)作为项目模板(右图):

创建一个新的 Xcode 项目
选择项目模板

然后,在构建工具(Build Tool)字段中填入要使用的构建工具,Xcode 在运行项目时,将会调用此处设置的构建工具。 如果希望 Xcode 运行 cargo 构建命令,就可以填写 cargo 在你的 macOS 上的安装路径(默认的路径是 ${HOME}/.cargo/bin/cargo),也可以留空来表示跳过构建步骤,以避免 Xcode 为我们构建项目。

其余的字段实际上对我们来说并不重要,因此可以随意填,但产品名称(Product Name)字段还是尽量填一个有意义的名称吧:

构建工具设置

编辑 Scheme

接着编辑项目的方案(Scheme)来启用 Metal 的 GPU 帧捕获(Frame Capture)及 接口验证(API Validation)工具集。 通常,如果 Xcode 项目里的代码调用了 Metal 框架或任何其他使用了 Metal API 的框架,Xcode 会自动启用 GPU 帧捕获 和 Metal 接口验证,但我们的这个项目使用的是外部构建系统(External Build System),只能手动设置。

手动设置其实非常简单:

1,单击项目名称,然后单击 Edit Scheme(左图)(或者,从顶部菜单栏上选择 Product -> Scheme -> Edit Scheme);

2,在弹出的方案功能面板中选中左边栏的 Run,将右边的功能配置表切换至 Options 选项卡,设置 GPU Frame Capture 栏为 Metal 来启用 GPU 帧捕获(右图);

GPU Frame Capture 栏三个选项的详细说明:
  • Automatically:自动捕获项目中的 Metal 或 OpenGL ES API 使用情况。如果项目未链接到 Metal 或 OpenGL ES 框架,则 Capture GPU Frame 按钮不会显示在调试栏中。如果项目同时使用 Metal 和 OpenGL ES API,则可以单击并按住 Capture GPU Frame 按钮来选择要捕获的 API;
  • Metal:仅捕获项目中的 Metal API 使用情况;
  • Disabled:禁用 GPU 帧捕获功能;
Edit Scheme
GPU frame capture

3,在 Info 选项卡下的 executable 栏(左图),我们来指定要运行的可执行文件:单击可选项里的 Other,然后在目标目录中找到由 cargo 创建的二进制文件(右图)。

Info 选项卡
如何找到 cargo 创建的二进制可执行文件?

我们以管线教程的示例代码为例,先在项目根目录(learn-wgpu-zh/)运行管线示例:

cargo run --bin tutorial3-pipeline

然后在 learn-wgpu-zh/target/debug/ 路径下你就能找到一个名为 tutorial3-pipeline 的可执行文件。

接下来,点击 Start 按钮,Xcode 就能运行我们指定的二进制文件了:

你应该能看三角形绘制程序正在运行,并且 Xcode 控制台的一些输出告诉我们已启用了 Metal 接口验证:

查看实时 GPU 统计数据

仅需点击 Start 按钮运行我们要调试的程序,然后将 Xcode 左边栏切换至调试导航栏(Debug Navigator,通常 Xcode 会在调试项目启动时自动跳转到调试导航栏), 就能查看到实时的内存、CPU 占用及帧率(FPS)等,选中每一栏还可查看详情,以帧率栏为例,详情内还提供了 CPU 与 GPU 的每帧耗时,GPU 顶点片元运算单元的利用率等信息,方便我们诊断出程序的性能瓶颈之所在:

GPU 帧捕获

要启动 Metal 的调试器(Debugger),在点击 Start 按钮运行程序之后,需再点击 Xcode 调试区(Debug Area)工具栏的 Capture GPU Frame 按钮(上面有个 Metal 图标的按钮)。 捕获一帧之后,我们就能够使用所有常规的 Metal 调试工具(例如 GPU 统计、着色器及缓冲区调试等)了:

调试帧数据

我们以 Uniform 缓冲区 教程的示例为例来调试 Uniform 缓冲区中的数据: 捕获一帧之后,在调试导航栏选择 Render Pass 下的 All Resources 项,右边的列表里将会列出当前程序使用到的各种资源(纹理,缓冲区等)(左图),双击 Camera Buffer 就能格式化展示此缓冲区的数据了,同时数据展示区的下边会多出来一栏工具,方便我们切换数据的格式化类型及展示列数等(右图)。

Debug 导航栏
Camera 缓冲区的数据
不知你有没有注意到

左侧的调试导航栏中的 Render Pass 及右侧资源列表里展示的名称(如,Camera Buffer)都是我们在代码里设置的 label 参数:

rust
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+    label: Some("Render Pass"),
+    // ...
+};
+// ...
+let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+    label: Some("Camera Buffer"),
+    // ...
+});
+ \ No newline at end of file diff --git a/intermediate/compute-pipeline/index.html b/intermediate/compute-pipeline/index.html index 982aa824e..1e99c49dd 100644 --- a/intermediate/compute-pipeline/index.html +++ b/intermediate/compute-pipeline/index.html @@ -5,83 +5,85 @@ 计算管线 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

计算管线

计算管线ComputePipeline)是 WebGPU 中控制计算着色器(Compute Shader)阶段的管线。

计算管线适用于各种 GPU 通用计算场景,这是 WebGL 所不具备的。我们通过绑定的存储缓冲区(Storage Buffer)及存储纹理(Storage Texture)来获得计算输出。

创建一个计算管线

计算管线的创建相比于渲染管线简单得多,甚至我们都不需要显式创建并指定绑定组布局(BindGroupLayout):

rust
let compute_shader = device.create_shader_module(...);
-let compute_pipeline = device
-    .create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
-        layout: None, // 此处使用了隐式布局
-        module: &compute_shader,
-        entry_point: "cs_main",
-        label: None,
-    });

使用隐式绑定组布局有一个小小的局限:绑定的每个资源必须在入口点(Entry Point)中有被访问到。如果有没被访问的绑定资源,就必须显式指定布局。

使用存储缓冲区与存储纹理

存储缓冲区存储纹理都是 WGSL 中的资源类型。

而我们在 wgpu 中创建及绑定它们时,与其它缓冲区及纹理的创建是一样的,只需要在 usage 字段中标记出用途:

rust
let buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-        // ...
-        // VERTEX | STORAGE 表示此缓冲区可以做为顶点缓冲区以及存储缓冲区来使用
-        usage: BufferUsages::VERTEX | BufferUsages::STORAGE,
-    });
+    
Skip to content

计算管线

计算管线ComputePipeline)是 WebGPU 中控制计算着色器(Compute Shader)阶段的管线。

计算管线适用于各种 GPU 通用计算场景,这是 WebGL 所不具备的。我们通过绑定的存储缓冲区(Storage Buffer)及存储纹理(Storage Texture)来获得计算输出。

创建一个计算管线

计算管线的创建相比于渲染管线简单得多,甚至我们都不需要显式创建并指定绑定组布局(BindGroupLayout):

rust
let compute_shader = device.create_shader_module(...);
+let compute_pipeline = device
+    .create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
+        layout: None, // 此处使用了隐式布局
+        module: &compute_shader,
+        entry_point: "cs_main",
+        label: None,
+    });

使用隐式绑定组布局有一个小小的局限:绑定的每个资源必须在入口点(Entry Point)中有被访问到。如果有没被访问的绑定资源,就必须显式指定布局。

使用存储缓冲区与存储纹理

存储缓冲区存储纹理都是 WGSL 中的资源类型。

而我们在 wgpu 中创建及绑定它们时,与其它缓冲区及纹理的创建是一样的,只需要在 usage 字段中标记出用途:

rust
let buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+        // ...
+        // VERTEX | STORAGE 表示此缓冲区可以做为顶点缓冲区以及存储缓冲区来使用
+        usage: BufferUsages::VERTEX | BufferUsages::STORAGE,
+    });
 
-let tex = app.device.create_texture(&wgpu::TextureDescriptor {
-        // ...
-        // TEXTURE_BINDING | STORAGE_BINDING 表示此纹理可以做为采样纹理以及存储纹理来使用
-        usage: TextureUsages::TEXTURE_BINDING | TextureUsages::STORAGE_BINDING,
-    });

在 WGSL 中,存储缓冲区存储纹理有一些使用上的区别:

  • 存储缓冲区:默认访问模式是只读,可以通过 read_write 声明为可读可写, 读写操作类似于数组的访问与赋值;
  • 存储纹理:默认访问模式是只写,而且在 Web 端只能用只写模式,在 Native 端我们可以使用 TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES feature 来打开可读可写的访问模式。还有,存储纹理必须明确声明纹素的格式,且不支持带 Srgb 后缀的格式。从这里可以查阅到所有受支持的格式: WGSL 标准:存储纹素格式
rust
struct Particle {
-  pos : vec2f,
-  vel : vec2f,
-};
-// 存储缓冲区
-@group(0) @binding(0) var<storage, read_write> particles: array<Particle>;
-@group(0) @binding(1) var from_tex: texture_2d<f32>;
-// 存储纹理
-@group(0) @binding(2) var to_tex: texture_storage_2d<rgba8unorm, write>;
+let tex = app.device.create_texture(&wgpu::TextureDescriptor {
+        // ...
+        // TEXTURE_BINDING | STORAGE_BINDING 表示此纹理可以做为采样纹理以及存储纹理来使用
+        usage: TextureUsages::TEXTURE_BINDING | TextureUsages::STORAGE_BINDING,
+    });

在 WGSL 中,存储缓冲区存储纹理有一些使用上的区别:

  • 存储缓冲区:默认访问模式是只读,可以通过 read_write 声明为可读可写, 读写操作类似于数组的访问与赋值;
  • 存储纹理:默认访问模式是只写,而且在 Web 端只能用只写模式,在 Native 端我们可以使用 TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES feature 来打开可读可写的访问模式。还有,存储纹理必须明确声明纹素的格式,且不支持带 Srgb 后缀的格式。从这里可以查阅到所有受支持的格式: WGSL 标准:存储纹素格式
rust
struct Particle {
+  pos : vec2f,
+  vel : vec2f,
+};
+// 存储缓冲区
+@group(0) @binding(0) var<storage, read_write> particles: array<Particle>;
+@group(0) @binding(1) var from_tex: texture_2d<f32>;
+// 存储纹理
+@group(0) @binding(2) var to_tex: texture_storage_2d<rgba8unorm, write>;
 
-@compute @workgroup_size(16, 16)
-fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
-    let uv = vec2<i32>(global_id.xy);
-    // 读取存储缓冲区
-    let particle = particles[uv.x * uv.y];
+@compute @workgroup_size(16, 16)
+fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
+    let uv = vec2<i32>(global_id.xy);
+    // 读取存储缓冲区
+    let particle = particles[uv.x * uv.y];
 
-    var texel: vec4f;
-    // ...
-    // 写入纹素数据到存储纹理
-    textureStore(to_tex, uv, texel);
-}

计算通道

与创建渲染通道类似,我们需要使用 encoder 来创建计算通道ComputePass),然后用计算通道来编码所有的计算命令

rust
// let encoder = ...
-{
-    let mut cpass = encoder.begin_compute_pass(&ComputePassDescriptor::default());
-    cpass.set_pipeline(&self.compute_pipeline);
-    cpass.set_bind_group(0, &self.bind_group, &[]);
-    cpass.dispatch_workgroups(self.workgroup_count.0, self.workgroup_count.1, 1);
-}
-queue.submit(iter::once(encoder.finish()));

展示平面章节已讲解过 {} 开辟块空间的用途,这里就不再赘述。

dispatch_workgroups() 就是调度计算任务的命令,接受 3 个 u32 类型的值做为参数。这些参数具体是什么意思呢?那就要说到计算管线里最重要的一个概念了:

工作组

GPU 通过同时运行大量线程来实现并行处理的能力,而工作组(Workgroup)就是用于指定 GPU 如何组织这些线程。
一个工作组实质上就是一组调用,同一工作组中的线程同时分别执行一个计算着色器实例,并共享对工作组地址空间中着色器变量的访问。计算着色器通常被设计成线程相互独立运行,但线程在其工作组上进行协作也很常见。

可以将工作组理解为一个三维网格,网格中的每个点就代表一个线程。在图像处理中,网格通常是一个二维的线程矩阵(二维就是其中一个维度为 1 的特殊三维形式),代表整个图像,每个线程对应于正在处理的图像的一个像素。

我们通过(x, y, z)三个维度来声明计算着色器的工作组大小,每个维度上的默认值都是 1:

rust
@workgroup_size(32, 16) // x = 32, y = 16, z = 1

下图显示了正在被着色器处理的图像如何被划分为工作组,以及每个工作组是如何由单个线程组成:

工作组网格

那么,线程与工作组在计算着色器网格中的位置是如何被标识的呢?

内建输入量

WGSL 计算着色器有 5 个内建输入量(Built-in Input Values)用于标识当前线程及工作组:

  • global_invocation_id:当前线程在计算着色器网格中的全局三维坐标;
  • local_invocation_id:当前线程在所处的工作组中的局部三维坐标;
  • local_invocation_index:当前线程在所处的工作组中的线性化索引;
  • workgroup_id:当前工作组在工作组网格中的三维坐标;
  • num_workgroups:当前调度(dispatch)的工作组维度量;

最常用的是前三个内建输入量

例如,给定一个由 16 * 16 * 1 个线程组成的网格,将其划分为 2 * 4 * 1 个工作组,8 * 4 * 1 个线程。 那么:

  • 一个线程在网格中的 global_invocation_id 全局三维坐标是 (9, 10, 0)(左图);
  • 此线程在所处工作组中的 local_invocation_id 局部三维坐标是 (1, 2, 0)local_invocation_index 线性化索引是 17(右图);
  • 所处工作组在工作组网格中的 workgroup_id 三维坐标就是 (1, 2, 0) (右图的蓝绿色块):
global_invocation_id
local_invocation_id

这些内建输入量在着色器中具体如何使用呢?下边的着色器中演示了如何直接通过 global_invocation_id 来获取纹素的坐标:

rust
struct UniformParams {
-  img_size: vec2<i32>,
-  uv_offset: vec2<i32>,
-};
-@group(0) @binding(0) var<uniform> params: UniformParams;
-@group(0) @binding(1) var from_tex: texture_2d<f32>;
+    var texel: vec4f;
+    // ...
+    // 写入纹素数据到存储纹理
+    textureStore(to_tex, uv, texel);
+}

计算通道

与创建渲染通道类似,我们需要使用 encoder 来创建计算通道ComputePass),然后用计算通道来编码所有的计算命令

rust
// let encoder = ...
+{
+    let mut cpass = encoder.begin_compute_pass(&ComputePassDescriptor::default());
+    cpass.set_pipeline(&self.compute_pipeline);
+    cpass.set_bind_group(0, &self.bind_group, &[]);
+    cpass.dispatch_workgroups(self.workgroup_count.0, self.workgroup_count.1, 1);
+}
+queue.submit(iter::once(encoder.finish()));

展示平面章节已讲解过 {} 开辟块空间的用途,这里就不再赘述。

dispatch_workgroups() 就是调度计算任务的命令,接受 3 个 u32 类型的值做为参数。这些参数具体是什么意思呢?那就要说到计算管线里最重要的一个概念了:

工作组

GPU 通过同时运行大量线程来实现并行处理的能力,而工作组(Workgroup)就是用于指定 GPU 如何组织这些线程。
一个工作组实质上就是一组调用,同一工作组中的线程同时分别执行一个计算着色器实例,并共享对工作组地址空间中着色器变量的访问。计算着色器通常被设计成线程相互独立运行,但线程在其工作组上进行协作也很常见。

可以将工作组理解为一个三维网格,网格中的每个点就代表一个线程。在图像处理中,网格通常是一个二维的线程矩阵(二维就是其中一个维度为 1 的特殊三维形式),代表整个图像,每个线程对应于正在处理的图像的一个像素。

我们通过(x, y, z)三个维度来声明计算着色器的工作组大小,每个维度上的默认值都是 1:

rust
@workgroup_size(32, 16) // x = 32, y = 16, z = 1

下图显示了正在被着色器处理的图像如何被划分为工作组,以及每个工作组是如何由单个线程组成:

工作组网格

那么,线程与工作组在计算着色器网格中的位置是如何被标识的呢?

内建输入量

WGSL 计算着色器有 5 个内建输入量(Built-in Input Values)用于标识当前线程及工作组:

  • global_invocation_id:当前线程在计算着色器网格中的全局三维坐标;
  • local_invocation_id:当前线程在所处的工作组中的局部三维坐标;
  • local_invocation_index:当前线程在所处的工作组中的线性化索引;
  • workgroup_id:当前工作组在工作组网格中的三维坐标;
  • num_workgroups:当前调度(dispatch)的工作组维度量;

最常用的是前三个内建输入量

例如,给定一个由 16 * 16 * 1 个线程组成的网格,将其划分为 2 * 4 * 1 个工作组,8 * 4 * 1 个线程。 那么:

  • 一个线程在网格中的 global_invocation_id 全局三维坐标是 (9, 10, 0)(左图);
  • 此线程在所处工作组中的 local_invocation_id 局部三维坐标是 (1, 2, 0)local_invocation_index 线性化索引是 17(右图);
  • 所处工作组在工作组网格中的 workgroup_id 三维坐标就是 (1, 2, 0) (右图的蓝绿色块):
global_invocation_id
local_invocation_id

这些内建输入量在着色器中具体如何使用呢?下边的着色器中演示了如何直接通过 global_invocation_id 来获取纹素的坐标:

rust
struct UniformParams {
+  img_size: vec2<i32>,
+  uv_offset: vec2<i32>,
+};
+@group(0) @binding(0) var<uniform> params: UniformParams;
+@group(0) @binding(1) var from_tex: texture_2d<f32>;
 
-@compute @workgroup_size(16, 16)
-fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
-  // 纹素(图片的像素)坐标
-  let uv = vec2<i32>(global_id.xy);
-  // 判断当前坐标是否超出了纹素坐标范围
-  if (uv.x >= params.img_size.x || uv.y >= params.img_size.y) {
-    return;
-  }
+@compute @workgroup_size(16, 16)
+fn cs_main(@builtin(global_invocation_id) global_id: vec3<u32>) {
+  // 纹素(图片的像素)坐标
+  let uv = vec2<i32>(global_id.xy);
+  // 判断当前坐标是否超出了纹素坐标范围
+  if (uv.x >= params.img_size.x || uv.y >= params.img_size.y) {
+    return;
+  }
 
-  // 从纹理图像中读取纹素
-  var texel = textureLoad(from_tex, uv, 0);
-  // ...
-}

使用计算着色器需要注意避免坐标越界问题,因为通常纹理图像的分辨率与我们的工作组大小不是整除关系。

确定工作组大小与数量

工作组的最佳大小(指 x, y, z 三个维度的大小)并没有固定的值,需要结合实际使用场景来确定,而且能支持的最大值还与目标硬件有关。

我们从 adapter.limits() 里,能获取到当前设备支持的最大工作组大小 (maxComputeWorkgroupSizeX,maxComputeWorkgroupSizeY,maxComputeWorkgroupSizeZ),它们的默认值分别为 (256, 256, 64)

这三个维度的最大值容易让人误解,以为可以在计算着色器中设置 @workgroup_size(256, 256, 64)

事实上 WebGPU spec 的验证规则是:x * y * z <= maxComputeInvocationsPerWorkgroup(默认值为 256), 也就是说,设置的 @workgroup_size 三个维度的乘积不能大于 maxComputeInvocationsPerWorkgroup 这个限制值。

通常,当只需要在计算着色器中操作存储缓冲区时,使用一维工作组 @workgroup_size(x) 是合适的, y、z 维度保持默认值 1; 当需要操作纹理,使用二维或三维工作组 @workgroup_size(x,y) 会更便利。

现在我们可以来回答开头的问题了:

rust
cpass.dispatch_workgroups(workgroup_count.0, workgroup_count.1, workgroup_count.2);

上面计算通道的调度命令接收的参数具体是什么意思呢?

它们就是工作组网格的 3 个维度量。
1000 * 768 个线程组成的网格为例,假设工作组大小为@workgroup_size(32,16),那么:

rust
// 计算维度值并向上取整
-workgroup_count = ((1000 + (32 -1)) / 32, (768 + (16 -1)) / 16, 1);

CPU 端读取计算管线输出

案例展示/离屏渲染章节已讲解过如何从缓冲区中读取数据,存储纹理的读取也是一样的,这里不再赘述。

实战:实现高斯模糊

要掌握 WebGPU 计算管线,核心内容就是上边讲解的 “使用存储缓冲区与存储纹理” 及 “工作组”,某个具体实现的着色器逻辑代码并不重要,因为它们与顶点及片元着色器代码没有太大的区别。

点击下方的查看源码就能看到所有实现代码。如对图片模糊算法的细节感兴趣,可以查看这里

运行示例代码

此示例可在桌面端及 Chrome/Edge 113+、Chrome/Edge Canary、Firefox Nightly 浏览器中运行(如何开启浏览器 webgpu 试验功能)

- + // 从纹理图像中读取纹素 + var texel = textureLoad(from_tex, uv, 0) + // ... +}

使用计算着色器需要注意避免坐标越界问题,因为通常纹理图像的分辨率与我们的工作组大小不是整除关系。

确定工作组大小与数量

工作组的最佳大小(指 x, y, z 三个维度的大小)并没有固定的值,需要结合实际使用场景来确定,而且能支持的最大值还与目标硬件有关。

我们从 adapter.limits() 里,能获取到当前设备支持的最大工作组大小 (maxComputeWorkgroupSizeX,maxComputeWorkgroupSizeY,maxComputeWorkgroupSizeZ),它们的默认值分别为 (256, 256, 64)

这三个维度的最大值容易让人误解,以为可以在计算着色器中设置 @workgroup_size(256, 256, 64)

事实上 WebGPU spec 的验证规则是:x * y * z <= maxComputeInvocationsPerWorkgroup(默认值为 256), 也就是说,设置的 @workgroup_size 三个维度的乘积不能大于 maxComputeInvocationsPerWorkgroup 这个限制值。

通常,当只需要在计算着色器中操作存储缓冲区时,使用一维工作组 @workgroup_size(x) 是合适的, y、z 维度保持默认值 1; 当需要操作纹理,使用二维或三维工作组 @workgroup_size(x,y) 会更便利。

现在我们可以来回答开头的问题了:

rust
cpass.dispatch_workgroups(workgroup_count.0, workgroup_count.1, workgroup_count.2);

上面计算通道的调度命令接收的参数具体是什么意思呢?

它们就是工作组网格的 3 个维度量。
1000 * 768 个线程组成的网格为例,假设工作组大小为@workgroup_size(32,16),那么:

rust
// 计算维度值并向上取整
+workgroup_count = ((1000 + (32 -1)) / 32, (768 + (16 -1)) / 16, 1);

CPU 端读取计算管线输出

案例展示/离屏渲染章节已讲解过如何从缓冲区中读取数据,存储纹理的读取也是一样的,这里不再赘述。

实战:实现高斯模糊

要掌握 WebGPU 计算管线,核心内容就是上边讲解的 “使用存储缓冲区与存储纹理” 及 “工作组”,某个具体实现的着色器逻辑代码并不重要,因为它们与顶点及片元着色器代码没有太大的区别。

点击下方的查看源码就能看到所有实现代码。如对图片模糊算法的细节感兴趣,可以查看这里

运行示例代码

此示例可在桌面端及 Chrome/Edge 113+、Chrome/Edge Canary、Firefox Nightly 浏览器中运行(如何开启浏览器 webgpu 试验功能)

+ \ No newline at end of file diff --git a/intermediate/pbr-notes.html b/intermediate/pbr-notes.html index 0ce7e4853..7aaa3db42 100644 --- a/intermediate/pbr-notes.html +++ b/intermediate/pbr-notes.html @@ -5,19 +5,21 @@ Sources | 学习 wgpu - + + - - - - - + + + + + - + + - - + + \ No newline at end of file diff --git a/intermediate/tutorial10-lighting/index.html b/intermediate/tutorial10-lighting/index.html index 78bc805c8..c50905a88 100644 --- a/intermediate/tutorial10-lighting/index.html +++ b/intermediate/tutorial10-lighting/index.html @@ -5,593 +5,595 @@ 光照 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

光照

虽然我们的场景是 3D 的,但它们看起来像是平的,对象表面缺乏现实光照环境中的明暗变化,所以无法体现模型的三维特性。这是因为我们的模型没有考虑光线和对象表面之间的相互作用,无论如何摆放都会保持着相同的着色。

如果想修正这一点,就需要在我们的场景中添加光照(Lighting)。

在现实世界中,光源发出的光子会四处反射,最后进入我们的眼睛。 当观察对象上的一点时,我们所看到的颜色取决于多个光源和多个反射表面之间的多次相互作用。

在计算机图形学领域,为单个光子建模的计算成本极高。一个 100 瓦的灯泡每秒钟发出大约 3.27×10^20 个光子,再试想一下太阳每秒发出的光子的数量级。为了解决这个问题,我们要用数学来 “作弊”(也就是模拟。严格来说,这不是作弊,计算机图形学里有这么一句名言:"If it looks right, it is right.", 意思就是,如果它看起来是对的,那么它就是对的)。

我们来看看计算机图形学里常用的几个光照模型。

光线/路径追踪

光线/路径追踪(Ray/Path tracing)以虚拟摄像机模型为基础,但是对于每条与某个三角形相交的投影线,在计算光源对交点处明暗值的直接贡献之前,还要确定是否有一个或者多个光源能够照射到这个交点。

它是最接近光的真实工作方式的模型,所以我觉得必须提到它。但这是一个高级话题,我们不会在这里深入讨论。

Blinn-Phong 反射模型

对于大多数实时(real-time)应用来说,光线/路径追踪的计算成本实在太高了(尽管这种情况已经开始改变),所以通常使用一种更有效的,精度较低的 Phong 反射模型 来解决光照问题。它考虑了光线与材质的 3 种相互作用:环境光反射、漫反射和镜面反射。我们将学习 Blinn-Phong 反射模型,它能加速镜面反射的计算。

在开始学习之前,需要在我们的场景中添加一个光源:

rust
// lib.rs
-#[repr(C)]
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct LightUniform {
-    position: [f32; 3],
-    // 由于 Uniform 需要字段按 16 字节对齐,我们需要在这里使用一个填充字段
-    _padding: u32,
-    color: [f32; 3],
-    _padding2: u32,
-}

LightUniform 代表空间中的一个彩色点光源。虽然通常是使用纯白色的光,但使用其它颜色的光也是可以的。

使 WGSL 结构体内存字节对齐的经验法则是:字段保持按 2 的 N 次幂来对齐。 例如,一个 vec3 如果是 3 个单精度浮点数,它的大小为 12 字节,对齐后将被提升到 2 的下一个次幂,即 16 字节. 这意味着必须更加小心地布局你的结构体。

一些开发者会选择使用 vec4 而不是 vec3 来避免对齐问题。 你可以在 wgsl spec 中了解更多关于对齐规则的信息。

接下来,创建一个 Uniform 缓冲区来存储我们的光源:

rust
let light_uniform = LightUniform {
-    position: [2.0, 2.0, 2.0],
-    _padding: 0,
-    color: [1.0, 1.0, 1.0],
-    _padding2: 0,
-};
-
- // 我们希望能更新光源位置,所以用了 COPY_DST 这个使用范围标志
-let light_buffer = device.create_buffer_init(
-    &wgpu::util::BufferInitDescriptor {
-        label: Some("Light VB"),
-        contents: bytemuck::cast_slice(&[light_uniform]),
-        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-    }
-);

别忘记把 light_uniformlight_buffer 添加到 State。之后,我们为光源创建一个绑定组的布局绑定组

rust
let light_bind_group_layout =
-    device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-        entries: &[wgpu::BindGroupLayoutEntry {
-            binding: 0,
-            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
-            ty: wgpu::BindingType::Buffer {
-                ty: wgpu::BufferBindingType::Uniform,
-                has_dynamic_offset: false,
-                min_binding_size: None,
-            },
-            count: None,
-        }],
-        label: None,
-    });
-
-let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-    layout: &light_bind_group_layout,
-    entries: &[wgpu::BindGroupEntry {
-        binding: 0,
-        resource: light_buffer.as_entire_binding(),
-    }],
-    label: None,
-});

把它们添加到 State 中,同时更新 render_pipeline_layout

rust
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-    bind_group_layouts: &[
-        &texture_bind_group_layout,
-        &camera_bind_group_layout,
-        &light_bind_group_layout,
-    ],
-});

update() 函数中更新光源的位置,这样便能看到对象在不同角度下的光照效果:

rust
// 更新光源
-let old_position = glam::Vec3::from_array(self.light_uniform.position);
-self.light_uniform.position =
-    (glam::Quat::from_axis_angle(glam::Vec3::Y, consts::PI / 180.)
-        * old_position).into();
-self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light_uniform]));

上面的代码使光源围绕原点以每帧 1 度的速度旋转。

查看光源

出于调试的目的,如果能够查看光源本身的位置,以确保场景的光照效果是正确的,那就太好了。

尽管可以直接调整现有的渲染管线来绘制光源,但这可能不利于代码的维护。所以我们把创建渲染管线的代码提取到一个叫做 create_render_pipeline() 的新函数中:

rust
fn create_render_pipeline(
-    device: &wgpu::Device,
-    layout: &wgpu::PipelineLayout,
-    color_format: wgpu::TextureFormat,
-    depth_format: Option<wgpu::TextureFormat>,
-    vertex_layouts: &[wgpu::VertexBufferLayout],
-    shader: wgpu::ShaderModuleDescriptor,
-) -> wgpu::RenderPipeline {
-    let shader = device.create_shader_module(shader);
-
-    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-        label: Some("Render Pipeline"),
-        layout: Some(layout),
-        vertex: wgpu::VertexState {
-            module: &shader,
-            entry_point: "vs_main",
-            buffers: vertex_layouts,
-        },
-        fragment: Some(wgpu::FragmentState {
-            module: &shader,
-            entry_point: "fs_main",
-            targets: &[Some(wgpu::ColorTargetState {
-                format: color_format,
-                blend: Some(wgpu::BlendState {
-                    alpha: wgpu::BlendComponent::REPLACE,
-                    color: wgpu::BlendComponent::REPLACE,
-                }),
-                write_mask: wgpu::ColorWrites::ALL,
-            })],
-        }),
-        primitive: wgpu::PrimitiveState {
-            topology: wgpu::PrimitiveTopology::TriangleList,
-            strip_index_format: None,
-            front_face: wgpu::FrontFace::Ccw,
-            cull_mode: Some(wgpu::Face::Back),
-            // 此处设置为 Fill 以外的任何值都需要开启 Feature::NON_FILL_POLYGON_MODE
-            polygon_mode: wgpu::PolygonMode::Fill,
-            unclipped_depth: false,
-            conservative: false,
-        },
-        depth_stencil: depth_format.map(|format| wgpu::DepthStencilState {
-            format,
-            depth_write_enabled: true,
-            depth_compare: wgpu::CompareFunction::Less,
-            stencil: wgpu::StencilState::default(),
-            bias: wgpu::DepthBiasState::default(),
-        }),
-        multisample: wgpu::MultisampleState {
-            count: 1,
-            mask: !0,
-            alpha_to_coverage_enabled: false,
-        },
-    })
-}

修改 State::new() 中的代码来调用 create_render_pipeline 函数:

rust
let render_pipeline = {
-    let shader = wgpu::ShaderModuleDescriptor {
-        label: Some("Normal Shader"),
-        source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
-    };
-    create_render_pipeline(
-        &device,
-        &render_pipeline_layout,
-        config.format,
-        Some(texture::Texture::DEPTH_FORMAT),
-        &[model::ModelVertex::desc(), InstanceRaw::desc()],
-        shader,
-    )
-};

修改 model::DrawModel 以使用 light_bind_group

rust
// model.rs
-pub trait DrawModel<'a> {
-    fn draw_mesh(
-        &mut self,
-        mesh: &'a Mesh,
-        material: &'a Material,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'a Mesh,
-        material: &'a Material,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-
-    fn draw_model(
-        &mut self,
-        model: &'a Model,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-    fn draw_model_instanced(
-        &mut self,
-        model: &'a Model,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-}
-
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    fn draw_mesh(
-        &mut self,
-        mesh: &'b Mesh,
-        material: &'b Material,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group, light_bind_group);
-    }
-
-    fn draw_mesh_instanced(
-        &mut self,
-        mesh: &'b Mesh,
-        material: &'b Material,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
-        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
-        self.set_bind_group(0, &material.bind_group, &[]);
-        self.set_bind_group(1, camera_bind_group, &[]);
-        self.set_bind_group(2, light_bind_group, &[]);
-        self.draw_indexed(0..mesh.num_elements, 0, instances);
-    }
-
-    fn draw_model(
-        &mut self,
-        model: &'b Model,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.draw_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
-    }
-
-    fn draw_model_instanced(
-        &mut self,
-        model: &'b Model,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        for mesh in &model.meshes {
-            let material = &model.materials[mesh.material];
-            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
-        }
-    }
-}

完成这些后,就可以为我们的光源创建另一条渲染管线了:

rust
// lib.rs
-let light_render_pipeline = {
-    let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-        label: Some("Light Pipeline Layout"),
-        bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout],
-        push_constant_ranges: &[],
-    });
-    let shader = wgpu::ShaderModuleDescriptor {
-        label: Some("Light Shader"),
-        source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()),
-    };
-    create_render_pipeline(
-        &device,
-        &layout,
-        config.format,
-        Some(texture::Texture::DEPTH_FORMAT),
-        &[model::ModelVertex::desc()],
-        shader,
-    )
-};

我选择为 light_render_pipeline 创建一个单独的布局,因为它不需要常规渲染管线所需要的资源(主要是纹理)。

之后,我们来编写实际的着色器代码:

rust
// light.wgsl
-// 顶点着色器
-
-struct Camera {
-    view_proj: mat4x4f,
-}
-@group(0) @binding(0)
-var<uniform> camera: Camera;
-
-struct Light {
-    position: vec3f,
-    color: vec3f,
-}
-@group(1) @binding(0)
-var<uniform> light: Light;
-
-struct VertexInput {
-    @location(0) position: vec3f,
-};
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) color: vec3f,
-};
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-) -> VertexOutput {
-    let scale = 0.25;
-    var out: VertexOutput;
-    out.clip_position = camera.view_proj * vec4f(model.position * scale + light.position, 1.0);
-    out.color = light.color;
-    return out;
-}
-
-// 片元着色器
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return vec4f(in.color, 1.0);
-}

现在就能在 render() 函数中手动实现光源的绘制代码了,但是为了保持之前开发的绘制模式,让我们来创建一个名为 DrawLight 的新 trait:

rust
// model.rs
-pub trait DrawLight<'a> {
-    fn draw_light_mesh(
-        &mut self,
-        mesh: &'a Mesh,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-    fn draw_light_mesh_instanced(
-        &mut self,
-        mesh: &'a Mesh,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-
-    fn draw_light_model(
-        &mut self,
-        model: &'a Model,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-    fn draw_light_model_instanced(
-        &mut self,
-        model: &'a Model,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-}
-
-impl<'a, 'b> DrawLight<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    fn draw_light_mesh(
-        &mut self,
-        mesh: &'b Mesh,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.draw_light_mesh_instanced(mesh, 0..1, camera_bind_group, light_bind_group);
-    }
-
-    fn draw_light_mesh_instanced(
-        &mut self,
-        mesh: &'b Mesh,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
-        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
-        self.set_bind_group(0, camera_bind_group, &[]);
-        self.set_bind_group(1, light_bind_group, &[]);
-        self.draw_indexed(0..mesh.num_elements, 0, instances);
-    }
-
-    fn draw_light_model(
-        &mut self,
-        model: &'b Model,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        self.draw_light_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
-    }
-    fn draw_light_model_instanced(
-        &mut self,
-        model: &'b Model,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        for mesh in &model.meshes {
-            self.draw_light_mesh_instanced(mesh, instances.clone(), camera_bind_group, light_bind_group);
-        }
-    }
-}

最后,在渲染通道中加入光源的渲染:

rust
impl State {
-    // ...
-   fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
-        // ...
-        render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-
-        use crate::model::DrawLight; // 新增!
-        render_pass.set_pipeline(&self.light_render_pipeline); // 新增!
-        render_pass.draw_light_model(
-            &self.obj_model,
-            &self.camera_bind_group,
-            &self.light_bind_group,
-        ); // 新增!
-
-        render_pass.set_pipeline(&self.render_pipeline);
-        render_pass.draw_model_instanced(
-            &self.obj_model,
-            0..self.instances.len() as u32,
-            &self.camera_bind_group,
-            &self.light_bind_group, // 新增
-        );
-}

完成上面这些后,我们将看到如下渲染效果:

./light-in-scene.png

环境光反射

现实世界中,光线在进入我们的眼睛之前往往在物体表面之间经历了多次反射。这就是为什么你能看见阴影区域的东西。在计算机上实现这种互动模型很昂贵,所以需要“作弊”(模拟)。

环境光反射(Ambient Reflection)定义了对象表面所有点的环境光强度相同,代表从场景的其他部分反射过来的光照亮我们的对象。 环境光反射值 = 光源颜色 × 环境光强度 × 片元的颜色。

请在 shader.wgsl 中的纹理 Uniform 之下添加以下代码:

rust
struct Light {
-    position: vec3f,
-    color: vec3f,
-}
-@group(2) @binding(0)
-var<uniform> light: Light;

然后更新片元着色器代码来计算和使用环境光的色值:

rust
@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    let object_color: vec4f = textureSample(t_diffuse, s_diffuse, in.tex_coords);
-
-    // 我们不需要太强的环境光,强度设置为 0.1 就够了
-    let ambient_strength = 0.1;
-    let ambient_color = light.color * ambient_strength;
-
-    let result = ambient_color * object_color.rgb;
-
-    return vec4f(result, object_color.a);
-}

完成上面的修改后,我们将得到如下渲染效果:

./ambient_lighting.png

漫反射

理想的漫反射(Diffuse Reflection)表面将光线向所有方向均匀地散射,因此,这样的表面在所有的观察者看来亮度都一样。不过,反射出去的光线强度依赖于材质以及光源相对于表面的位置。

还记得我们的模型中包含的法向量(Normal Vector)吗?现在终于要使用它们了。 法向量(也叫做法线)代表一个表面的朝向。通过计算片元的法向量和它指向光源的向量之间的夹角,可以得到该片元漫反射强度值。我们使用点积来计算向量之间夹角的余弦值:

./normal_diagram.png

如果法向量和光源方向向量的点积为 1.0,则表示当前片元与光源对齐,将反射光线的全部强度。值为 0 或更低表示表面垂直于或远离光源,因此反射强度小。

我们将法向量加入到 shader.wgsl 中:

rust
struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) tex_coords: vec2f,
-    @location(2) normal: vec3f, // 新增!
-};

接着定义该值以及顶点的位置来传递给片元着色器:

rust
struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) tex_coords: vec2f,
-    @location(1) world_normal: vec3f,
-    @location(2) world_position: vec3f,
-};

我们先按原样传递法向量的值。这是错误的,稍后会修复它:

rust
@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    let model_matrix = mat4x4f(
-        instance.model_matrix_0,
-        instance.model_matrix_1,
-        instance.model_matrix_2,
-        instance.model_matrix_3,
-    );
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.world_normal = model.normal;
-    var world_position: vec4f = model_matrix * vec4f(model.position, 1.0);
-    out.world_position = world_position.xyz;
-    out.clip_position = camera.view_proj * world_position;
-    return out;
-}

现在来进行实际的计算,在 ambient_colorresult 代码行之间,添加如下代码:

rust
let light_dir = normalize(light.position - in.world_position);
-
-let diffuse_strength = max(dot(in.world_normal, light_dir), 0.0);
-let diffuse_color = light.color * diffuse_strength;

然后在 result 中包含漫反射光(diffuse_color):

rust
let result = (ambient_color + diffuse_color) * object_color.xyz;

完成后,我们将获得如下渲染效果:

./ambient_diffuse_wrong.png

法线矩阵

还记得我说过将顶点法向量直接传递给片元着色器是错误的吗?我们通过只在场景中保留一个在 y 轴上旋转了 180 度的立方体来探索这一点:

rust
const NUM_INSTANCES_PER_ROW: u32 = 1;
-
-// In the loop we create the instances in
-let rotation = glam::Quat::from_axis_angle(glam::Vec3::Y, (180.0).to_radians());

同时从 result 中移除环境光 ambient_color

rust
let result = (diffuse_color) * object_color.xyz;

我们将得到如下渲染效果:

./diffuse_wrong.png

渲染结果显然是错误的,因为光线照亮了立方体的背光侧。这是由于法向量并没有随对象一起旋转,因此无论对象转向哪个方向,法向量的方向始终没变:

./normal_not_rotated.png

我们将使用法线矩阵(Normal Matrix)将法向量变换为正确的方向。需要注意的是,法向量表示一个方向,它应该做为单位向量(Unit Vector)来参与整个计算过程。

虽然可以在顶点着色器中计算法线矩阵,但这涉及到反转模型矩阵 model_matrix,而 WGSL 实际上没有矩阵求逆的函数,必须自己编写此代码。更重要的是,矩阵求逆的计算在着色器里实际上非常昂贵,特别是每个顶点都要计算一遍。

我们的替代方案是,向 InstanceRaw 结构体添加一个 normal 字段。不用去反转模型矩阵,而是使用模型实例的旋转来创建一个 Matrix3 类型的法线矩阵。

我们只需要用到矩阵的旋转分量,故法线矩阵的类型是 Matrix3 而不是 Matrix4

rust
#[repr(C)]
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-#[allow(dead_code)]
-struct InstanceRaw {
-    model: [[f32; 4]; 4],
-    normal: [[f32; 3]; 3],
-}
-
-impl model::Vertex for InstanceRaw {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
-            // step_mode 的值需要从 Vertex 改为 Instance
-            // 这意味着只有着色器开始处理一次新实例化绘制时,才会使用下一个实例数据
-            step_mode: wgpu::VertexStepMode::Instance,
-            attributes: &[
-                wgpu::VertexAttribute {
-                    offset: 0,
-                    // 虽然顶点着色器现在只使用了插槽 0 和 1,但在后面的教程中将会使用 2、3 和 4
-                    // 此处从插槽 5 开始,确保与后面的教程不会有冲突
-                    shader_location: 5,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                // mat4 从技术的角度来看是由 4 个 vec4 构成,占用 4 个插槽。
-                // 我们需要为每个 vec4 定义一个插槽,然后在着色器中重新组装出 mat4。
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
-                    shader_location: 6,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
-                    shader_location: 7,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
-                    shader_location: 8,
-                    format: wgpu::VertexFormat::Float32x4,
-                },
-                // 新增!
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress,
-                    shader_location: 9,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress,
-                    shader_location: 10,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress,
-                    shader_location: 11,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-            ],
-        }
-    }
-}

然后修改 Instance 以创建法线矩阵:

rust
struct Instance {
-    position: glam::Vec3,
-    rotation: glam::Quat,
-}
-
-impl Instance {
-    fn to_raw(&self) -> InstanceRaw {
-        let model =
-            glam::Mat4::from_translation(self.position) * glam::Mat4::from_quat(self.rotation);
-        InstanceRaw {
-            model: model.to_cols_array_2d(),
-            // 新增!
-            normal: glam::Mat3::from_mat4(glam::Mat4::from_quat(self.rotation)).to_cols_array_2d(),
-        }
-    }
-}

现在,我们在顶点着色器中重构法线矩阵:

rust
struct InstanceInput {
-    @location(5) model_matrix_0: vec4f,
-    @location(6) model_matrix_1: vec4f,
-    @location(7) model_matrix_2: vec4f,
-    @location(8) model_matrix_3: vec4f,
-    // 新增!
-    @location(9) normal_matrix_0: vec3f,
-    @location(10) normal_matrix_1: vec3f,
-    @location(11) normal_matrix_2: vec3f,
-};
-
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-    @location(0) tex_coords: vec2f,
-    @location(1) world_normal: vec3f,
-    @location(2) world_position: vec3f,
-};
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    let model_matrix = mat4x4f(
-        instance.model_matrix_0,
-        instance.model_matrix_1,
-        instance.model_matrix_2,
-        instance.model_matrix_3,
-    );
-    // 新增!
-    let normal_matrix = mat3x3f(
-        instance.normal_matrix_0,
-        instance.normal_matrix_1,
-        instance.normal_matrix_2,
-    );
-    var out: VertexOutput;
-    out.tex_coords = model.tex_coords;
-    out.world_normal = normal_matrix * model.normal; // UPDATED!
-    var world_position: vec4f = model_matrix * vec4f(model.position, 1.0);
-    out.world_position = world_position.xyz;
-    out.clip_position = camera.view_proj * world_position;
-    return out;
-}

上边的实现是基于 世界空间 的。在视图空间(view-space),也就是眼空间(eye-space)来实现是更标准的做法,因为对象在离原点较远的地方会产生光照问题。 如果改为使用视图空间,就需要包括由视图矩阵产生的旋转。还须使用 view_matrix * model_matrix * light_position 来变换光源的位置,以防止摄像机移动后产生计算错误。

使用视图空间的最大优势是:能避免在大规模的场景中进行光照和其他计算时,由于对象之间的空间间距导致的问题。 因为当数字变得非常大时,浮点数精度会下降。视图空间使摄像机保持在原点,这意味着所有的计算都会使用较小的数字。 最终的光照计算过程是一样的,只是需要多一点点设置。

经过以上修改,光照效果现在看起来已经正确了:

./diffuse_right.png

现在把场景中其他对象加回来,再加上环境光反射,我们就会得到如下渲染效果:

./ambient_diffuse_lighting.png

如果能保证模型矩阵总是对对象应用统一的缩放因子,你就可以只使用模型矩阵了。GitHub 用户 @julhe 与我分享的这段代码可以做到这一点:

rust
out.world_normal = (model_matrix * vec4f(model.normal, 0.0)).xyz;

他利用的是这样一个事实:即用一个 4x4 矩阵乘以一个 w 分量为 0 的向量时,只有旋转和缩放将被应用于向量。 不过你需要对这个向量进行归一化(Normalize)处理,因为法向量必须是单位向量

模型矩阵的缩放因子必须是统一的才能适用。否则产生的法向量将是倾斜于表面的,如下图片所示:

./normal-scale-issue.png

镜面反射

镜面反射(Specular Reflection)模拟了现实世界中从特定角度观察物体时出现的高光(Highlights,亮点)。 如果曾在阳光下观察过汽车,定会注意到车身出现的高亮部分。基本上来说,我们在观察有光泽的物体时就会看到高光。 从表面光滑的物体上反射出去的光线会倾向于集中在一个角度的附近,所以高光的位置会根据你观察的角度而变化。

./specular_diagram.png

因为镜面反射是相对于视角而言的,所以我们需要将摄像机的位置传入顶点及片元着色器中:

rust
struct Camera {
-    view_pos: vec4f,
-    view_proj: mat4x4f,
-}
-@group(1) @binding(0)
-var<uniform> camera: Camera;

别忘了也要更新 light.wgsl 中的 Camera 结构体,一旦它与 Rust 中的 CameraUniform 结构体不匹配,光照效果就会渲染错误。

同时也需要更新 CameraUniform 结构体:

rust
// lib.rs
-#[repr(C)]
-#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct CameraUniform {
-    view_position: [f32; 4],
-    view_proj: [[f32; 4]; 4],
-}
-
-impl CameraUniform {
-    fn new() -> Self {
-        Self {
-            view_position: [0.0; 4],
-            view_proj: glam::Mat4::IDENTITY.into(),
-        }
-    }
-
-    fn update_view_proj(&mut self, camera: &Camera) {
-        // 使用 vec4 纯粹是因为 Uniform 的 16 字节对齐要求
-        self.view_position = camera.eye.extend(1.0).into();
-        self.view_proj = (camera.build_view_projection_matrix()).into();
-    }
-}

由于现在要在片元着色器中使用 Uniform,得修改它的可见性:

rust
// lib.rs
-let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-    entries: &[
-        wgpu::BindGroupLayoutBinding {
-            // ...
-            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, // 更新!
-            // ...
-        },
-        // ...
-    ],
-    label: None,
-});

计算从片元位置到摄像机的方向向量,并用此向量和法向量来计算反射方向 reflect_dir

rust
// shader.wgsl
-// 片元着色器内...
-let view_dir = normalize(camera.view_pos.xyz - in.world_position);
-let reflect_dir = reflect(-light_dir, in.world_normal);

然后使用点积来计算镜面反射的强度 specular_strength,并用它算出高光颜色 specular_color

rust
let specular_strength = pow(max(dot(view_dir, reflect_dir), 0.0), 32.0);
-let specular_color = specular_strength * light.color;

最后,将高光颜色合成到片元输出结果中:

rust
let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;

完成全部代码之后,就能得到如下渲染效果:

./ambient_diffuse_specular_lighting.png

假如只查看镜面反射得到的高光颜色 specular_color 本身,渲染效果如下:

./specular_lighting.png

半程向量

所谓的半程向量(Halfway Vector)也是一个单位向量,它正好在视图方向和光源方向的中间。

到目前为止,我们实际上只实现了 Blinn-Phong 的 Phong 部分。Phong 反射模型很好用,但在某些情况下会产生 bug。 Blinn-Phong 的 Blinn 部分来自于这样的事实:如果把 view_dirlight_dir 加在一起,对结果进行归一化处理后得到一个半程向量,然后再与法向量 normal 求点积,就会得到大致相同的渲染结果,且不会有使用反射方向 reflect_dir 可能产生的问题。

rust
let view_dir = normalize(camera.view_pos.xyz - in.world_position);
-let half_dir = normalize(view_dir + light_dir);
-
-let specular_strength = pow(max(dot(in.world_normal, half_dir), 0.0), 32.0);

在我们这个场景下很难看出有何不同,但以下就是改进了光照计算后的渲染效果:

./half_dir.png

- +
Skip to content

光照

虽然我们的场景是 3D 的,但它们看起来像是平的,对象表面缺乏现实光照环境中的明暗变化,所以无法体现模型的三维特性。这是因为我们的模型没有考虑光线和对象表面之间的相互作用,无论如何摆放都会保持着相同的着色。

如果想修正这一点,就需要在我们的场景中添加光照(Lighting)。

在现实世界中,光源发出的光子会四处反射,最后进入我们的眼睛。 当观察对象上的一点时,我们所看到的颜色取决于多个光源和多个反射表面之间的多次相互作用。

在计算机图形学领域,为单个光子建模的计算成本极高。一个 100 瓦的灯泡每秒钟发出大约 3.27×10^20 个光子,再试想一下太阳每秒发出的光子的数量级。为了解决这个问题,我们要用数学来 “作弊”(也就是模拟。严格来说,这不是作弊,计算机图形学里有这么一句名言:"If it looks right, it is right.", 意思就是,如果它看起来是对的,那么它就是对的)。

我们来看看计算机图形学里常用的几个光照模型。

光线/路径追踪

光线/路径追踪(Ray/Path tracing)以虚拟摄像机模型为基础,但是对于每条与某个三角形相交的投影线,在计算光源对交点处明暗值的直接贡献之前,还要确定是否有一个或者多个光源能够照射到这个交点。

它是最接近光的真实工作方式的模型,所以我觉得必须提到它。但这是一个高级话题,我们不会在这里深入讨论。

Blinn-Phong 反射模型

对于大多数实时(real-time)应用来说,光线/路径追踪的计算成本实在太高了(尽管这种情况已经开始改变),所以通常使用一种更有效的,精度较低的 Phong 反射模型 来解决光照问题。它考虑了光线与材质的 3 种相互作用:环境光反射、漫反射和镜面反射。我们将学习 Blinn-Phong 反射模型,它能加速镜面反射的计算。

在开始学习之前,需要在我们的场景中添加一个光源:

rust
// lib.rs
+#[repr(C)]
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct LightUniform {
+    position: [f32; 3],
+    // 由于 Uniform 需要字段按 16 字节对齐,我们需要在这里使用一个填充字段
+    _padding: u32,
+    color: [f32; 3],
+    _padding2: u32,
+}

LightUniform 代表空间中的一个彩色点光源。虽然通常是使用纯白色的光,但使用其它颜色的光也是可以的。

使 WGSL 结构体内存字节对齐的经验法则是:字段保持按 2 的 N 次幂来对齐。 例如,一个 vec3 如果是 3 个单精度浮点数,它的大小为 12 字节,对齐后将被提升到 2 的下一个次幂,即 16 字节。这意味着必须更加小心地布局你的结构体。

一些开发者会选择使用 vec4 而不是 vec3 来避免对齐问题。 你可以在 wgsl spec 中了解更多关于对齐规则的信息。

接下来,创建一个 Uniform 缓冲区来存储我们的光源:

rust
let light_uniform = LightUniform {
+    position: [2.0, 2.0, 2.0],
+    _padding: 0,
+    color: [1.0, 1.0, 1.0],
+    _padding2: 0,
+};
+
+ // 我们希望能更新光源位置,所以用了 COPY_DST 这个使用范围标志
+let light_buffer = device.create_buffer_init(
+    &wgpu::util::BufferInitDescriptor {
+        label: Some("Light VB"),
+        contents: bytemuck::cast_slice(&[light_uniform]),
+        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
+    }
+);

别忘记把 light_uniformlight_buffer 添加到 State。之后,我们为光源创建一个绑定组的布局绑定组

rust
let light_bind_group_layout =
+    device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+        entries: &[wgpu::BindGroupLayoutEntry {
+            binding: 0,
+            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
+            ty: wgpu::BindingType::Buffer {
+                ty: wgpu::BufferBindingType::Uniform,
+                has_dynamic_offset: false,
+                min_binding_size: None,
+            },
+            count: None,
+        }],
+        label: None,
+    });
+
+let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+    layout: &light_bind_group_layout,
+    entries: &[wgpu::BindGroupEntry {
+        binding: 0,
+        resource: light_buffer.as_entire_binding(),
+    }],
+    label: None,
+});

把它们添加到 State 中,同时更新 render_pipeline_layout

rust
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+    bind_group_layouts: &[
+        &texture_bind_group_layout,
+        &camera_bind_group_layout,
+        &light_bind_group_layout,
+    ],
+});

update() 函数中更新光源的位置,这样便能看到对象在不同角度下的光照效果:

rust
// 更新光源
+let old_position = glam::Vec3::from_array(self.light_uniform.position);
+self.light_uniform.position =
+    (glam::Quat::from_axis_angle(glam::Vec3::Y, consts::PI / 180.)
+        * old_position).into();
+self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light_uniform]));

上面的代码使光源围绕原点旋转,每次更新旋转 1 度。

查看光源

出于调试的目的,如果能够查看光源本身的位置,以确保场景的光照效果是正确的,那就太好了。

尽管可以直接调整现有的渲染管线来绘制光源,但这可能不利于代码的维护。所以我们把创建渲染管线的代码提取到一个叫做 create_render_pipeline() 的新函数中:

rust
fn create_render_pipeline(
+    device: &wgpu::Device,
+    layout: &wgpu::PipelineLayout,
+    color_format: wgpu::TextureFormat,
+    depth_format: Option<wgpu::TextureFormat>,
+    vertex_layouts: &[wgpu::VertexBufferLayout],
+    shader: wgpu::ShaderModuleDescriptor,
+) -> wgpu::RenderPipeline {
+    let shader = device.create_shader_module(shader);
+
+    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+        label: Some("Render Pipeline"),
+        layout: Some(layout),
+        vertex: wgpu::VertexState {
+            module: &shader,
+            entry_point: "vs_main",
+            buffers: vertex_layouts,
+        },
+        fragment: Some(wgpu::FragmentState {
+            module: &shader,
+            entry_point: "fs_main",
+            targets: &[Some(wgpu::ColorTargetState {
+                format: color_format,
+                blend: Some(wgpu::BlendState {
+                    alpha: wgpu::BlendComponent::REPLACE,
+                    color: wgpu::BlendComponent::REPLACE,
+                }),
+                write_mask: wgpu::ColorWrites::ALL,
+            })],
+        }),
+        primitive: wgpu::PrimitiveState {
+            topology: wgpu::PrimitiveTopology::TriangleList,
+            strip_index_format: None,
+            front_face: wgpu::FrontFace::Ccw,
+            cull_mode: Some(wgpu::Face::Back),
+            // 此处设置为 Fill 以外的任何值都需要开启 Feature::NON_FILL_POLYGON_MODE
+            polygon_mode: wgpu::PolygonMode::Fill,
+            unclipped_depth: false,
+            conservative: false,
+        },
+        depth_stencil: depth_format.map(|format| wgpu::DepthStencilState {
+            format,
+            depth_write_enabled: true,
+            depth_compare: wgpu::CompareFunction::Less,
+            stencil: wgpu::StencilState::default(),
+            bias: wgpu::DepthBiasState::default(),
+        }),
+        multisample: wgpu::MultisampleState {
+            count: 1,
+            mask: !0,
+            alpha_to_coverage_enabled: false,
+        },
+    })
+}

修改 State::new() 中的代码来调用 create_render_pipeline 函数:

rust
let render_pipeline = {
+    let shader = wgpu::ShaderModuleDescriptor {
+        label: Some("Normal Shader"),
+        source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
+    };
+    create_render_pipeline(
+        &device,
+        &render_pipeline_layout,
+        config.format,
+        Some(texture::Texture::DEPTH_FORMAT),
+        &[model::ModelVertex::desc(), InstanceRaw::desc()],
+        shader,
+    )
+};

修改 model::DrawModel 以使用 light_bind_group

rust
// model.rs
+pub trait DrawModel<'a> {
+    fn draw_mesh(
+        &mut self,
+        mesh: &'a Mesh,
+        material: &'a Material,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'a Mesh,
+        material: &'a Material,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+
+    fn draw_model(
+        &mut self,
+        model: &'a Model,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+    fn draw_model_instanced(
+        &mut self,
+        model: &'a Model,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+}
+
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    fn draw_mesh(
+        &mut self,
+        mesh: &'b Mesh,
+        material: &'b Material,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group, light_bind_group);
+    }
+
+    fn draw_mesh_instanced(
+        &mut self,
+        mesh: &'b Mesh,
+        material: &'b Material,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
+        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        self.set_bind_group(0, &material.bind_group, &[]);
+        self.set_bind_group(1, camera_bind_group, &[]);
+        self.set_bind_group(2, light_bind_group, &[]);
+        self.draw_indexed(0..mesh.num_elements, 0, instances);
+    }
+
+    fn draw_model(
+        &mut self,
+        model: &'b Model,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.draw_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
+    }
+
+    fn draw_model_instanced(
+        &mut self,
+        model: &'b Model,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            let material = &model.materials[mesh.material];
+            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
+        }
+    }
+}

完成这些后,就可以为我们的光源创建另一条渲染管线了:

rust
// lib.rs
+let light_render_pipeline = {
+    let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+        label: Some("Light Pipeline Layout"),
+        bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout],
+        push_constant_ranges: &[],
+    });
+    let shader = wgpu::ShaderModuleDescriptor {
+        label: Some("Light Shader"),
+        source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()),
+    };
+    create_render_pipeline(
+        &device,
+        &layout,
+        config.format,
+        Some(texture::Texture::DEPTH_FORMAT),
+        &[model::ModelVertex::desc()],
+        shader,
+    )
+};

我选择为 light_render_pipeline 创建一个单独的布局,因为它不需要常规渲染管线所需要的资源(主要是纹理)。

之后,我们来编写实际的着色器代码:

rust
// light.wgsl
+// 顶点着色器
+
+struct Camera {
+    view_proj: mat4x4f,
+}
+@group(0) @binding(0)
+var<uniform> camera: Camera;
+
+struct Light {
+    position: vec3f,
+    color: vec3f,
+}
+@group(1) @binding(0)
+var<uniform> light: Light;
+
+struct VertexInput {
+    @location(0) position: vec3f,
+};
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) color: vec3f,
+};
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+) -> VertexOutput {
+    let scale = 0.25;
+    var out: VertexOutput;
+    out.clip_position = camera.view_proj * vec4f(model.position * scale + light.position, 1.0);
+    out.color = light.color;
+    return out;
+}
+
+// 片元着色器
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return vec4f(in.color, 1.0);
+}

现在就能在 render() 函数中手动实现光源的绘制代码了,但是为了保持之前开发的绘制模式,让我们来创建一个名为 DrawLight 的新 trait:

rust
// model.rs
+pub trait DrawLight<'a> {
+    fn draw_light_mesh(
+        &mut self,
+        mesh: &'a Mesh,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+    fn draw_light_mesh_instanced(
+        &mut self,
+        mesh: &'a Mesh,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+
+    fn draw_light_model(
+        &mut self,
+        model: &'a Model,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+    fn draw_light_model_instanced(
+        &mut self,
+        model: &'a Model,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+}
+
+impl<'a, 'b> DrawLight<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    fn draw_light_mesh(
+        &mut self,
+        mesh: &'b Mesh,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.draw_light_mesh_instanced(mesh, 0..1, camera_bind_group, light_bind_group);
+    }
+
+    fn draw_light_mesh_instanced(
+        &mut self,
+        mesh: &'b Mesh,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
+        self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        self.set_bind_group(0, camera_bind_group, &[]);
+        self.set_bind_group(1, light_bind_group, &[]);
+        self.draw_indexed(0..mesh.num_elements, 0, instances);
+    }
+
+    fn draw_light_model(
+        &mut self,
+        model: &'b Model,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        self.draw_light_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
+    }
+    fn draw_light_model_instanced(
+        &mut self,
+        model: &'b Model,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            self.draw_light_mesh_instanced(mesh, instances.clone(), camera_bind_group, light_bind_group);
+        }
+    }
+}

最后,在渲染通道中加入光源的渲染:

rust
impl State {
+    // ...
+   fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
+        // ...
+        render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
+
+        use crate::model::DrawLight; // 新增!
+        render_pass.set_pipeline(&self.light_render_pipeline); // 新增!
+        render_pass.draw_light_model(
+            &self.obj_model,
+            &self.camera_bind_group,
+            &self.light_bind_group,
+        ); // 新增!
+
+        render_pass.set_pipeline(&self.render_pipeline);
+        render_pass.draw_model_instanced(
+            &self.obj_model,
+            0..self.instances.len() as u32,
+            &self.camera_bind_group,
+            &self.light_bind_group, // 新增
+        );
+}

完成上面这些后,我们将看到如下渲染效果:

./light-in-scene.png

环境光反射

现实世界中,光线在进入我们的眼睛之前往往在物体表面之间经历了多次反射。这就是为什么你能看见阴影区域的东西。在计算机上实现这种互动模型很昂贵,所以需要“作弊”(模拟)。

环境光反射(Ambient Reflection)定义了对象表面所有点的环境光强度相同,代表从场景的其他部分反射过来的光照亮我们的对象。 环境光反射值 = 光源颜色 × 环境光强度 × 片元的颜色。

请在 shader.wgsl 中的纹理 Uniform 之下添加以下代码:

rust
struct Light {
+    position: vec3f,
+    color: vec3f,
+}
+@group(2) @binding(0)
+var<uniform> light: Light;

然后更新片元着色器代码来计算和使用环境光的色值:

rust
@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    let object_color: vec4f = textureSample(t_diffuse, s_diffuse, in.tex_coords);
+
+    // 我们不需要太强的环境光,强度设置为 0.1 就够了
+    let ambient_strength = 0.1;
+    let ambient_color = light.color * ambient_strength;
+
+    let result = ambient_color * object_color.rgb;
+
+    return vec4f(result, object_color.a);
+}

完成上面的修改后,我们将得到如下渲染效果:

./ambient_lighting.png

漫反射

理想的漫反射(Diffuse Reflection)表面将光线向所有方向均匀地散射,因此,这样的表面在所有的观察者看来亮度都一样。不过,反射出去的光线强度依赖于材质以及光源相对于表面的位置。

还记得我们的模型中包含的法向量(Normal Vector)吗?现在终于要使用它们了。 法向量(也叫做法线)代表一个表面的朝向。通过计算片元的法向量和它指向光源的向量之间的夹角,可以得到该片元漫反射强度值。我们使用点积来计算向量之间夹角的余弦值:

./normal_diagram.png

如果法向量和光源方向向量的点积为 1.0,则表示当前片元与光源对齐,将反射光线的全部强度。值为 0 或更低表示表面垂直于或远离光源,因此反射强度小。

我们将法向量加入到 shader.wgsl 中:

rust
struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) tex_coords: vec2f,
+    @location(2) normal: vec3f, // 新增!
+};

接着定义该值以及顶点的位置来传递给片元着色器:

rust
struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) tex_coords: vec2f,
+    @location(1) world_normal: vec3f,
+    @location(2) world_position: vec3f,
+};

我们先按原样传递法向量的值。这是错误的,稍后会修复它:

rust
@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    let model_matrix = mat4x4f(
+        instance.model_matrix_0,
+        instance.model_matrix_1,
+        instance.model_matrix_2,
+        instance.model_matrix_3,
+    );
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.world_normal = model.normal;
+    var world_position: vec4f = model_matrix * vec4f(model.position, 1.0);
+    out.world_position = world_position.xyz;
+    out.clip_position = camera.view_proj * world_position;
+    return out;
+}

现在来进行实际的计算,在 ambient_colorresult 代码行之间,添加如下代码:

rust
let light_dir = normalize(light.position - in.world_position);
+
+let diffuse_strength = max(dot(in.world_normal, light_dir), 0.0);
+let diffuse_color = light.color * diffuse_strength;

然后在 result 中包含漫反射光(diffuse_color):

rust
let result = (ambient_color + diffuse_color) * object_color.xyz;

完成后,我们将获得如下渲染效果:

./ambient_diffuse_wrong.png

法线矩阵

还记得我说过将顶点法向量直接传递给片元着色器是错误的吗?我们通过只在场景中保留一个在 y 轴上旋转了 180 度的立方体来探索这一点:

rust
const NUM_INSTANCES_PER_ROW: u32 = 1;
+
+// In the loop we create the instances in
+let rotation = glam::Quat::from_axis_angle(glam::Vec3::Y, (180.0).to_radians());

同时从 result 中移除环境光 ambient_color

rust
let result = (diffuse_color) * object_color.xyz;

我们将得到如下渲染效果:

./diffuse_wrong.png

渲染结果显然是错误的,因为光线照亮了立方体的背光侧。这是由于法向量并没有随对象一起旋转,因此无论对象转向哪个方向,法向量的方向始终没变:

./normal_not_rotated.png

我们将使用法线矩阵(Normal Matrix)将法向量变换为正确的方向。需要注意的是,法向量表示一个方向,它应该做为单位向量(Unit Vector)来参与整个计算过程。

虽然可以在顶点着色器中计算法线矩阵,但这涉及到反转模型矩阵 model_matrix,而 WGSL 实际上没有矩阵求逆的函数,必须自己编写此代码。更重要的是,矩阵求逆的计算在着色器里实际上非常昂贵,特别是每个顶点都要计算一遍。

我们的替代方案是,向 InstanceRaw 结构体添加一个 normal 字段。不用去反转模型矩阵,而是使用模型实例的旋转来创建一个 Matrix3 类型的法线矩阵。

我们只需要用到矩阵的旋转分量,故法线矩阵的类型是 Matrix3 而不是 Matrix4

rust
#[repr(C)]
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+#[allow(dead_code)]
+struct InstanceRaw {
+    model: [[f32; 4]; 4],
+    normal: [[f32; 3]; 3],
+}
+
+impl model::Vertex for InstanceRaw {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
+            // step_mode 的值需要从 Vertex 改为 Instance
+            // 这意味着只有着色器开始处理一次新实例化绘制时,才会使用下一个实例数据
+            step_mode: wgpu::VertexStepMode::Instance,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    // 虽然顶点着色器现在只使用了插槽 0 和 1,但在后面的教程中将会使用 2、3 和 4
+                    // 此处从插槽 5 开始,确保与后面的教程不会有冲突
+                    shader_location: 5,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                // mat4 从技术的角度来看是由 4 个 vec4 构成,占用 4 个插槽。
+                // 我们需要为每个 vec4 定义一个插槽,然后在着色器中重新组装出 mat4。
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
+                    shader_location: 6,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
+                    shader_location: 7,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
+                    shader_location: 8,
+                    format: wgpu::VertexFormat::Float32x4,
+                },
+                // 新增!
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress,
+                    shader_location: 9,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress,
+                    shader_location: 10,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress,
+                    shader_location: 11,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+            ],
+        }
+    }
+}

然后修改 Instance 以创建法线矩阵:

rust
struct Instance {
+    position: glam::Vec3,
+    rotation: glam::Quat,
+}
+
+impl Instance {
+    fn to_raw(&self) -> InstanceRaw {
+        let model =
+            glam::Mat4::from_translation(self.position) * glam::Mat4::from_quat(self.rotation);
+        InstanceRaw {
+            model: model.to_cols_array_2d(),
+            // 新增!
+            normal: glam::Mat3::from_mat4(glam::Mat4::from_quat(self.rotation)).to_cols_array_2d(),
+        }
+    }
+}

现在,我们在顶点着色器中重构法线矩阵:

rust
struct InstanceInput {
+    @location(5) model_matrix_0: vec4f,
+    @location(6) model_matrix_1: vec4f,
+    @location(7) model_matrix_2: vec4f,
+    @location(8) model_matrix_3: vec4f,
+    // 新增!
+    @location(9) normal_matrix_0: vec3f,
+    @location(10) normal_matrix_1: vec3f,
+    @location(11) normal_matrix_2: vec3f,
+};
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+    @location(0) tex_coords: vec2f,
+    @location(1) world_normal: vec3f,
+    @location(2) world_position: vec3f,
+};
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    let model_matrix = mat4x4f(
+        instance.model_matrix_0,
+        instance.model_matrix_1,
+        instance.model_matrix_2,
+        instance.model_matrix_3,
+    );
+    // 新增!
+    let normal_matrix = mat3x3f(
+        instance.normal_matrix_0,
+        instance.normal_matrix_1,
+        instance.normal_matrix_2,
+    );
+    var out: VertexOutput;
+    out.tex_coords = model.tex_coords;
+    out.world_normal = normal_matrix * model.normal; // UPDATED!
+    var world_position: vec4f = model_matrix * vec4f(model.position, 1.0);
+    out.world_position = world_position.xyz;
+    out.clip_position = camera.view_proj * world_position;
+    return out;
+}

上边的实现是基于 世界空间 的。在视图空间(view-space),也就是眼空间(eye-space)来实现是更标准的做法,因为对象在离原点较远的地方会产生光照问题。 如果改为使用视图空间,就需要包括由视图矩阵产生的旋转。还须使用 view_matrix * model_matrix * light_position 来变换光源的位置,以防止摄像机移动后产生计算错误。

使用视图空间的最大优势是:能避免在大规模的场景中进行光照和其他计算时,由于对象之间的空间间距导致的问题。 因为当数字变得非常大时,浮点数精度会下降。视图空间使摄像机保持在原点,这意味着所有的计算都会使用较小的数字。 最终的光照计算过程是一样的,只是需要多一点点设置。

经过以上修改,光照效果现在看起来已经正确了:

./diffuse_right.png

现在把场景中其他对象加回来,再加上环境光反射,我们就会得到如下渲染效果:

./ambient_diffuse_lighting.png

如果能保证模型矩阵总是对对象应用统一的缩放因子,你就可以只使用模型矩阵了。GitHub 用户 @julhe 与我分享的这段代码可以做到这一点:

rust
out.world_normal = (model_matrix * vec4f(model.normal, 0.0)).xyz;

他利用的是这样一个事实:即用一个 4x4 矩阵乘以一个 w 分量为 0 的向量时,只有旋转和缩放将被应用于向量。 不过你需要对这个向量进行归一化(Normalize)处理,因为法向量必须是单位向量

模型矩阵的缩放因子必须是统一的才能适用。否则产生的法向量将是倾斜于表面的,如下图片所示:

./normal-scale-issue.png

镜面反射

镜面反射(Specular Reflection)模拟了现实世界中从特定角度观察物体时出现的高光(Highlights,亮点)。 如果曾在阳光下观察过汽车,定会注意到车身出现的高亮部分。基本上来说,我们在观察有光泽的物体时就会看到高光。 从表面光滑的物体上反射出去的光线会倾向于集中在一个角度的附近,所以高光的位置会根据你观察的角度而变化。

./specular_diagram.png

因为镜面反射是相对于视角而言的,所以我们需要将摄像机的位置传入顶点及片元着色器中:

rust
struct Camera {
+    view_pos: vec4f,
+    view_proj: mat4x4f,
+}
+@group(1) @binding(0)
+var<uniform> camera: Camera;

别忘了也要更新 light.wgsl 中的 Camera 结构体,一旦它与 Rust 中的 CameraUniform 结构体不匹配,光照效果就会渲染错误。

同时也需要更新 CameraUniform 结构体:

rust
// lib.rs
+#[repr(C)]
+#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct CameraUniform {
+    view_position: [f32; 4],
+    view_proj: [[f32; 4]; 4],
+}
+
+impl CameraUniform {
+    fn new() -> Self {
+        Self {
+            view_position: [0.0; 4],
+            view_proj: glam::Mat4::IDENTITY.into(),
+        }
+    }
+
+    fn update_view_proj(&mut self, camera: &Camera) {
+        // 使用 vec4 纯粹是因为 Uniform 的 16 字节对齐要求
+        self.view_position = camera.eye.extend(1.0).into();
+        self.view_proj = (camera.build_view_projection_matrix()).into();
+    }
+}

由于现在要在片元着色器中使用 Uniform,得修改它的可见性:

rust
// lib.rs
+let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+    entries: &[
+        wgpu::BindGroupLayoutBinding {
+            // ...
+            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, // 更新!
+            // ...
+        },
+        // ...
+    ],
+    label: None,
+});

计算从片元位置到摄像机的方向向量,并用此向量和法向量来计算反射方向 reflect_dir

rust
// shader.wgsl
+// 片元着色器内...
+let view_dir = normalize(camera.view_pos.xyz - in.world_position);
+let reflect_dir = reflect(-light_dir, in.world_normal);

然后使用点积来计算镜面反射的强度 specular_strength,并用它算出高光颜色 specular_color

rust
let specular_strength = pow(max(dot(view_dir, reflect_dir), 0.0), 32.0);
+let specular_color = specular_strength * light.color;

最后,将高光颜色合成到片元输出结果中:

rust
let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;

完成全部代码之后,就能得到如下渲染效果:

./ambient_diffuse_specular_lighting.png

假如只查看镜面反射得到的高光颜色 specular_color 本身,渲染效果如下:

./specular_lighting.png

半程向量

所谓的半程向量(Halfway Vector)也是一个单位向量,它正好在视图方向和光源方向的中间。

到目前为止,我们实际上只实现了 Blinn-Phong 的 Phong 部分。Phong 反射模型很好用,但在某些情况下会产生 bug。 Blinn-Phong 的 Blinn 部分来自于这样的事实:如果把 view_dirlight_dir 加在一起,对结果进行归一化处理后得到一个半程向量,然后再与法向量 normal 求点积,就会得到大致相同的渲染结果,且不会有使用反射方向 reflect_dir 可能产生的问题。

rust
let view_dir = normalize(camera.view_pos.xyz - in.world_position);
+let half_dir = normalize(view_dir + light_dir);
+
+let specular_strength = pow(max(dot(in.world_normal, half_dir), 0.0), 32.0);

在我们这个场景下很难看出有何不同,但以下就是改进了光照计算后的渲染效果:

./half_dir.png

+ \ No newline at end of file diff --git a/intermediate/tutorial11-normals/index.html b/intermediate/tutorial11-normals/index.html index b39976262..e4a610256 100644 --- a/intermediate/tutorial11-normals/index.html +++ b/intermediate/tutorial11-normals/index.html @@ -5,450 +5,452 @@ 法线映射 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

法线映射

添加光照后,我们的场景已经看起来很不错了。不过,对象表面还缺少实物的那种凹凸细节。如果使用的纹理是光滑的就不是问题,但是砖块纹理应该是比较粗糙的。 我们可以给模型添加更多的三角形来雕刻出表面的凹凸细节,但这将使得顶点数据倍增而渲染变慢,而且也很难知道在哪里添加新的三角形。这,就是法线映射(Normal Mapping)的用武之地了。

法线映射也叫凹凸映射(Bump Mapping),是一种纹理映射技术,它不用增加模型的几何复杂度就能够模拟具有复杂凹凸细节的表面。与简单的纹理映射不同,当绘制对象的表面时,法线映射技术通过扰动表面的法向量来改变它的形状,使得着色处理后的颜色能反映出表面几何特性的变化。

还记得在实例化绘制教程中,我们尝试了在纹理中存储实例数据吗?法线贴图(Normal Map)就是存储着法向量数据的纹理!除了顶点法向量外,我们还将在光照计算中使用法线贴图中的法向量。

我们的砖块纹理对应的法线贴图(也就是法线纹理)长这样:

./cube-normal.png

纹理的 r、g、b 分量对应于法向量的 x、y 和 z 坐标分量。所有的 z 值都应该是正的,这就是为什么法线贴图有一个蓝色的色调。

我们来修改 model.rs 中的材质 Material 结构体,新增一个法线纹理 normal_texture 字段:

rust
pub struct Material {
-    pub name: String,
-    pub diffuse_texture: texture::Texture,
-    pub normal_texture: texture::Texture, // 更新!
-    pub bind_group: wgpu::BindGroup,
-}

还得更新纹理绑定组布局 texture_bind_group_layout 以包括法线贴图:

rust
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-    entries: &[
-        // ...
-        // 法线贴图
-        wgpu::BindGroupLayoutEntry {
-            binding: 2,
-            visibility: wgpu::ShaderStages::FRAGMENT,
-            ty: wgpu::BindingType::Texture {
-                multisampled: false,
-                sample_type: wgpu::TextureSampleType::Float { filterable: true },
-                view_dimension: wgpu::TextureViewDimension::D2,
-            },
-            count: None,
-        },
-        wgpu::BindGroupLayoutEntry {
-            binding: 3,
-            visibility: wgpu::ShaderStages::FRAGMENT,
-            ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
-            count: None,
-        },
-    ],
-    label: Some("texture_bind_group_layout"),
-});

resources.rsload_model() 函数中创建材质的循环里,添加以下代码来实际加载法线贴图:

rust
// resources.rs
-let mut materials = Vec::new();
-for m in obj_materials? {
-    let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
-    // 新增!
-    let normal_texture = load_texture(&m.normal_texture, device, queue).await?;
-
-    materials.push(model::Material::new(
-        device,
-        &m.name,
-        diffuse_texture,
-        normal_texture, // 新增!
-        layout,
-    ));
-}

上面使用的 Material::new() 函数的具体代码如下:

rust
impl Material {
-    pub fn new(
-        device: &wgpu::Device,
-        name: &str,
-        diffuse_texture: texture::Texture,
-        normal_texture: texture::Texture, // 新增!
-        layout: &wgpu::BindGroupLayout,
-    ) -> Self {
-        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-            layout,
-            entries: &[
-                wgpu::BindGroupEntry {
-                    binding: 0,
-                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
-                },
-                wgpu::BindGroupEntry {
-                    binding: 1,
-                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
-                },
-                // 新增!
-                wgpu::BindGroupEntry {
-                    binding: 2,
-                    resource: wgpu::BindingResource::TextureView(&normal_texture.view),
-                },
-                wgpu::BindGroupEntry {
-                    binding: 3,
-                    resource: wgpu::BindingResource::Sampler(&normal_texture.sampler),
-                },
-            ],
-            label: Some(name),
-        });
-
-        Self {
-            name: String::from(name),
-            diffuse_texture,
-            normal_texture, // 新增!
-            bind_group,
-        }
-    }
-}

现在我们可以在片元着色器中使用纹理了:

rust
// 片元着色器
-
-@group(0) @binding(0)
-var t_diffuse: texture_2d<f32>;
-@group(0)@binding(1)
-var s_diffuse: sampler;
-@group(0)@binding(2)
-var t_normal: texture_2d<f32>;
-@group(0) @binding(3)
-var s_normal: sampler;
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    let object_color: vec4f = textureSample(t_diffuse, s_diffuse, in.tex_coords);
-    let object_normal: vec4f = textureSample(t_normal, s_normal, in.tex_coords);
-    // 环境光强度
-    let ambient_strength = 0.1;
-    let ambient_color = light.color * ambient_strength;
-
-    // Create the lighting vectors
-    let tangent_normal = object_normal.xyz * 2.0 - 1.0;
-    let light_dir = normalize(light.position - in.world_position);
-    let view_dir = normalize(camera.view_pos.xyz - in.world_position);
-    let half_dir = normalize(view_dir + light_dir);
-
-    let diffuse_strength = max(dot(tangent_normal, light_dir), 0.0);
-    let diffuse_color = light.color * diffuse_strength;
-
-    let specular_strength = pow(max(dot(tangent_normal, half_dir), 0.0), 32.0);
-    let specular_color = specular_strength * light.color;
-
-    let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
-
-    return vec4f(result, object_color.a);
-}

如果现在运行代码,你会发现渲染效果看起来不太对劲。让我们将效果与上一个教程比较一下:

场景中应该被点亮的部分是黑暗的,反之亦然。

从切空间到世界空间

光照教程的法线矩阵 部分有提到:我们是在世界空间中进行光照计算的。也就是说,整个场景的方向是相对于世界坐标系而言的。 从法线纹理中提取的法向量都处在正 Z 方向上,也就是说我们的光照计算认为模型的所有表面都朝向大致相同的方向。这被称为切空间(Tangent Space,也叫做切向量空间)。

光照教程 中我们用顶点法向量来表示表面的方向。现在,可以用它来将法线贴图中的法向量从切空间变换到世界空间。实现此变换需要用到一点点线性代数。

我们将创建一个矩阵,代表相对于顶点法向量的坐标空间(Coordinate Space)。然后使用它来变换法线贴图数据,使其处于世界空间:

rust
let coordinate_system = mat3x3f(
-    vec3(1, 0, 0), // x axis (右)
-    vec3(0, 1, 0), // y axis (上)
-    vec3(0, 0, 1)  // z axis (前)
-);

切向量与副切向量

我们已经有了需要的 3 个向量中的一个,即法向量。另外两个是切向量(Tangent Vector)与副切向量(Bitangent Vector, 也被叫作副法向量(Binormal))。切向量是与法向量垂直且表面平行的向量(也就是不与表面相交)。副切向量是同时垂直于法向量与切向量的向量,所以可以由法向量与切向量的叉积计算得出。切向量、副切向量和法向量一起分别代表坐标空间 x、y 和 z 轴。

一些模型格式会在顶点数据中包括切向量与副切向量,但 OBJ 没有。我们得手动计算,可以从现有的顶点数据中推导出切向量与副切向量。请看下图:

可以使用三角形的边和法线来计算切向量与副切向量。首先,我们需要更新在 model.rs 中的顶点 ModelVertex 结构体:

rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-pub struct ModelVertex {
-    position: [f32; 3],
-    tex_coords: [f32; 2],
-    normal: [f32; 3],
-    // 新增!
-    tangent: [f32; 3],
-    bitangent: [f32; 3],
-}

同时也需要更新顶点缓冲区布局 VertexBufferLayout:

rust
impl Vertex for ModelVertex {
-    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
-        use std::mem;
-        wgpu::VertexBufferLayout {
-            array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
-            step_mode: wgpu::VertexStepMode::Vertex,
-            attributes: &[
-                // ...
-
-                // Tangent and bitangent
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
-                    shader_location: 3,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-                wgpu::VertexAttribute {
-                    offset: mem::size_of::<[f32; 11]>() as wgpu::BufferAddress,
-                    shader_location: 4,
-                    format: wgpu::VertexFormat::Float32x3,
-                },
-            ],
-        }
-    }
-}

现在可以计算新的切向量与副切向量了, 用以下代码来更新 resource.rsload_model() 函数的网格生成:

rust
let meshes = models
-    .into_iter()
-    .map(|m| {
-        let mut vertices = (0..m.mesh.positions.len() / 3)
-            .map(|i| model::ModelVertex {
-                position: [
-                    m.mesh.positions[i * 3],
-                    m.mesh.positions[i * 3 + 1],
-                    m.mesh.positions[i * 3 + 2],
-                ],
-                tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
-                normal: [
-                    m.mesh.normals[i * 3],
-                    m.mesh.normals[i * 3 + 1],
-                    m.mesh.normals[i * 3 + 2],
-                ],
-                // 随后会计算实际值来替换
-                tangent: [0.0; 3],
-                bitangent: [0.0; 3],
-            })
-            .collect::<Vec<_>>();
-
-        let indices = &m.mesh.indices;
-        let mut triangles_included = vec![0; vertices.len()];
-
-        // 遍历三角形的三个顶点来计算切向量与副切向量.
-        for c in indices.chunks(3) {
-            let v0 = vertices[c[0] as usize];
-            let v1 = vertices[c[1] as usize];
-            let v2 = vertices[c[2] as usize];
-
-            let pos0: glam::Vec3 = v0.position.into();
-            let pos1: glam::Vec3 = v1.position.into();
-            let pos2: glam::Vec3 = v2.position.into();
-
-            let uv0: glam::Vec2 = v0.tex_coords.into();
-            let uv1: glam::Vec2 = v1.tex_coords.into();
-            let uv2: glam::Vec2 = v2.tex_coords.into();
-
-            // 计算三角形的边
-            let delta_pos1 = pos1 - pos0;
-            let delta_pos2 = pos2 - pos0;
-
-            // 计算切向量/副切向量需要用到的两个方向向量
-            let delta_uv1 = uv1 - uv0;
-            let delta_uv2 = uv2 - uv0;
-
-            // 求解以下方程组
-            //     delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
-            //     delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
-            // 幸运的是,在我发现这个方程的地方提供了如下求解方案!
-            let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
-            let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
-            // 我们翻转副切向量以启用具有 wgpu 纹理坐标系的右手标架的法线贴图
-            let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * -r;
-
-            // 我们为三角形中的每个顶点使用相同的切向量/副切向量
-            vertices[c[0] as usize].tangent =
-                (tangent + glam::Vec3::from_array(vertices[c[0] as usize].tangent)).into();
-            vertices[c[1] as usize].tangent =
-                (tangent + glam::Vec3::from_array(vertices[c[1] as usize].tangent)).into();
-            vertices[c[2] as usize].tangent =
-                (tangent + glam::Vec3::from_array(vertices[c[2] as usize].tangent)).into();
-            vertices[c[0] as usize].bitangent =
-                (bitangent + glam::Vec3::from_array(vertices[c[0] as usize].bitangent)).into();
-            vertices[c[1] as usize].bitangent =
-                (bitangent + glam::Vec3::from_array(vertices[c[1] as usize].bitangent)).into();
-            vertices[c[2] as usize].bitangent =
-                (bitangent + glam::Vec3::from_array(vertices[c[2] as usize].bitangent)).into();
-
-            // 用于计算顶点上切向量/副切向量的平均值
-            triangles_included[c[0] as usize] += 1;
-            triangles_included[c[1] as usize] += 1;
-            triangles_included[c[2] as usize] += 1;
-        }
-
-        // 计算切向量/副切向量的平均值
-        for (i, n) in triangles_included.into_iter().enumerate() {
-            let denom = 1.0 / n as f32;
-            let mut v = &mut vertices[i];
-            v.tangent = (glam::Vec3::from_array(v.tangent) * denom).into();
-            v.bitangent = (glam::Vec3::from_array(v.bitangent) * denom).into();
-        }
-
-        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-            label: Some(&format!("{:?} Vertex Buffer", file_name)),
-            contents: bytemuck::cast_slice(&vertices),
-            usage: wgpu::BufferUsages::VERTEX,
-        });
-        let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-            label: Some(&format!("{:?} Index Buffer", file_name)),
-            contents: bytemuck::cast_slice(&m.mesh.indices),
-            usage: wgpu::BufferUsages::INDEX,
-        });
-
-        model::Mesh {
-            name: file_name.to_string(),
-            vertex_buffer,
-            index_buffer,
-            num_elements: m.mesh.indices.len() as u32,
-            material: m.mesh.material_id.unwrap_or(0),
-        }
-    })
-    .collect::<Vec<_>>();

从世界空间到切空间

由于法线贴图默认是在切空间中,该计算中使用的所有其他变量也得变换为切空间。我们需要在顶点着色器中构建切向量矩阵,首先,修改 VertexInput 来包括之前计算的切向量与副切向量:

rust
struct VertexInput {
-    @location(0) position: vec3f,
-    @location(1) tex_coords: vec2f;
-    @location(2) normal: vec3f;
-    @location(3) tangent: vec3f;
-    @location(4) bitangent: vec3f;
-};

接下来构建切向量矩阵 tangent_matrix,然后将顶点,光源和视图坐标变换到切空间:

rust
struct VertexOutput {
-    @builtin(position) clip_position: vec4f;
-    @location(0) tex_coords: vec2f;
-    // 更新!
-    @location(1) tangent_position: vec3f;
-    @location(2) tangent_light_position: vec3f;
-    @location(3) tangent_view_position: vec3f;
-};
-
-@vertex
-fn vs_main(
-    model: VertexInput,
-    instance: InstanceInput,
-) -> VertexOutput {
-    // ...
-    let normal_matrix = mat3x3f(
-        instance.normal_matrix_0,
-        instance.normal_matrix_1,
-        instance.normal_matrix_2,
-    );
-
-    // 构建切向量矩阵
-    let world_normal = normalize(normal_matrix * model.normal);
-    let world_tangent = normalize(normal_matrix * model.tangent);
-    let world_bitangent = normalize(normal_matrix * model.bitangent);
-    let tangent_matrix = transpose(mat3x3f(
-        world_tangent,
-        world_bitangent,
-        world_normal,
-    ));
-
-    let world_position = model_matrix * vec4f(model.position, 1.0);
-
-    var out: VertexOutput;
-    out.clip_position = camera.view_proj * world_position;
-    out.tex_coords = model.tex_coords;
-    out.tangent_position = tangent_matrix * world_position.xyz;
-    out.tangent_view_position = tangent_matrix * camera.view_pos.xyz;
-    out.tangent_light_position = tangent_matrix * light.position;
-    return out;
-}

最后,更新片元着色器以使用这些转换后的光照值:

rust
@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    // Sample textures..
-
-    // 光照计算需要的向量
-    let tangent_normal = object_normal.xyz * 2.0 - 1.0;
-    let light_dir = normalize(in.tangent_light_position - in.tangent_position);
-    let view_dir = normalize(in.tangent_view_position - in.tangent_position);
-
-    // 执行光照计算...
-}

完成上边的计算,我们会得到如下渲染效果:

sRGB 与法线纹理

光线的强度是对其能量的物理度量,而亮度 (brightness) 度量的是人眼所感知到的光线强度。 由于人眼中的光感受器对不同波长的光线能量的响应不同,即使红光和绿光的物理强度相同,在我们看来它们也并不具有相同的亮度,事实上,人眼是按对数关系来感知光线强度的。根据人类视觉系统所具有的这种特性,如果希望亮度看起来按等间隔的步长递增,那么赋给像素的光强值应该按指数的形式递增。显示设备可以根据所能产生的最小和最大光强值通过计算得到亮度变化的步长。

sRGB 色彩空间是一种用于计算机显示设备和打印机等设备的标准颜色系统,包括 WebGPU 在内的大部分图形绘制系统都支持 sRGB。它通过对色值的 𝛄 (gamma) 编码,实现了图像在有限的色值范围(红、绿、蓝每个颜色通道的取值都在 [0, 255] 范围内)内隐藏人眼对色彩的感知差异。

GPU 硬件对 sRGB 色彩空间提供了特殊支持,可以将颜色值从线性值转换到 𝛄 编码,并通过 𝛄 校正(Gamma Correction)解码回线性值。 我们一直在使用 Rgba8UnormSrgb 格式来制作所有的纹理。Srgb 位就是指示 wgpu:

  • 当着色器代码对 sRGB 格式的纹理进行采样时,GPU 硬件要将其从 sRGB 采样值解码为线性值再返回给着色器;
  • 当着色器代码写入线性颜色值到 sRGB 格式的纹理时,GPU 硬件要对其进行 𝛄 编码后再写入;

如果纹理数据不是基于 sRGB 色彩空间制作的,但指定了 Rgba8UnormSrgb 格式,会由于改变了 GPU 对纹理的采样方式而导致渲染结果与预期不符。 这可以通过在创建纹理时使用 Rgba8Unorm 来避免。让我们给 Texture 结构体添加一个 is_normal_map 参数。

rust
pub fn from_image(
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-    img: &image::DynamicImage,
-    label: Option<&str>,
-    is_normal_map: bool, // 新增!
-) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
-    // ...
-    let texture = device.create_texture(&wgpu::TextureDescriptor {
-        label,
-        size,
-        mip_level_count: 1,
-        sample_count: 1,
-        dimension: wgpu::TextureDimension::D2,
-        // 更新!
-        format: if is_normal_map {
-            wgpu::TextureFormat::Rgba8Unorm
-        } else {
-            wgpu::TextureFormat::Rgba8UnormSrgb
-        },
-        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
-        view_formats: &[],
-    });
-
-    // ...
-
-    Ok((Self { texture, view, sampler }, cmd_buffer))
-}

并将这一修改同步到其他有关的函数:

rust
pub fn from_bytes(
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-    bytes: &[u8],
-    label: &str,
-    is_normal_map: bool, // 新增!
-) -> Result<Self> {
-    let img = image::load_from_memory(bytes)?;
-    Self::from_image(device, queue, &img, Some(label), is_normal_map) // 更新!
-}

同时也还要更新 resource.rs

rust
pub async fn load_texture(
-    file_name: &str,
-    is_normal_map: bool,
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-) -> anyhow::Result<texture::Texture> {
-    let data = load_binary(file_name).await?;
-    texture::Texture::from_bytes(device, queue, &data, file_name, is_normal_map)
-}
-
-pub async fn load_model(
-    file_name: &str,
-    device: &wgpu::Device,
-    queue: &wgpu::Queue,
-    layout: &wgpu::BindGroupLayout,
-) -> anyhow::Result<model::Model> {
-    // ...
-
-    let mut materials = Vec::new();
-    for m in obj_materials? {
-        let diffuse_texture = load_texture(&m.diffuse_texture, false, device, queue).await?; // 更新!
-        let normal_texture = load_texture(&m.normal_texture, true, device, queue).await?; // 更新!
-
-        materials.push(model::Material::new(
-            device,
-            &m.name,
-            diffuse_texture,
-            normal_texture,
-            layout,
-        ));
-    }
-}

现在的渲染效果如下:

试试其他材质

现在改用其他材质来试试效果,在 DrawModel trait 中添加了一个 draw_model_instanced_with_material() 接口并在渲染通道对象上实现此接口:

rust
pub trait DrawModel<'a> {
-    // ...
-    fn draw_model_instanced_with_material(
-        &mut self,
-        model: &'a Model,
-        material: &'a Material,
-        instances: Range<u32>,
-        camera_bind_group: &'a wgpu::BindGroup,
-        light_bind_group: &'a wgpu::BindGroup,
-    );
-}
-
-impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
-where
-    'b: 'a,
-{
-    // ...
-    fn draw_model_instanced_with_material(
-        &mut self,
-        model: &'b Model,
-        material: &'b Material,
-        instances: Range<u32>,
-        camera_bind_group: &'b wgpu::BindGroup,
-        light_bind_group: &'b wgpu::BindGroup,
-    ) {
-        for mesh in &model.meshes {
-            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
-        }
-    }
-}

我找到了一个鹅卵石纹理及匹配的法线贴图,并为它创建一个叫 debug_material 的材质实例:

rust
// lib.rs
-impl State {
-    async fn new(window: &Window) -> Result<Self> {
-        // ...
-        let debug_material = {
-            let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
-            let normal_bytes = include_bytes!("../res/cobble-normal.png");
-
-            let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "res/alt-diffuse.png", false).unwrap();
-            let normal_texture = texture::Texture::from_bytes(&device, &queue, normal_bytes, "res/alt-normal.png", true).unwrap();
-
-            model::Material::new(&device, "alt-material", diffuse_texture, normal_texture, &texture_bind_group_layout)
-        };
-        Self {
-            // ...
-            #[allow(dead_code)]
-            debug_material,
-        }
-    }
-}

然后调用刚实现的 draw_model_instanced_with_material() 函数来使用 debug_material 渲染:

rust
render_pass.set_pipeline(&self.render_pipeline);
-render_pass.draw_model_instanced_with_material(
-    &self.obj_model,
-    &self.debug_material,
-    0..self.instances.len() as u32,
-    &self.camera_bind_group,
-    &self.light_bind_group,
-);

得到的渲染效果如下:

上面使用的纹理可以在 Github 源码库中找到。

- +
Skip to content

法线映射

添加光照后,我们的场景已经看起来很不错了。不过,对象表面还缺少实物的那种凹凸细节。如果使用的纹理是光滑的就不是问题,但是砖块纹理应该是比较粗糙的。 我们可以给模型添加更多的三角形来雕刻出表面的凹凸细节,但这将使得顶点数据倍增而渲染变慢,而且也很难知道在哪里添加新的三角形。这,就是法线映射(Normal Mapping)的用武之地了。

法线映射也叫凹凸映射(Bump Mapping),是一种纹理映射技术,它不用增加模型的几何复杂度就能够模拟具有复杂凹凸细节的表面。与简单的纹理映射不同,当绘制对象的表面时,法线映射技术通过扰动表面的法向量来改变它的形状,使得着色处理后的颜色能反映出表面几何特性的变化。

还记得在实例化绘制教程中,我们尝试了在纹理中存储实例数据吗?法线贴图(Normal Map)就是存储着法向量数据的纹理!除了顶点法向量外,我们还将在光照计算中使用法线贴图中的法向量。

我们的砖块纹理对应的法线贴图(也就是法线纹理)长这样:

./cube-normal.png

纹理的 r、g、b 分量对应于法向量的 x、y 和 z 坐标分量。所有的 z 值都应该是正的,这就是为什么法线贴图有一个蓝色的色调。

我们来修改 model.rs 中的材质 Material 结构体,新增一个法线纹理 normal_texture 字段:

rust
pub struct Material {
+    pub name: String,
+    pub diffuse_texture: texture::Texture,
+    pub normal_texture: texture::Texture, // 更新!
+    pub bind_group: wgpu::BindGroup,
+}

还得更新纹理绑定组布局 texture_bind_group_layout 以包括法线贴图:

rust
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+    entries: &[
+        // ...
+        // 法线贴图
+        wgpu::BindGroupLayoutEntry {
+            binding: 2,
+            visibility: wgpu::ShaderStages::FRAGMENT,
+            ty: wgpu::BindingType::Texture {
+                multisampled: false,
+                sample_type: wgpu::TextureSampleType::Float { filterable: true },
+                view_dimension: wgpu::TextureViewDimension::D2,
+            },
+            count: None,
+        },
+        wgpu::BindGroupLayoutEntry {
+            binding: 3,
+            visibility: wgpu::ShaderStages::FRAGMENT,
+            ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
+            count: None,
+        },
+    ],
+    label: Some("texture_bind_group_layout"),
+});

resources.rsload_model() 函数中创建材质的循环里,添加以下代码来实际加载法线贴图:

rust
// resources.rs
+let mut materials = Vec::new();
+for m in obj_materials? {
+    let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
+    // 新增!
+    let normal_texture = load_texture(&m.normal_texture, device, queue).await?;
+
+    materials.push(model::Material::new(
+        device,
+        &m.name,
+        diffuse_texture,
+        normal_texture, // 新增!
+        layout,
+    ));
+}

上面使用的 Material::new() 函数的具体代码如下:

rust
impl Material {
+    pub fn new(
+        device: &wgpu::Device,
+        name: &str,
+        diffuse_texture: texture::Texture,
+        normal_texture: texture::Texture, // 新增!
+        layout: &wgpu::BindGroupLayout,
+    ) -> Self {
+        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            layout,
+            entries: &[
+                wgpu::BindGroupEntry {
+                    binding: 0,
+                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 1,
+                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
+                },
+                // 新增!
+                wgpu::BindGroupEntry {
+                    binding: 2,
+                    resource: wgpu::BindingResource::TextureView(&normal_texture.view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 3,
+                    resource: wgpu::BindingResource::Sampler(&normal_texture.sampler),
+                },
+            ],
+            label: Some(name),
+        });
+
+        Self {
+            name: String::from(name),
+            diffuse_texture,
+            normal_texture, // 新增!
+            bind_group,
+        }
+    }
+}

现在我们可以在片元着色器中使用纹理了:

rust
// 片元着色器
+
+@group(0) @binding(0)
+var t_diffuse: texture_2d<f32>;
+@group(0)@binding(1)
+var s_diffuse: sampler;
+@group(0)@binding(2)
+var t_normal: texture_2d<f32>;
+@group(0) @binding(3)
+var s_normal: sampler;
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    let object_color: vec4f = textureSample(t_diffuse, s_diffuse, in.tex_coords);
+    let object_normal: vec4f = textureSample(t_normal, s_normal, in.tex_coords);
+    // 环境光强度
+    let ambient_strength = 0.1;
+    let ambient_color = light.color * ambient_strength;
+
+    // Create the lighting vectors
+    let tangent_normal = object_normal.xyz * 2.0 - 1.0;
+    let light_dir = normalize(light.position - in.world_position);
+    let view_dir = normalize(camera.view_pos.xyz - in.world_position);
+    let half_dir = normalize(view_dir + light_dir);
+
+    let diffuse_strength = max(dot(tangent_normal, light_dir), 0.0);
+    let diffuse_color = light.color * diffuse_strength;
+
+    let specular_strength = pow(max(dot(tangent_normal, half_dir), 0.0), 32.0);
+    let specular_color = specular_strength * light.color;
+
+    let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
+
+    return vec4f(result, object_color.a);
+}

如果现在运行代码,你会发现渲染效果看起来不太对劲。让我们将效果与上一个教程比较一下:

场景中应该被点亮的部分是黑暗的,反之亦然。

从切空间到世界空间

光照教程的法线矩阵 部分有提到:我们是在世界空间中进行光照计算的。也就是说,整个场景的方向是相对于世界坐标系而言的。 从法线纹理中提取的法向量都处在正 Z 方向上,也就是说我们的光照计算认为模型的所有表面都朝向大致相同的方向。这被称为切空间(Tangent Space,也叫做切向量空间)。

光照教程 中我们用顶点法向量来表示表面的方向。现在,可以用它来将法线贴图中的法向量从切空间变换到世界空间。实现此变换需要用到一点点线性代数。

我们将创建一个矩阵,代表相对于顶点法向量的坐标空间(Coordinate Space)。然后使用它来变换法线贴图数据,使其处于世界空间:

rust
let coordinate_system = mat3x3f(
+    vec3(1, 0, 0), // x axis (右)
+    vec3(0, 1, 0), // y axis (上)
+    vec3(0, 0, 1)  // z axis (前)
+);

切向量与副切向量

我们已经有了需要的 3 个向量中的一个,即法向量。另外两个是切向量(Tangent Vector)与副切向量(Bitangent Vector, 也被叫作副法向量(Binormal))。切向量是与法向量垂直且表面平行的向量(也就是不与表面相交)。副切向量是同时垂直于法向量与切向量的向量,所以可以由法向量与切向量的叉积计算得出。切向量、副切向量和法向量一起分别代表坐标空间 x、y 和 z 轴。

一些模型格式会在顶点数据中包括切向量与副切向量,但 OBJ 没有。我们得手动计算,可以从现有的顶点数据中推导出切向量与副切向量。请看下图:

可以使用三角形的边和法线来计算切向量与副切向量。首先,我们需要更新在 model.rs 中的顶点 ModelVertex 结构体:

rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+pub struct ModelVertex {
+    position: [f32; 3],
+    tex_coords: [f32; 2],
+    normal: [f32; 3],
+    // 新增!
+    tangent: [f32; 3],
+    bitangent: [f32; 3],
+}

同时也需要更新顶点缓冲区布局 VertexBufferLayout:

rust
impl Vertex for ModelVertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                // ...
+
+                // Tangent and bitangent
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
+                    shader_location: 3,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 11]>() as wgpu::BufferAddress,
+                    shader_location: 4,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+            ],
+        }
+    }
+}

现在可以计算新的切向量与副切向量了, 用以下代码来更新 resource.rsload_model() 函数的网格生成:

rust
let meshes = models
+    .into_iter()
+    .map(|m| {
+        let mut vertices = (0..m.mesh.positions.len() / 3)
+            .map(|i| model::ModelVertex {
+                position: [
+                    m.mesh.positions[i * 3],
+                    m.mesh.positions[i * 3 + 1],
+                    m.mesh.positions[i * 3 + 2],
+                ],
+                tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
+                normal: [
+                    m.mesh.normals[i * 3],
+                    m.mesh.normals[i * 3 + 1],
+                    m.mesh.normals[i * 3 + 2],
+                ],
+                // 随后会计算实际值来替换
+                tangent: [0.0; 3],
+                bitangent: [0.0; 3],
+            })
+            .collect::<Vec<_>>();
+
+        let indices = &m.mesh.indices;
+        let mut triangles_included = vec![0; vertices.len()];
+
+        // 遍历三角形的三个顶点来计算切向量与副切向量.
+        for c in indices.chunks(3) {
+            let v0 = vertices[c[0] as usize];
+            let v1 = vertices[c[1] as usize];
+            let v2 = vertices[c[2] as usize];
+
+            let pos0: glam::Vec3 = v0.position.into();
+            let pos1: glam::Vec3 = v1.position.into();
+            let pos2: glam::Vec3 = v2.position.into();
+
+            let uv0: glam::Vec2 = v0.tex_coords.into();
+            let uv1: glam::Vec2 = v1.tex_coords.into();
+            let uv2: glam::Vec2 = v2.tex_coords.into();
+
+            // 计算三角形的边
+            let delta_pos1 = pos1 - pos0;
+            let delta_pos2 = pos2 - pos0;
+
+            // 计算切向量/副切向量需要用到的两个方向向量
+            let delta_uv1 = uv1 - uv0;
+            let delta_uv2 = uv2 - uv0;
+
+            // 求解以下方程组
+            //     delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
+            //     delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
+            // 幸运的是,在我发现这个方程的地方提供了如下求解方案!
+            let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
+            let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
+            // 我们翻转副切向量以启用具有 wgpu 纹理坐标系的右手标架的法线贴图
+            let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * -r;
+
+            // 我们为三角形中的每个顶点使用相同的切向量/副切向量
+            vertices[c[0] as usize].tangent =
+                (tangent + glam::Vec3::from_array(vertices[c[0] as usize].tangent)).into();
+            vertices[c[1] as usize].tangent =
+                (tangent + glam::Vec3::from_array(vertices[c[1] as usize].tangent)).into();
+            vertices[c[2] as usize].tangent =
+                (tangent + glam::Vec3::from_array(vertices[c[2] as usize].tangent)).into();
+            vertices[c[0] as usize].bitangent =
+                (bitangent + glam::Vec3::from_array(vertices[c[0] as usize].bitangent)).into();
+            vertices[c[1] as usize].bitangent =
+                (bitangent + glam::Vec3::from_array(vertices[c[1] as usize].bitangent)).into();
+            vertices[c[2] as usize].bitangent =
+                (bitangent + glam::Vec3::from_array(vertices[c[2] as usize].bitangent)).into();
+
+            // 用于计算顶点上切向量/副切向量的平均值
+            triangles_included[c[0] as usize] += 1;
+            triangles_included[c[1] as usize] += 1;
+            triangles_included[c[2] as usize] += 1;
+        }
+
+        // 计算切向量/副切向量的平均值
+        for (i, n) in triangles_included.into_iter().enumerate() {
+            let denom = 1.0 / n as f32;
+            let mut v = &mut vertices[i];
+            v.tangent = (glam::Vec3::from_array(v.tangent) * denom).into();
+            v.bitangent = (glam::Vec3::from_array(v.bitangent) * denom).into();
+        }
+
+        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+            label: Some(&format!("{:?} Vertex Buffer", file_name)),
+            contents: bytemuck::cast_slice(&vertices),
+            usage: wgpu::BufferUsages::VERTEX,
+        });
+        let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+            label: Some(&format!("{:?} Index Buffer", file_name)),
+            contents: bytemuck::cast_slice(&m.mesh.indices),
+            usage: wgpu::BufferUsages::INDEX,
+        });
+
+        model::Mesh {
+            name: file_name.to_string(),
+            vertex_buffer,
+            index_buffer,
+            num_elements: m.mesh.indices.len() as u32,
+            material: m.mesh.material_id.unwrap_or(0),
+        }
+    })
+    .collect::<Vec<_>>();

从世界空间到切空间

由于法线贴图默认是在切空间中,该计算中使用的所有其他变量也得变换为切空间。我们需要在顶点着色器中构建切向量矩阵,首先,修改 VertexInput 来包括之前计算的切向量与副切向量:

rust
struct VertexInput {
+    @location(0) position: vec3f,
+    @location(1) tex_coords: vec2f;
+    @location(2) normal: vec3f;
+    @location(3) tangent: vec3f;
+    @location(4) bitangent: vec3f;
+};

接下来构建切向量矩阵 tangent_matrix,然后将顶点,光源和视图坐标变换到切空间:

rust
struct VertexOutput {
+    @builtin(position) clip_position: vec4f;
+    @location(0) tex_coords: vec2f;
+    // 更新!
+    @location(1) tangent_position: vec3f;
+    @location(2) tangent_light_position: vec3f;
+    @location(3) tangent_view_position: vec3f;
+};
+
+@vertex
+fn vs_main(
+    model: VertexInput,
+    instance: InstanceInput,
+) -> VertexOutput {
+    // ...
+    let normal_matrix = mat3x3f(
+        instance.normal_matrix_0,
+        instance.normal_matrix_1,
+        instance.normal_matrix_2,
+    );
+
+    // 构建切向量矩阵
+    let world_normal = normalize(normal_matrix * model.normal);
+    let world_tangent = normalize(normal_matrix * model.tangent);
+    let world_bitangent = normalize(normal_matrix * model.bitangent);
+    let tangent_matrix = transpose(mat3x3f(
+        world_tangent,
+        world_bitangent,
+        world_normal,
+    ));
+
+    let world_position = model_matrix * vec4f(model.position, 1.0);
+
+    var out: VertexOutput;
+    out.clip_position = camera.view_proj * world_position;
+    out.tex_coords = model.tex_coords;
+    out.tangent_position = tangent_matrix * world_position.xyz;
+    out.tangent_view_position = tangent_matrix * camera.view_pos.xyz;
+    out.tangent_light_position = tangent_matrix * light.position;
+    return out;
+}

最后,更新片元着色器以使用这些转换后的光照值:

rust
@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    // Sample textures..
+
+    // 光照计算需要的向量
+    let tangent_normal = object_normal.xyz * 2.0 - 1.0;
+    let light_dir = normalize(in.tangent_light_position - in.tangent_position);
+    let view_dir = normalize(in.tangent_view_position - in.tangent_position);
+
+    // 执行光照计算...
+}

完成上边的计算,我们会得到如下渲染效果:

sRGB 与法线纹理

光线的强度是对其能量的物理度量,而亮度 (brightness) 度量的是人眼所感知到的光线强度。 由于人眼中的光感受器对不同波长的光线能量的响应不同,即使红光和绿光的物理强度相同,在我们看来它们也并不具有相同的亮度,事实上,人眼是按对数关系来感知光线强度的。根据人类视觉系统所具有的这种特性,如果希望亮度看起来按等间隔的步长递增,那么赋给像素的光强值应该按指数的形式递增。显示设备可以根据所能产生的最小和最大光强值通过计算得到亮度变化的步长。

sRGB 色彩空间是一种用于计算机显示设备和打印机等设备的标准颜色系统,包括 WebGPU 在内的大部分图形绘制系统都支持 sRGB。它通过对色值的 𝛄 (gamma) 编码,实现了图像在有限的色值范围(红、绿、蓝每个颜色通道的取值都在 [0, 255] 范围内)内隐藏人眼对色彩的感知差异。

GPU 硬件对 sRGB 色彩空间提供了特殊支持,可以将颜色值从线性值转换到 𝛄 编码,并通过 𝛄 校正(Gamma Correction)解码回线性值。 我们一直在使用 Rgba8UnormSrgb 格式来制作所有的纹理。Srgb 位就是指示 wgpu:

  • 当着色器代码对 sRGB 格式的纹理进行采样时,GPU 硬件要将其从 sRGB 采样值解码为线性值再返回给着色器;
  • 当着色器代码写入线性颜色值到 sRGB 格式的纹理时,GPU 硬件要对其进行 𝛄 编码后再写入;

如果纹理数据不是基于 sRGB 色彩空间制作的,但指定了 Rgba8UnormSrgb 格式,会由于改变了 GPU 对纹理的采样方式而导致渲染结果与预期不符。 这可以通过在创建纹理时使用 Rgba8Unorm 来避免。让我们给 Texture 结构体添加一个 is_normal_map 参数。

rust
pub fn from_image(
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    img: &image::DynamicImage,
+    label: Option<&str>,
+    is_normal_map: bool, // 新增!
+) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
+    // ...
+    let texture = device.create_texture(&wgpu::TextureDescriptor {
+        label,
+        size,
+        mip_level_count: 1,
+        sample_count: 1,
+        dimension: wgpu::TextureDimension::D2,
+        // 更新!
+        format: if is_normal_map {
+            wgpu::TextureFormat::Rgba8Unorm
+        } else {
+            wgpu::TextureFormat::Rgba8UnormSrgb
+        },
+        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
+        view_formats: &[],
+    });
+
+    // ...
+
+    Ok((Self { texture, view, sampler }, cmd_buffer))
+}

并将这一修改同步到其他有关的函数:

rust
pub fn from_bytes(
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    bytes: &[u8],
+    label: &str,
+    is_normal_map: bool, // 新增!
+) -> Result<Self> {
+    let img = image::load_from_memory(bytes)?;
+    Self::from_image(device, queue, &img, Some(label), is_normal_map) // 更新!
+}

同时也还要更新 resource.rs

rust
pub async fn load_texture(
+    file_name: &str,
+    is_normal_map: bool,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+) -> anyhow::Result<texture::Texture> {
+    let data = load_binary(file_name).await?;
+    texture::Texture::from_bytes(device, queue, &data, file_name, is_normal_map)
+}
+
+pub async fn load_model(
+    file_name: &str,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    layout: &wgpu::BindGroupLayout,
+) -> anyhow::Result<model::Model> {
+    // ...
+
+    let mut materials = Vec::new();
+    for m in obj_materials? {
+        let diffuse_texture = load_texture(&m.diffuse_texture, false, device, queue).await?; // 更新!
+        let normal_texture = load_texture(&m.normal_texture, true, device, queue).await?; // 更新!
+
+        materials.push(model::Material::new(
+            device,
+            &m.name,
+            diffuse_texture,
+            normal_texture,
+            layout,
+        ));
+    }
+}

现在的渲染效果如下:

试试其他材质

现在改用其他材质来试试效果,在 DrawModel trait 中添加了一个 draw_model_instanced_with_material() 接口并在渲染通道对象上实现此接口:

rust
pub trait DrawModel<'a> {
+    // ...
+    fn draw_model_instanced_with_material(
+        &mut self,
+        model: &'a Model,
+        material: &'a Material,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+    );
+}
+
+impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
+where
+    'b: 'a,
+{
+    // ...
+    fn draw_model_instanced_with_material(
+        &mut self,
+        model: &'b Model,
+        material: &'b Material,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
+        }
+    }
+}

我找到了一个鹅卵石纹理及匹配的法线贴图,并为它创建一个叫 debug_material 的材质实例:

rust
// lib.rs
+impl State {
+    async fn new(window: &Window) -> Result<Self> {
+        // ...
+        let debug_material = {
+            let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
+            let normal_bytes = include_bytes!("../res/cobble-normal.png");
+
+            let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "res/alt-diffuse.png", false).unwrap();
+            let normal_texture = texture::Texture::from_bytes(&device, &queue, normal_bytes, "res/alt-normal.png", true).unwrap();
+
+            model::Material::new(&device, "alt-material", diffuse_texture, normal_texture, &texture_bind_group_layout)
+        };
+        Self {
+            // ...
+            #[allow(dead_code)]
+            debug_material,
+        }
+    }
+}

然后调用刚实现的 draw_model_instanced_with_material() 函数来使用 debug_material 渲染:

rust
render_pass.set_pipeline(&self.render_pipeline);
+render_pass.draw_model_instanced_with_material(
+    &self.obj_model,
+    &self.debug_material,
+    0..self.instances.len() as u32,
+    &self.camera_bind_group,
+    &self.light_bind_group,
+);

得到的渲染效果如下:

上面使用的纹理可以在 Github 源码库中找到。

+ \ No newline at end of file diff --git a/intermediate/tutorial12-camera/index.html b/intermediate/tutorial12-camera/index.html index 5561e8b7e..22d7afe38 100644 --- a/intermediate/tutorial12-camera/index.html +++ b/intermediate/tutorial12-camera/index.html @@ -5,330 +5,333 @@ 更好的摄像机 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

更好的摄像机

这个问题已经被推迟了一段时间。实现一个虚拟摄像机与正确使用 wgpu 关系不大,但它一直困扰着我,所以现在来实现它吧。

lib.rs 已经堆砌很多代码了,所以我们创建一个 camera.rs 文件来放置摄像机代码。先导入一些要用到的文件:

rust
use winit::event::*;
-use winit::dpi::PhysicalPosition;
-use instant::Duration;
-use std::f32::consts::FRAC_PI_2;
+    
Skip to content

更好的摄像机

这个问题已经被推迟了一段时间。实现一个虚拟摄像机与正确使用 wgpu 关系不大,但它一直困扰着我,所以现在来实现它吧。

lib.rs 已经堆砌很多代码了,所以我们创建一个 camera.rs 文件来放置摄像机代码。先导入一些要用到的文件:

rust
use winit::event::*;
+use winit::dpi::PhysicalPosition;
+use instant::Duration;
+use std::f32::consts::FRAC_PI_2;
 
-const SAFE_FRAC_PI_2: f32 = FRAC_PI_2 - 0.0001;

在 WASM 中使用 std::time::instant 会导致程序恐慌,所以我们使用 instant 包来替代,在 Cargo.toml 引入此依赖:

toml
instant = "0.1"

虚拟摄像机

接下来,需要创建一个新的 Camera 结构体。我们将使用一个 FPS 风格的摄像机,所以要存储位置(position)、 yaw(偏航,水平旋转)以及 pitch(俯仰,垂直旋转), 定义并实现一个 calc_matrix 函数用于创建视图矩阵:

rust
#[derive(Debug)]
-pub struct Camera {
-    pub position: glam::Vec3,
-    yaw: f32,
-    pitch: f32,
-}
+const SAFE_FRAC_PI_2: f32 = FRAC_PI_2 - 0.0001;

在 WASM 中使用 std::time::instant 会导致程序恐慌,所以我们使用 instant 包来替代,在 Cargo.toml 引入此依赖:

toml
instant = "0.1"

虚拟摄像机

接下来,需要创建一个新的 Camera 结构体。我们将使用一个 FPS 风格的摄像机,所以要存储位置(position)、 yaw(偏航,水平旋转)以及 pitch(俯仰,垂直旋转), 定义并实现一个 calc_matrix 函数用于创建视图矩阵:

rust
#[derive(Debug)]
+pub struct Camera {
+    pub position: glam::Vec3,
+    yaw: f32,
+    pitch: f32,
+}
 
-impl Camera {
-    pub fn new<V: Into<glam::Vec3>>(position: V, yaw: f32, pitch: f32) -> Self {
-        Self {
-            position: position.into(),
-            yaw,
-            pitch,
-        }
-    }
+impl Camera {
+    pub fn new<V: Into<glam::Vec3>>(position: V, yaw: f32, pitch: f32) -> Self {
+        Self {
+            position: position.into(),
+            yaw,
+            pitch,
+        }
+    }
 
-    pub fn calc_matrix(&self) -> glam::Mat4 {
-        let (sin_pitch, cos_pitch) = self.pitch.sin_cos();
-        let (sin_yaw, cos_yaw) = self.yaw.sin_cos();
+    pub fn calc_matrix(&self) -> glam::Mat4 {
+        let (sin_pitch, cos_pitch) = self.pitch.sin_cos();
+        let (sin_yaw, cos_yaw) = self.yaw.sin_cos();
 
-        glam::Mat4::look_to_rh(
-            self.position,
-            glam::Vec3::new(cos_pitch * cos_yaw, sin_pitch, cos_pitch * sin_yaw).normalize(),
-            glam::Vec3::Y,
-        )
-    }
-}

投影

只有在窗口调整大小时,投影(Projection)才真正需要改变,所以我们将投影与摄像机分开,创建一个 Projection 结构体:

rust
pub struct Projection {
-    aspect: f32,
-    fovy: f32,
-    znear: f32,
-    zfar: f32,
-}
+        glam::Mat4::look_to_rh(
+            self.position,
+            glam::Vec3::new(cos_pitch * cos_yaw, sin_pitch, cos_pitch * sin_yaw).normalize(),
+            glam::Vec3::Y,
+        )
+    }
+}

投影

只有在窗口调整大小时,投影(Projection)才真正需要改变,所以我们将投影与摄像机分开,创建一个 Projection 结构体:

rust
pub struct Projection {
+    aspect: f32,
+    fovy: f32,
+    znear: f32,
+    zfar: f32,
+}
 
-impl Projection {
-    pub fn new(width: u32, height: u32, fovy: f32, znear: f32, zfar: f32) -> Self {
-        Self {
-            aspect: width as f32 / height as f32,
-            fovy: fovy.to_radians(),
-            znear,
-            zfar,
-        }
-    }
+impl Projection {
+    pub fn new(width: u32, height: u32, fovy: f32, znear: f32, zfar: f32) -> Self {
+        Self {
+            aspect: width as f32 / height as f32,
+            fovy: fovy.to_radians(),
+            znear,
+            zfar,
+        }
+    }
 
-    pub fn resize(&mut self, width: u32, height: u32) {
-        self.aspect = width as f32 / height as f32;
-    }
+    pub fn resize(&mut self, width: u32, height: u32) {
+        self.aspect = width as f32 / height as f32;
+    }
 
-    pub fn calc_matrix(&self) -> glam::Mat4 {
-        glam::Mat4::perspective_rh(self.fovy, self.aspect, self.znear, self.zfar)
-    }
-}

有一点需要注意:从 perspective_rh 函数返回的是右手坐标系(right-handed coordinate system)的投影矩阵。也就是说,Z 轴是指向屏幕外的,想让 Z 轴指向屏幕内(也就是左手坐标系的投影矩阵)需要使用 perspective_lh

可以这样分辨右手坐标系和左手坐标系的区别:在身体的正前方把你的拇指指向右边代表 X 轴,食指指向上方代表 Y 轴,伸出中指代表 Z 轴。此时在你的右手上,中指应该指是向你自己。而在左手上,应该是指向远方。

./left_right_hand.gif

摄像机控制器

现在,我们需要一个新的摄像机控制器,在 camera.rs 中添加以下代码:

rust
#[derive(Debug)]
-pub struct CameraController {
-    amount_left: f32,
-    amount_right: f32,
-    amount_forward: f32,
-    amount_backward: f32,
-    amount_up: f32,
-    amount_down: f32,
-    rotate_horizontal: f32,
-    rotate_vertical: f32,
-    scroll: f32,
-    speed: f32,
-    sensitivity: f32,
-}
+    pub fn calc_matrix(&self) -> glam::Mat4 {
+        glam::Mat4::perspective_rh(self.fovy, self.aspect, self.znear, self.zfar)
+    }
+}

有一点需要注意:从 perspective_rh 函数返回的是右手坐标系(right-handed coordinate system)的投影矩阵。也就是说,Z 轴是指向屏幕外的,想让 Z 轴指向屏幕内(也就是左手坐标系的投影矩阵)需要使用 perspective_lh

可以这样分辨右手坐标系和左手坐标系的区别:在身体的正前方把你的拇指指向右边代表 X 轴,食指指向上方代表 Y 轴,伸出中指代表 Z 轴。此时在你的右手上,中指应该指是向你自己。而在左手上,应该是指向远方。

./left_right_hand.gif

摄像机控制器

现在,我们需要一个新的摄像机控制器,在 camera.rs 中添加以下代码:

rust
#[derive(Debug)]
+pub struct CameraController {
+    amount_left: f32,
+    amount_right: f32,
+    amount_forward: f32,
+    amount_backward: f32,
+    amount_up: f32,
+    amount_down: f32,
+    rotate_horizontal: f32,
+    rotate_vertical: f32,
+    scroll: f32,
+    speed: f32,
+    sensitivity: f32,
+}
 
-impl CameraController {
-    pub fn new(speed: f32, sensitivity: f32) -> Self {
-        Self {
-            amount_left: 0.0,
-            amount_right: 0.0,
-            amount_forward: 0.0,
-            amount_backward: 0.0,
-            amount_up: 0.0,
-            amount_down: 0.0,
-            rotate_horizontal: 0.0,
-            rotate_vertical: 0.0,
-            scroll: 0.0,
-            speed,
-            sensitivity,
-        }
-    }
+impl CameraController {
+    pub fn new(speed: f32, sensitivity: f32) -> Self {
+        Self {
+            amount_left: 0.0,
+            amount_right: 0.0,
+            amount_forward: 0.0,
+            amount_backward: 0.0,
+            amount_up: 0.0,
+            amount_down: 0.0,
+            rotate_horizontal: 0.0,
+            rotate_vertical: 0.0,
+            scroll: 0.0,
+            speed,
+            sensitivity,
+        }
+    }
 
-    pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool{
-        let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
-        match key {
-            VirtualKeyCode::W | VirtualKeyCode::Up => {
-                self.amount_forward = amount;
-                true
-            }
-            VirtualKeyCode::S | VirtualKeyCode::Down => {
-                self.amount_backward = amount;
-                true
-            }
-            VirtualKeyCode::A | VirtualKeyCode::Left => {
-                self.amount_left = amount;
-                true
-            }
-            VirtualKeyCode::D | VirtualKeyCode::Right => {
-                self.amount_right = amount;
-                true
-            }
-            VirtualKeyCode::Space => {
-                self.amount_up = amount;
-                true
-            }
-            VirtualKeyCode::LShift => {
-                self.amount_down = amount;
-                true
-            }
-            _ => false,
-        }
-    }
+    pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool{
+        let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
+        match key {
+            VirtualKeyCode::W | VirtualKeyCode::Up => {
+                self.amount_forward = amount;
+                true
+            }
+            VirtualKeyCode::S | VirtualKeyCode::Down => {
+                self.amount_backward = amount;
+                true
+            }
+            VirtualKeyCode::A | VirtualKeyCode::Left => {
+                self.amount_left = amount;
+                true
+            }
+            VirtualKeyCode::D | VirtualKeyCode::Right => {
+                self.amount_right = amount;
+                true
+            }
+            VirtualKeyCode::Space => {
+                self.amount_up = amount;
+                true
+            }
+            VirtualKeyCode::LShift => {
+                self.amount_down = amount;
+                true
+            }
+            _ => false,
+        }
+    }
 
-    pub fn process_mouse(&mut self, mouse_dx: f64, mouse_dy: f64) {
-        self.rotate_horizontal = mouse_dx as f32;
-        self.rotate_vertical = mouse_dy as f32;
-    }
+    pub fn process_mouse(&mut self, mouse_dx: f64, mouse_dy: f64) {
+        self.rotate_horizontal = mouse_dx as f32;
+        self.rotate_vertical = mouse_dy as f32;
+    }
 
-    pub fn process_scroll(&mut self, delta: &MouseScrollDelta) {
-        self.scroll = -match delta {
-            // 假定一行为 100 个像素,你可以随意修改这个值
-            MouseScrollDelta::LineDelta(_, scroll) => scroll * 100.0,
-            MouseScrollDelta::PixelDelta(PhysicalPosition {
-                y: scroll,
-                ..
-            }) => *scroll as f32,
-        };
-    }
+    pub fn process_scroll(&mut self, delta: &MouseScrollDelta) {
+        self.scroll = -match delta {
+            // 假定一行为 100 个像素,你可以随意修改这个值
+            MouseScrollDelta::LineDelta(_, scroll) => scroll * 100.0,
+            MouseScrollDelta::PixelDelta(PhysicalPosition {
+                y: scroll,
+                ..
+            }) => *scroll as f32,
+        };
+    }
 
-    pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) {
-        let dt = dt.as_secs_f32();
+    pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) {
+        let dt = dt.as_secs_f32();
 
-        // 前后左右移动
-        let (yaw_sin, yaw_cos) = camera.yaw.sin_cos();
-        let forward = glam::Vec3::new(yaw_cos, 0.0, yaw_sin).normalize();
-        let right = glam::Vec3::new(-yaw_sin, 0.0, yaw_cos).normalize();
-        camera.position += forward * (self.amount_forward - self.amount_backward) * self.speed * dt;
-        camera.position += right * (self.amount_right - self.amount_left) * self.speed * dt;
+        // 前后左右移动
+        let (yaw_sin, yaw_cos) = camera.yaw.sin_cos();
+        let forward = glam::Vec3::new(yaw_cos, 0.0, yaw_sin).normalize();
+        let right = glam::Vec3::new(-yaw_sin, 0.0, yaw_cos).normalize();
+        camera.position += forward * (self.amount_forward - self.amount_backward) * self.speed * dt;
+        camera.position += right * (self.amount_right - self.amount_left) * self.speed * dt;
 
-        // 变焦(缩放)
-        // 注意:这不是一个真实的变焦。
-        // 通过摄像机的位置变化来模拟变焦,使你更容易靠近想聚焦的物体。
-        let (pitch_sin, pitch_cos) = camera.pitch.sin_cos();
-        let scrollward = glam::Vec3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
-        camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
-        self.scroll = 0.0;
+        // 变焦(缩放)
+        // 注意:这不是一个真实的变焦。
+        // 通过摄像机的位置变化来模拟变焦,使你更容易靠近想聚焦的物体。
+        let (pitch_sin, pitch_cos) = camera.pitch.sin_cos();
+        let scrollward = glam::Vec3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
+        camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
+        self.scroll = 0.0;
 
-        // 由于我们没有使用滚动,所以直接修改 y 坐标来上下移动。
-        camera.position.y += (self.amount_up - self.amount_down) * self.speed * dt;
+        // 由于我们没有使用滚动,所以直接修改 y 坐标来上下移动。
+        camera.position.y += (self.amount_up - self.amount_down) * self.speed * dt;
 
-        // 旋转
-        camera.yaw += self.rotate_horizontal * self.sensitivity * dt;
-        camera.pitch += -self.rotate_vertical * self.sensitivity * dt;
+        // 旋转
+        camera.yaw += self.rotate_horizontal * self.sensitivity * dt;
+        camera.pitch += -self.rotate_vertical * self.sensitivity * dt;
 
-        // 重置旋转值为 0。没有鼠标移动发生时,摄像机就停止旋转。
-        self.rotate_horizontal = 0.0;
-        self.rotate_vertical = 0.0;
+        // 重置旋转值为 0。没有鼠标移动发生时,摄像机就停止旋转。
+        self.rotate_horizontal = 0.0;
+        self.rotate_vertical = 0.0;
 
-        // 保持摄像机的角度不要太高/太低。
-        if camera.pitch < -SAFE_FRAC_PI_2 {
-            camera.pitch = -SAFE_FRAC_PI_2;
-        } else if camera.pitch > SAFE_FRAC_PI_2 {
-            camera.pitch = SAFE_FRAC_PI_2;
-        }
-    }
-}

清理 lib.rs

首先,我们从 lib.rs 中删除 CameraCameraController,然后导入 camera.rs

rust
mod model;
-mod texture;
-mod camera; // 新增!

接着更新 update_view_proj 以使用新的 CameraProjection

rust
impl CameraUniform {
-    // ...
+        // 保持摄像机的角度不要太高/太低。
+        if camera.pitch < -SAFE_FRAC_PI_2 {
+            camera.pitch = -SAFE_FRAC_PI_2;
+        } else if camera.pitch > SAFE_FRAC_PI_2 {
+            camera.pitch = SAFE_FRAC_PI_2;
+        }
+    }
+}

清理 lib.rs

首先,我们从 lib.rs 中删除 CameraCameraController,然后导入 camera.rs

rust
mod model;
+mod texture;
+mod camera; // 新增!

接着更新 update_view_proj 以使用新的 CameraProjection

rust

+impl CameraUniform {
+    // ...
 
-    // 更新!
-    fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
-        self.view_position = camera.position.extend(1.0).into();
-        self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into();
-    }
-}

我们还要修改 State 来使用新的 CameraCameraProjectionProjection,再添加一个mouse_pressed 字段来存储鼠标是否被按下:

rust
struct State {
-    // ...
-    camera: camera::Camera, // 更新!
-    projection: camera::Projection, // 新增!
-    camera_controller: camera::CameraController, // 更新!
-    // ...
-    // 新增!
-    mouse_pressed: bool,
-}

别忘了需要导入 winit::dpi::PhysicalPosition

然后更新 new() 函数:

rust
impl State {
-    async fn new(window: &Window) -> Self {
-        // ...
+    // 更新!
+    fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
+        self.view_position = camera.position.extend(1.0).into();
+        self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into();
+    }
+}

我们还要修改 State 来使用新的 CameraCameraProjectionProjection,再添加一个mouse_pressed 字段来存储鼠标是否被按下:

rust
struct State {
+    // ...
+    camera: camera::Camera, // 更新!
+    projection: camera::Projection, // 新增!
+    camera_controller: camera::CameraController, // 更新!
+    // ...
+    // 新增!
+    mouse_pressed: bool,
+}

别忘了需要导入 winit::dpi::PhysicalPosition

然后更新 new() 函数:

rust
impl State {
+    async fn new(window: &Window) -> Self {
+        // ...
 
-        // 更新!
-        let camera = camera::Camera::new((0.0, 5.0, 10.0), -90.0, -20.0);
-        let projection = camera::Projection::new(config.width, config.height, 45.0, 0.1, 100.0);
-        let camera_controller = camera::CameraController::new(4.0, 0.4);
+        // 更新!
+        let camera = camera::Camera::new((0.0, 5.0, 10.0), -90.0, -20.0);
+        let projection = camera::Projection::new(config.width, config.height, 45.0, 0.1, 100.0);
+        let camera_controller = camera::CameraController::new(4.0, 0.4);
 
-        // ...
+        // ...
 
-        camera_uniform.update_view_proj(&camera, &projection); // 更新!
+        camera_uniform.update_view_proj(&camera, &projection); // 更新!
 
-        // ...
+        // ...
 
-        Self {
-            // ...
-            camera,
-            projection, // 新增!
-            camera_controller,
-            // ...
-            mouse_pressed: false, // 新增!
-        }
-    }
-}

接着在 resize 函数中更新投影矩阵 projection

rust
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-    // 更新!
-    self.projection.resize(new_size.width, new_size.height);
-    // ...
-}

事件输入函数 input() 也需要被更新。 到目前为止,我们一直在使用 WindowEvent 来控制摄像机,这很有效,但它并不是最好的解决方案。winit 文档告诉我们,操作系统通常会对 CursorMoved 事件的数据进行转换,以实现光标加速等效果。

现在为了解决这个问题,可以修改 input() 函数来处理 DeviceEvent 而不是 WindowEvent,但是在 macOS 和 WASM 上,键盘和按键事件不会被当作 DeviceEvent 发送出来。 做为替代方案,我们删除 input() 中的 CursorMoved 检查,并在 run() 函数中手动调用 camera_controller.process_mouse()

rust
// 更新!
-fn input(&mut self, event: &WindowEvent) -> bool {
-    match event {
-        WindowEvent::KeyboardInput {
-            input:
-                KeyboardInput {
-                    virtual_keycode: Some(key),
-                    state,
-                    ..
-                },
-            ..
-        } => self.camera_controller.process_keyboard(*key, *state),
-        WindowEvent::MouseWheel { delta, .. } => {
-            self.camera_controller.process_scroll(delta);
-            true
-        }
-        WindowEvent::MouseInput {
-            button: MouseButton::Left,
-            state,
-            ..
-        } => {
-            self.mouse_pressed = *state == ElementState::Pressed;
-            true
-        }
-        _ => false,
-    }
-}

下面是对事件循环代理(event_loop)的 run() 函数的修改:

rust
fn main() {
-    // ...
-    event_loop.run(move |event, _, control_flow| {
-        *control_flow = ControlFlow::Poll;
-        match event {
-            // ...
-            // 新增!
-            Event::DeviceEvent {
-                event: DeviceEvent::MouseMotion{ delta, },
-                .. // 我们现在没有用到 device_id
-            } => if state.mouse_pressed {
-                state.camera_controller.process_mouse(delta.0, delta.1)
-            }
-            // 更新!
-            Event::WindowEvent {
-                ref event,
-                window_id,
-            } if window_id == state.app.view.id() && !state.input(event) => {
-                match event {
-                    #[cfg(not(target_arch="wasm32"))]
-                    WindowEvent::CloseRequested
-                    | WindowEvent::KeyboardInput {
-                        input:
-                            KeyboardInput {
-                                state: ElementState::Pressed,
-                                virtual_keycode: Some(VirtualKeyCode::Escape),
-                                ..
-                            },
-                        ..
-                    } => *control_flow = ControlFlow::Exit,
-                    WindowEvent::Resized(physical_size) => {
-                        state.resize(*physical_size);
-                    }
-                    WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
-                        state.resize(**new_inner_size);
-                    }
-                    _ => {}
-                }
-            }
-            // ...
-        }
-    });
-}

update 函数需要多解释一下:CameraController 上的 update_camera 函数有一个参数 dt,它是之间的时间差(delta time,也可以说是时间间隔),用来辅助实现摄像机的平滑移动,使其不被帧速率所锁定。所以将它作为一个参数传入 update

rust
fn update(&mut self, dt: instant::Duration) {
-    // 更新!
-    self.camera_controller.update_camera(&mut self.camera, dt);
-    self.camera_uniform.update_view_proj(&self.camera, &self.projection);
+        Self {
+            // ...
+            camera,
+            projection, // 新增!
+            camera_controller,
+            // ...
+            mouse_pressed: false, // 新增!
+        }
+    }
+}

接着在 resize 函数中更新投影矩阵 projection

rust
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+    // 更新!
+    self.projection.resize(new_size.width, new_size.height);
+    // ...
+}

事件输入函数 input() 也需要被更新。 到目前为止,我们一直在使用 WindowEvent 来控制摄像机,这很有效,但它并不是最好的解决方案。winit 文档告诉我们,操作系统通常会对 CursorMoved 事件的数据进行转换,以实现光标加速等效果。

现在为了解决这个问题,可以修改 input() 函数来处理 DeviceEvent 而不是 WindowEvent,但是在 macOS 和 WASM 上,键盘和按键事件不会被当作 DeviceEvent 发送出来。 做为替代方案,我们删除 input() 中的 CursorMoved 检查,并在 run() 函数中手动调用 camera_controller.process_mouse()

rust
// 更新!
+fn input(&mut self, event: &WindowEvent) -> bool {
+    match event {
+        WindowEvent::KeyboardInput {
+            input:
+                KeyboardInput {
+                    virtual_keycode: Some(key),
+                    state,
+                    ..
+                },
+            ..
+        } => self.camera_controller.process_keyboard(*key, *state),
+        WindowEvent::MouseWheel { delta, .. } => {
+            self.camera_controller.process_scroll(delta);
+            true
+        }
+        WindowEvent::MouseInput {
+            button: MouseButton::Left,
+            state,
+            ..
+        } => {
+            self.mouse_pressed = *state == ElementState::Pressed;
+            true
+        }
+        _ => false,
+    }
+}

下面是对事件循环代理(event_loop)的 run() 函数的修改:

rust
fn main() {
+    // ...
+    event_loop.run(move |event, _, control_flow| {
+        *control_flow = ControlFlow::Poll;
+        match event {
+            // ...
+            // 新增!
+            Event::DeviceEvent {
+                event: DeviceEvent::MouseMotion{ delta, },
+                .. // 我们现在没有用到 device_id
+            } => if state.mouse_pressed {
+                state.camera_controller.process_mouse(delta.0, delta.1)
+            }
+            // 更新!
+            Event::WindowEvent {
+                ref event,
+                window_id,
+            } if window_id == state.app.view.id() && !state.input(event) => {
+                match event {
+                    #[cfg(not(target_arch="wasm32"))]
+                    WindowEvent::CloseRequested
+                    | WindowEvent::KeyboardInput {
+                        input:
+                            KeyboardInput {
+                                state: ElementState::Pressed,
+                                virtual_keycode: Some(VirtualKeyCode::Escape),
+                                ..
+                            },
+                        ..
+                    } => *control_flow = ControlFlow::Exit,
+                    WindowEvent::Resized(physical_size) => {
+                        state.resize(*physical_size);
+                    }
+                    WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
+                        state.resize(**new_inner_size);
+                    }
+                    _ => {}
+                }
+            }
+            // ...
+        }
+    });
+}

update 函数需要多解释一下:CameraController 上的 update_camera 函数有一个参数 dt,它是之间的时间差(delta time,也可以说是时间间隔),用来辅助实现摄像机的平滑移动,使其不被帧速率所锁定。所以将它作为一个参数传入 update

rust
fn update(&mut self, dt: instant::Duration) {
+    // 更新!
+    self.camera_controller.update_camera(&mut self.camera, dt);
+    self.camera_uniform.update_view_proj(&self.camera, &self.projection);
 
-    // ..
-}

既然如此,我们也用 dt 来平滑光源的旋转:

rust
self.light_uniform.position =
-    (glam::Quat::from_axis_angle(glam::Vec3::Y, (60.0 * dt.as_secs_f32()).to_radians())
-    * old_position).into(); // 更新!

让我们在 main 函数中来实现 dt 的具体计算:

rust
fn main() {
-    // ...
-    let mut state = State::new(&window).await;
-    let mut last_render_time = instant::Instant::now();  // 新增!
-    event_loop.run(move |event, _, control_flow| {
-        *control_flow = ControlFlow::Poll;
-        match event {
-            // ...
-            // 更新!
-            Event::RedrawRequested(window_id) if window_id == state.app.view.id() => {
-                let now = instant::Instant::now();
-                let dt = now - last_render_time;
-                last_render_time = now;
-                state.update(dt);
-                // ...
-            }
-            _ => {}
-        }
-    });
-}

现在,我们应该可以自由控制摄像机了:

./screenshot.png

- + // .. +}

既然如此,我们也用 dt 来平滑光源的旋转:

rust
self.light_uniform.position =
+    (glam::Quat::from_axis_angle(glam::Vec3::Y, (60.0 * dt.as_secs_f32()).to_radians())
+    * old_position).into(); // 更新!

让我们在 main 函数中来实现 dt 的具体计算:

rust
fn main() {
+    // ...
+    let mut state = State::new(&window).await;
+    let mut last_render_time = instant::Instant::now();  // 新增!
+    event_loop.run(move |event, _, control_flow| {
+        *control_flow = ControlFlow::Poll;
+        match event {
+            // ...
+            // 更新!
+            Event::RedrawRequested(window_id) if window_id == state.app.view.id() => {
+                let now = instant::Instant::now();
+                let dt = now - last_render_time;
+                last_render_time = now;
+                state.update(dt);
+                // ...
+            }
+            _ => {}
+        }
+    });
+}

现在,我们应该可以自由控制摄像机了:

./screenshot.png

+ \ No newline at end of file diff --git a/intermediate/tutorial13-terrain/index.html b/intermediate/tutorial13-terrain/index.html index 9efa7f206..7eda5a778 100644 --- a/intermediate/tutorial13-terrain/index.html +++ b/intermediate/tutorial13-terrain/index.html @@ -5,121 +5,123 @@ 程序地形 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

程序地形

到目前为止,我们一直在一个空旷的场景里渲染模型。如果只是想测试着色代码,这是非常好的,但大多数应用程序会想让屏幕上填充更多有趣的元素。 你可以用各种方法来处理此问题,比如,在 Blender 中创建一堆模型,然后把它们加载到场景中。如果你有一些像样的艺术技巧和一些耐心,这是很有效的方法。我在这两个方面都很欠缺,所以让我们通过代码来制作一些看起来不错的东西。

正如本文的名字所示,我们将创建一个地形(Terrain)。现在,创建地形网格的经典方法是使用预先生成的噪声纹理(Noise Texture),并对其进行采样,以获得网格中每个点的高度值。这是一个相当有效的方法,但我选择了直接使用计算着色器来生成噪声。让我们开始吧!

计算着色器

计算着色器(Compute Shader)允许你利用 GPU 的并行计算能力完成任意任务。虽然它也可以用于渲染任务,但通常用于与绘制三角形和像素没有直接关系的任务,比如,物理模拟、图像滤镜、创建程序纹理、运行神经网络等等。我稍后会详细介绍它们的工作原理,但现在只需用它们来为我们的地形创建顶点和索引缓冲区。

噪声函数

让我们从计算着色器的代码开始,创建一个名为 terrain.wgsl 的新文件,在文件内先实现一个噪声函数(Noise Function),然后再创建着色器的入口函数。具体代码如下:

rust
// ============================
-// 地形生成
-// ============================
+    
Skip to content

程序地形

到目前为止,我们一直在一个空旷的场景里渲染模型。如果只是想测试着色代码,这是非常好的,但大多数应用程序会想让屏幕上填充更多有趣的元素。 你可以用各种方法来处理此问题,比如,在 Blender 中创建一堆模型,然后把它们加载到场景中。如果你有一些像样的艺术技巧和一些耐心,这是很有效的方法。我在这两个方面都很欠缺,所以让我们通过代码来制作一些看起来不错的东西。

正如本文的名字所示,我们将创建一个地形(Terrain)。现在,创建地形网格的经典方法是使用预先生成的噪声纹理(Noise Texture),并对其进行采样,以获得网格中每个点的高度值。这是一个相当有效的方法,但我选择了直接使用计算着色器来生成噪声。让我们开始吧!

计算着色器

计算着色器(Compute Shader)允许你利用 GPU 的并行计算能力完成任意任务。虽然它也可以用于渲染任务,但通常用于与绘制三角形和像素没有直接关系的任务,比如,物理模拟、图像滤镜、创建程序纹理、运行神经网络等等。我稍后会详细介绍它们的工作原理,但现在只需用它们来为我们的地形创建顶点和索引缓冲区。

噪声函数

让我们从计算着色器的代码开始,创建一个名为 terrain.wgsl 的新文件,在文件内先实现一个噪声函数(Noise Function),然后再创建着色器的入口函数。具体代码如下:

rust
// ============================
+// 地形生成
+// ============================
 
-// https://gist.github.com/munrocket/236ed5ba7e409b8bdf1ff6eca5dcdc39
-//  MIT License. © Ian McEwan, Stefan Gustavson, Munrocket
-// - Less condensed glsl implementation with comments can be found at https://weber.itn.liu.se/~stegu/jgt2012/article.pdf
+// https://gist.github.com/munrocket/236ed5ba7e409b8bdf1ff6eca5dcdc39
+//  MIT License. © Ian McEwan, Stefan Gustavson, Munrocket
+// - Less condensed glsl implementation with comments can be found at https://weber.itn.liu.se/~stegu/jgt2012/article.pdf
 
-fn permute3(x: vec3f) -> vec3f { return (((x * 34.) + 1.) * x) % vec3f(289.); }
+fn permute3(x: vec3f) -> vec3f { return (((x * 34.) + 1.) * x) % vec3f(289.); }
 
-fn snoise2(v: vec2f) -> f32 {
-  let C = vec4f(0.211324865405187, 0.366025403784439, -0.577350269189626, 0.024390243902439);
-  var i: vec2f = floor(v + dot(v, C.yy));
-  let x0 = v - i + dot(i, C.xx);
-  // I flipped the condition here from > to < as it fixed some artifacting I was observing
-  var i1: vec2f = select(vec2f(1., 0.), vec2f(0., 1.), (x0.x < x0.y));
-  var x12: vec4f = x0.xyxy + C.xxzz - vec4f(i1, 0., 0.);
-  i = i % vec2f(289.);
-  let p = permute3(permute3(i.y + vec3f(0., i1.y, 1.)) + i.x + vec3f(0., i1.x, 1.));
-  var m: vec3f = max(0.5 -
-      vec3f(dot(x0, x0), dot(x12.xy, x12.xy), dot(x12.zw, x12.zw)), vec3f(0.));
-  m = m * m;
-  m = m * m;
-  let x = 2. * fract(p * C.www) - 1.;
-  let h = abs(x) - 0.5;
-  let ox = floor(x + 0.5);
-  let a0 = x - ox;
-  m = m * (1.79284291400159 - 0.85373472095314 * (a0 * a0 + h * h));
-  let g = vec3f(a0.x * x0.x + h.x * x0.y, a0.yz * x12.xz + h.yz * x12.yw);
-  return 130. * dot(m, g);
-}

部分读者可能已经认出这是 Simplex 噪声(特别是 OpenSimplex 噪声)的一个实现。我承认没有真正理解 OpenSimplex 噪声背后的数学原理。它的基本原理类似于 Perlin 噪声,但不是一个正方形网格,而是六边形网格,这消除了在正方形网格上产生噪声的一些伪影。我也不是这方面的专家,所以总结一下:permute3() 接收一个 vec3 并返回一个伪随机的 vec3snoise2() 接收一个 vec2 并返回一个 [-1, 1] 之间的浮点数。如果你想了解更多关于噪声函数的信息,请查看这篇文章来自 The Book of Shaders。代码是用 GLSL 编写的,但概念是一样的。

从下面的渲染结果可以看出,直接使用 snoise 的输出来生成地形的高度值,地表往往过于平滑。虽然这可能就是你想要的,但它看起来不像是自然界的地形。

smooth terrain

为了使地形更加粗糙,我们将使用一种叫做分形布朗运动的技术。这种技术的工作原理是对噪声函数进行多次采样,每次将强度减半,同时将噪声的频率提高一倍。 这意味着地形的整体形状保持平滑,同时拥有更清晰的细节,得到的效果将是下面这样:

more organic terrain

这个函数的代码其实很简单:

rust
fn fbm(p: vec2f) -> f32 {
-    let NUM_OCTAVES: u32 = 5u;
-    var x = p * 0.01;
-    var v = 0.0;
-    var a = 0.5;
-    let shift = vec2f(100.0);
-    let cs = vec2f(cos(0.5), sin(0.5));
-    let rot = mat2x2<f32>(cs.x, cs.y, -cs.y, cs.x);
+fn snoise2(v: vec2f) -> f32 {
+  let C = vec4f(0.211324865405187, 0.366025403784439, -0.577350269189626, 0.024390243902439);
+  var i: vec2f = floor(v + dot(v, C.yy));
+  let x0 = v - i + dot(i, C.xx);
+  // I flipped the condition here from > to < as it fixed some artifacting I was observing
+  var i1: vec2f = select(vec2f(1., 0.), vec2f(0., 1.), (x0.x < x0.y));
+  var x12: vec4f = x0.xyxy + C.xxzz - vec4f(i1, 0., 0.);
+  i = i % vec2f(289.);
+  let p = permute3(permute3(i.y + vec3f(0., i1.y, 1.)) + i.x + vec3f(0., i1.x, 1.));
+  var m: vec3f = max(0.5 -
+      vec3f(dot(x0, x0), dot(x12.xy, x12.xy), dot(x12.zw, x12.zw)), vec3f(0.));
+  m = m * m;
+  m = m * m;
+  let x = 2. * fract(p * C.www) - 1.;
+  let h = abs(x) - 0.5;
+  let ox = floor(x + 0.5);
+  let a0 = x - ox;
+  m = m * (1.79284291400159 - 0.85373472095314 * (a0 * a0 + h * h));
+  let g = vec3f(a0.x * x0.x + h.x * x0.y, a0.yz * x12.xz + h.yz * x12.yw);
+  return 130. * dot(m, g);
+}

部分读者可能已经认出这是 Simplex 噪声(特别是 OpenSimplex 噪声)的一个实现。我承认没有真正理解 OpenSimplex 噪声背后的数学原理。它的基本原理类似于 Perlin 噪声,但不是一个正方形网格,而是六边形网格,这消除了在正方形网格上产生噪声的一些伪影。我也不是这方面的专家,所以总结一下:permute3() 接收一个 vec3 并返回一个伪随机的 vec3snoise2() 接收一个 vec2 并返回一个 [-1, 1] 之间的浮点数。如果你想了解更多关于噪声函数的信息,请查看这篇文章来自 The Book of Shaders。代码是用 GLSL 编写的,但概念是一样的。

从下面的渲染结果可以看出,直接使用 snoise 的输出来生成地形的高度值,地表往往过于平滑。虽然这可能就是你想要的,但它看起来不像是自然界的地形。

smooth terrain

为了使地形更加粗糙,我们将使用一种叫做分形布朗运动的技术。这种技术的工作原理是对噪声函数进行多次采样,每次将强度减半,同时将噪声的频率提高一倍。 这意味着地形的整体形状保持平滑,同时拥有更清晰的细节,得到的效果将是下面这样:

more organic terrain

这个函数的代码其实很简单:

rust
fn fbm(p: vec2f) -> f32 {
+    let NUM_OCTAVES: u32 = 5u;
+    var x = p * 0.01;
+    var v = 0.0;
+    var a = 0.5;
+    let shift = vec2f(100.0);
+    let cs = vec2f(cos(0.5), sin(0.5));
+    let rot = mat2x2<f32>(cs.x, cs.y, -cs.y, cs.x);
 
-    for (var i=0u; i<NUM_OCTAVES; i=i+1u) {
-        v = v + a * snoise2(x);
-        x = rot * x * 2.0 + shift;
-        a = a * 0.5;
-    }
+    for (var i=0u; i<NUM_OCTAVES; i=i+1u) {
+        v = v + a * snoise2(x);
+        x = rot * x * 2.0 + shift;
+        a = a * 0.5;
+    }
 
-    return v;
-}

让我们稍微回顾一下:

  • NUM_OCTAVES 常数设定噪声级别。更高的级别将给地形网格增加更多的细节,但级别越高,得到的回报将递减,我发现 5 是一个好数字。
  • p 乘以 0.01 用来“放大”噪声函数。这是因为我们的网格将是 1x1 的四边形,而 simplex 噪声函数在每步进一次时类似于白噪声。我们来看到直接使用 p 是什么样子的:spiky terrain
  • a 变量是在给定的噪声级别下的噪声振幅。
  • shiftrot 用于减少生成的噪声中的失真。其中一个失真现象是,在 0,0 处,无论你如何缩放 psnoise 的输出都是一样的。

生成网格

为了生成地形网格,需要向着色器传递一些信息:

rust
struct ChunkData {
-    chunk_size: vec2u,
-    chunk_corner: vec2<i32>,
-    min_max_height: vec2f,
-}
+    return v;
+}

让我们稍微回顾一下:

  • NUM_OCTAVES 常数设定噪声级别。更高的级别将给地形网格增加更多的细节,但级别越高,得到的回报将递减,我发现 5 是一个好数字。
  • p 乘以 0.01 用来“放大”噪声函数。这是因为我们的网格将是 1x1 的四边形,而 simplex 噪声函数在每步进一次时类似于白噪声。我们来看到直接使用 p 是什么样子的:spiky terrain
  • a 变量是在给定的噪声级别下的噪声振幅。
  • shiftrot 用于减少生成的噪声中的失真。其中一个失真现象是,在 0,0 处,无论你如何缩放 psnoise 的输出都是一样的。

生成网格

为了生成地形网格,需要向着色器传递一些信息:

rust
struct ChunkData {
+    chunk_size: vec2u,
+    chunk_corner: vec2<i32>,
+    min_max_height: vec2f,
+}
 
-struct Vertex {
-    @location(0) position: vec3f,
-    @location(1) normal: vec3f,
-}
+struct Vertex {
+    @location(0) position: vec3f,
+    @location(1) normal: vec3f,
+}
 
-struct VertexBuffer {
-    data: array<Vertex>, // stride: 32
-}
+struct VertexBuffer {
+    data: array<Vertex>, // stride: 32
+}
 
-struct IndexBuffer {
-    data: array<u32>,
-}
+struct IndexBuffer {
+    data: array<u32>,
+}
 
-@group(0) @binding(0) var<uniform> chunk_data: ChunkData;
-@group(0)@binding(1) var<storage, read_write> vertices: VertexBuffer;
-@group(0)@binding(2) var<storage, read_write> indices: IndexBuffer;

我们传递给色器的 uniform 缓冲区,其中包括四边形网格的大小 chunk_size,噪声算法的起始点 chunk_corner ,以及地形的 min_max_height

顶点和索引缓冲区作为 storage 缓冲区传入,并启用 read_write 访问模式来支持数据的读取与写入。我们将在 Rust 中创建这些缓冲区,并在执行计算着色器时将其绑定。

着色器的下一个部分是在网格上生成一个点,以及该点的一个顶点:

rust
fn terrain_point(p: vec2f) -> vec3f {
-    return vec3f(
-        p.x,
-        mix(chunk_data.min_max_height.x,chunk_data.min_max_height.y, fbm(p)),
-        p.y,
-    );
-}
+@group(0) @binding(0) var<uniform> chunk_data: ChunkData;
+@group(0)@binding(1) var<storage, read_write> vertices: VertexBuffer;
+@group(0)@binding(2) var<storage, read_write> indices: IndexBuffer;

我们传递给色器的 uniform 缓冲区,其中包括四边形网格的大小 chunk_size,噪声算法的起始点 chunk_corner ,以及地形的 min_max_height

顶点和索引缓冲区作为 storage 缓冲区传入,并启用 read_write 访问模式来支持数据的读取与写入。我们将在 Rust 中创建这些缓冲区,并在执行计算着色器时将其绑定。

着色器的下一个部分是在网格上生成一个点,以及该点的一个顶点:

rust
fn terrain_point(p: vec2f) -> vec3f {
+    return vec3f(
+        p.x,
+        mix(chunk_data.min_max_height.x,chunk_data.min_max_height.y, fbm(p)),
+        p.y,
+    );
+}
 
-fn terrain_vertex(p: vec2f) -> Vertex {
-    let v = terrain_point(p);
+fn terrain_vertex(p: vec2f) -> Vertex {
+    let v = terrain_point(p);
 
-    let tpx = terrain_point(p + vec2f(0.1, 0.0)) - v;
-    let tpz = terrain_point(p + vec2f(0.0, 0.1)) - v;
-    let tnx = terrain_point(p + vec2f(-0.1, 0.0)) - v;
-    let tnz = terrain_point(p + vec2f(0.0, -0.1)) - v;
+    let tpx = terrain_point(p + vec2f(0.1, 0.0)) - v;
+    let tpz = terrain_point(p + vec2f(0.0, 0.1)) - v;
+    let tnx = terrain_point(p + vec2f(-0.1, 0.0)) - v;
+    let tnz = terrain_point(p + vec2f(0.0, -0.1)) - v;
 
-    let pn = normalize(cross(tpz, tpx));
-    let nn = normalize(cross(tnz, tnx));
+    let pn = normalize(cross(tpz, tpx));
+    let nn = normalize(cross(tnz, tnx));
 
-    let n = (pn + nn) * 0.5;
+    let n = (pn + nn) * 0.5;
 
-    return Vertex(v, n);
-}

terrain_point 函数接收地形上的一个 XZ 点,并返回一个 vec3,其中 y 值在最小和最大高度之间。

terrain_vertex 使用 terrain_point 来获得它的位置,同时通过对附近的 4 个点进行采样,并使用叉积来计算顶点法线。

你应该注意到了 Vertex 结构体不包括纹理坐标字段。我们可以通过使用顶点的 XZ 坐标,并让纹理采样器在 X 和 Y 轴上镜像纹理来轻松地创建纹理坐标,但以这种方式进行纹理采样时,高度图往往会有拉伸现象。

我们将在未来的教程中介绍一种叫做三平面映射的方法来给地形贴图。但现在我们只使用一个程序纹理,它将在渲染地形的片元着色器中被创建。

现在我们可以在地形表面获得一个实际的顶点数据,并用来填充顶点和索引缓冲区了。我们将创建一个 gen_terrain() 函数作为计算着色器的入口:

rust
@compute @workgroup_size(64)
-fn gen_terrain(
-    @builtin(global_invocation_id) gid: vec3<u32>
-) {
-    // snipped...
-}

@stage(compute) 注释指定了 gen_terrain 是一个计算着色器入口。

workgroup_size() 指定 GPU 可以为每个工作组(workgroup)分配的一组调用,这一组调用会同时执行着色器入口函数,并共享对工作组地址空间中着色器变量的访问。 我们在编写计算着色器的时候指定工作组的大小,它有 3 个维度的参数,因为工作组是一个 3D 网格,但如果不指定它们,则默认为 1。 换句话说,workgroup_size(64) 相当于 workgroup_size(64, 1, 1)

global_invocation_id 是一个 3D 索引。这可能看起来很奇怪,但你可以把工作组看作是工作组的 3D 网格。这些工作组有一个内部的工作者网格。global_invocation_id 就是相对于所有其他工作组的当前工作者的 id。

从视觉上看,工作组的网格看起来会是这样的:

work group grid

把计算着色器想象成一个在一堆嵌套的 for 循环中运行的函数,但每个循环都是并行执行的,这可能会有帮助。它看起来会像这样:

for wgx in num_workgroups.x:
-    for wgy in num_workgroups.y:
-        for wgz in num_workgroups.z:
-            var local_invocation_id = (wgx, wgy, wgz)
-            for x in workgroup_size.x:
-                for y in workgroup_size.x:
-                    for z in workgroup_size.x:
-                        var global_invocation_id = local_invocation_id * workgroup_size + (x, y, z);
-                        gen_terrain(global_invocation_id)

如果想了解更多关于工作组的信息请查看 WGSL 文档

TODO:

  • Note changes to create_render_pipeline
  • Mention swizzle feature for cgmath
  • Compare workgroups and workgroups sizes to nested for loops
    • Maybe make a diagram in blender?
  • Change to camera movement speed
- + return Vertex(v, n); +}

terrain_point 函数接收地形上的一个 XZ 点,并返回一个 vec3,其中 y 值在最小和最大高度之间。

terrain_vertex 使用 terrain_point 来获得它的位置,同时通过对附近的 4 个点进行采样,并使用叉积来计算顶点法线。

你应该注意到了 Vertex 结构体不包括纹理坐标字段。我们可以通过使用顶点的 XZ 坐标,并让纹理采样器在 X 和 Y 轴上镜像纹理来轻松地创建纹理坐标,但以这种方式进行纹理采样时,高度图往往会有拉伸现象。

我们将在未来的教程中介绍一种叫做三平面映射的方法来给地形贴图。但现在我们只使用一个程序纹理,它将在渲染地形的片元着色器中被创建。

现在我们可以在地形表面获得一个实际的顶点数据,并用来填充顶点和索引缓冲区了。我们将创建一个 gen_terrain() 函数作为计算着色器的入口:

rust
@compute @workgroup_size(64)
+fn gen_terrain(
+    @builtin(global_invocation_id) gid: vec3<u32>
+) {
+    // snipped...
+}

@stage(compute) 注释指定了 gen_terrain 是一个计算着色器入口。

workgroup_size() 指定 GPU 可以为每个工作组(workgroup)分配的一组调用,这一组调用会同时执行着色器入口函数,并共享对工作组地址空间中着色器变量的访问。 我们在编写计算着色器的时候指定工作组的大小,它有 3 个维度的参数,因为工作组是一个 3D 网格,但如果不指定它们,则默认为 1。 换句话说,workgroup_size(64) 相当于 workgroup_size(64, 1, 1)

global_invocation_id 是一个 3D 索引。这可能看起来很奇怪,但你可以把工作组看作是工作组的 3D 网格。这些工作组有一个内部的工作者网格。global_invocation_id 就是相对于所有其他工作组的当前工作者的 id。

从视觉上看,工作组的网格看起来会是这样的:

work group grid

把计算着色器想象成一个在一堆嵌套的 for 循环中运行的函数,但每个循环都是并行执行的,这可能会有帮助。它看起来会像这样:

for wgx in num_workgroups.x:
+    for wgy in num_workgroups.y:
+        for wgz in num_workgroups.z:
+            var local_invocation_id = (wgx, wgy, wgz)
+            for x in workgroup_size.x:
+                for y in workgroup_size.x:
+                    for z in workgroup_size.x:
+                        var global_invocation_id = local_invocation_id * workgroup_size + (x, y, z);
+                        gen_terrain(global_invocation_id)

如果想了解更多关于工作组的信息请查看 WGSL 文档

TODO:

  • Note changes to create_render_pipeline
  • Mention swizzle feature for cgmath
  • Compare workgroups and workgroups sizes to nested for loops
    • Maybe make a diagram in blender?
  • Change to camera movement speed
+ \ No newline at end of file diff --git a/intermediate/vertex-animation/index.html b/intermediate/vertex-animation/index.html index 08f9d8f18..ad98e5a7a 100644 --- a/intermediate/vertex-animation/index.html +++ b/intermediate/vertex-animation/index.html @@ -5,19 +5,21 @@ 顶点动画 | 学习 wgpu - + + - - - - - + + + + + - + + - - + + \ No newline at end of file diff --git a/intermediate/vertex-animation/universal-animation-formula.html b/intermediate/vertex-animation/universal-animation-formula.html index 8e721278f..419f7aab4 100644 --- a/intermediate/vertex-animation/universal-animation-formula.html +++ b/intermediate/vertex-animation/universal-animation-formula.html @@ -5,71 +5,73 @@ 万能动画公式 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

万能动画公式

要实现不同形态/形状之间的动态变换,核心算法很简单,就是通过构造同等数量的顶点/控制点来实现。

在进行动态变换时,通常不同形态或形状之间的顶点数量会不相等。为了使两边的顶点能够一一对应起来,我们可以通过随机或插值的方式来补充顶点。这种方式不会破坏顶点数较少一边的造型,相当于某些点有了分身。通过对对应顶点的插值计算,就能够实现形态的变换。

此万能动画公式的优点在于它足够简单且通用。无论是对于简单的形状变换还是复杂的动态效果,都可以通过构造同等数量的顶点来实现。而且,运用不同的插值算法,还能灵活地控制形态变换的程度和速度。

示例:Hilbert 曲线

此示例能正常运行在桌面端及 Firefox Nightly.

但 Chrome 118 上存在管线验证的 bug,导致会报如下警告而无法正常运行:

log
Attribute offset (12) with format VertexFormat::Float32x3 (size: 12) doesn't fit in the vertex buffer stride (12).
- - While validating attributes[1].
- - While validating buffers[0].

代码实现

Hilbert 曲线是一种连续、自避免且自相似的空间填充曲线。

每升一个维度,曲线的顶点数就多 4 倍,基于这个规律,我们用上面的万能动画公式来完成升维/降维变换动画:

rust
pub struct HilbertCurveApp {
-    // 当前曲线与目标曲线的顶点缓冲区
-    vertex_buffers: Vec<wgpu::Buffer>,
-    // 当前曲线的顶点总数
-    curve_vertex_count: usize,
-    // 当前动画帧的索引,用于设置缓冲区的动态偏移
-    animate_index: u32,
-    // 每一个动画阶段的总帧数
-    draw_count: u32,
-    // 目标曲线维度
-    curve_dimention: u32,
-    // 是否为升维动画
-    is_animation_up: bool,
-}

创建两个 ping-pong 顶点缓冲区,它们的大小一样:

rust
let mut vertex_buffers: Vec<wgpu::Buffer> = Vec::with_capacity(2);
-for _ in 0..2 {
-    let buf = app.device.create_buffer(&wgpu::BufferDescriptor {
-        size,
-        usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
-        label: None,
-        mapped_at_creation: false,
-    });
-    vertex_buffers.push(buf);
-}

render() 函数中基于动画迭代情况填充/更新顶点缓冲区:

rust
let mut target = HilbertCurve::new(self.curve_dimention);
-let start = if self.is_animation_up {
-    let mut start = HilbertCurve::new(self.curve_dimention - 1);
-    // 把顶点数翻 4 倍来对应目标维度曲线
-    start.four_times_vertices();
-    start
-} else {
-    target.four_times_vertices();
-    HilbertCurve::new(self.curve_dimention + 1)
-};
-// 更新顶点数
-self.curve_vertex_count = target.vertices.len();
-// 填充顶点 buffer
-for (buf, curve) in self.vertex_buffers.iter().zip(vec![start, target].iter()) {
-    self.app
-        .queue
-        .write_buffer(buf, 0, bytemuck::cast_slice(&curve.vertices));
-}

着色器中完成顶点位置的插值计算:

wgsl
struct HilbertUniform {
-    // 接近目标的比例
-    near_target_ratio: f32,
-};
-@group(0) @binding(0) var<uniform> mvp_mat: MVPMatUniform;
-@group(1) @binding(0) var<uniform> hilbert: HilbertUniform;
+    
Skip to content

万能动画公式

要实现不同形态/形状之间的动态变换,核心算法很简单,就是通过构造同等数量的顶点/控制点来实现。

在进行动态变换时,通常不同形态或形状之间的顶点数量会不相等。为了使两边的顶点能够一一对应起来,我们可以通过随机或插值的方式来补充顶点。这种方式不会破坏顶点数较少一边的造型,相当于某些点有了分身。通过对对应顶点的插值计算,就能够实现形态的变换。

此万能动画公式的优点在于它足够简单且通用。无论是对于简单的形状变换还是复杂的动态效果,都可以通过构造同等数量的顶点来实现。而且,运用不同的插值算法,还能灵活地控制形态变换的程度和速度。

示例:Hilbert 曲线

此示例能正常运行在桌面端及 Firefox Nightly.

但 Chrome 118 上存在管线验证的 bug,导致会报如下警告而无法正常运行:

log
Attribute offset (12) with format VertexFormat::Float32x3 (size: 12) doesn't fit in the vertex buffer stride (12).
+ - While validating attributes[1].
+ - While validating buffers[0].

代码实现

Hilbert 曲线是一种连续、自避免且自相似的空间填充曲线。

每升一个维度,曲线的顶点数就多 4 倍,基于这个规律,我们用上面的万能动画公式来完成升维/降维变换动画:

rust
pub struct HilbertCurveApp {
+    // 当前曲线与目标曲线的顶点缓冲区
+    vertex_buffers: Vec<wgpu::Buffer>,
+    // 当前曲线的顶点总数
+    curve_vertex_count: usize,
+    // 当前动画帧的索引,用于设置缓冲区的动态偏移
+    animate_index: u32,
+    // 每一个动画阶段的总帧数
+    draw_count: u32,
+    // 目标曲线维度
+    curve_dimention: u32,
+    // 是否为升维动画
+    is_animation_up: bool,
+}

创建两个 ping-pong 顶点缓冲区,它们的大小一样:

rust
let mut vertex_buffers: Vec<wgpu::Buffer> = Vec::with_capacity(2);
+for _ in 0..2 {
+    let buf = app.device.create_buffer(&wgpu::BufferDescriptor {
+        size,
+        usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
+        label: None,
+        mapped_at_creation: false,
+    });
+    vertex_buffers.push(buf);
+}

render() 函数中基于动画迭代情况填充/更新顶点缓冲区:

rust
let mut target = HilbertCurve::new(self.curve_dimention);
+let start = if self.is_animation_up {
+    let mut start = HilbertCurve::new(self.curve_dimention - 1);
+    // 把顶点数翻 4 倍来对应目标维度曲线
+    start.four_times_vertices();
+    start
+} else {
+    target.four_times_vertices();
+    HilbertCurve::new(self.curve_dimention + 1)
+};
+// 更新顶点数
+self.curve_vertex_count = target.vertices.len();
+// 填充顶点 buffer
+for (buf, curve) in self.vertex_buffers.iter().zip(vec![start, target].iter()) {
+    self.app
+        .queue
+        .write_buffer(buf, 0, bytemuck::cast_slice(&curve.vertices));
+}

着色器中完成顶点位置的插值计算:

wgsl
struct HilbertUniform {
+    // 接近目标的比例
+    near_target_ratio: f32,
+};
+@group(0) @binding(0) var<uniform> mvp_mat: MVPMatUniform;
+@group(1) @binding(0) var<uniform> hilbert: HilbertUniform;
 
-@vertex
-fn vs_main(@location(0) pos: vec3f, @location(1) target_pos: vec3f) -> @builtin(position) vec4f {
-   let new_pos = pos + (target_pos - pos) * hilbert.near_target_ratio;
-   return mvp_mat.mvp * vec4<f32>(new_pos, 1.0);
-}

查看完整源码

- +@vertex +fn vs_main(@location(0) pos: vec3f, @location(1) target_pos: vec3f) -> @builtin(position) vec4f { + let new_pos = pos + (target_pos - pos) * hilbert.near_target_ratio; + return mvp_mat.mvp * vec4<f32>(new_pos, 1.0); +}

查看完整源码

+ \ No newline at end of file diff --git a/res/chrome.png b/res/chrome.png deleted file mode 100644 index 1245a406b..000000000 Binary files a/res/chrome.png and /dev/null differ diff --git a/res/edge.png b/res/edge.png deleted file mode 100644 index 02473fc9b..000000000 Binary files a/res/edge.png and /dev/null differ diff --git a/res/safari.png b/res/safari.png new file mode 100644 index 000000000..7756c62d7 Binary files /dev/null and b/res/safari.png differ diff --git a/showcase/alignment.html b/showcase/alignment.html index 8d95dc782..755913884 100644 --- a/showcase/alignment.html +++ b/showcase/alignment.html @@ -5,39 +5,41 @@ Memory Layout in WGSL | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

Memory Layout in WGSL

This page is currently being reworked. I want to understand the topics a bit better, but as 0.12 is out I want to release what I have for now.

Alignment of vertex and index buffers

Vertex buffers require defining a VertexBufferLayout, so the memory alignment is whatever you tell WebGPU it should be. This can be really convenient for keeping down memory usage on the GPU.

The Index Buffer uses the alignment of whatever primitive type you specify via the IndexFormat you pass into RenderEncoder::set_index_buffer().

Alignment of Uniform and Storage buffers

GPUs are designed to process thousands of pixels in parallel. In order to achieve this, some sacrifices had to be made. Graphics hardware likes to have all the bytes you intend on processing aligned by powers of 2. The exact specifics of why this is are beyond my level of knowledge, but it's important to know so that you can troubleshoot why your shaders aren't working.

Let's take a look at the following table:


TypeAlignment in BytesSize in Bytes
scalar (i32, u32, f32)44
vec2<T>88
vec3<T>1612
vec4<T>1616

You can see for vec3 the alignment is the next power of 2 from the size, 16. This can catch beginners (and even veterans) off guard as it's not the most intuitive. This becomes especially important when we start laying out structs. Take the light struct from the lighting tutorial:

You can see the full table of the alignments in section 4.3.7.1 of the WGSL spec

rust
struct Light {
-    position: vec3f,
-    color: vec3f,
-}

So what's the alignment of this struct? Your first guess would be that it's the sum of the alignments of the individual fields. That might make sense if we were in Rust-land, but in shader-land, it's a little more involved. The alignment for a given struct is given by the following equation:

// S is the struct in question
-// M is a member of the struct
-AlignOf(S) = max(AlignOfMember(S, M1), ... , AlignOfMember(S, Mn))

Basically, the alignment of the struct is the maximum of the alignments of the members of the struct. This means that:

AlignOf(Light)
-    = max(AlignOfMember(Light, position), AlignOfMember(Light, color))
-    = max(16, 16)
-    = 16

This is why the LightUniform has those padding fields. WGPU won't accept it if the data is not aligned correctly.

How to deal with alignment issues

In general, 16 is the max alignment you'll see. In that case, you might think that we should be able to do something like the following:

rust
#[repr(C, align(16))]
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct LightUniform {
-    position: [f32; 3],
-    color: [f32; 3],
-}

But this won't compile. The bytemuck crate doesn't work with structs with implicit padding bytes. Rust can't guarantee that the memory between the fields has been initialized properly. This gave me an error when I tried it:

error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
-   --> code/intermediate/tutorial10-lighting/src/main.rs:246:8
-    |
-246 | struct LightUniform {
-    |        ^^^^^^^^^^^^
-    |
-    = note: source type: `LightUniform` (256 bits)
-    = note: target type: `_::{closure#0}::TypeWithoutPadding` (192 bits)

Additional resources

If you're looking for more information check out the write-up by @teoxoy.

- +
Skip to content

Memory Layout in WGSL

This page is currently being reworked. I want to understand the topics a bit better, but as 0.12 is out I want to release what I have for now.

Alignment of vertex and index buffers

Vertex buffers require defining a VertexBufferLayout, so the memory alignment is whatever you tell WebGPU it should be. This can be really convenient for keeping down memory usage on the GPU.

The Index Buffer uses the alignment of whatever primitive type you specify via the IndexFormat you pass into RenderEncoder::set_index_buffer().

Alignment of Uniform and Storage buffers

GPUs are designed to process thousands of pixels in parallel. In order to achieve this, some sacrifices had to be made. Graphics hardware likes to have all the bytes you intend on processing aligned by powers of 2. The exact specifics of why this is are beyond my level of knowledge, but it's important to know so that you can troubleshoot why your shaders aren't working.

Let's take a look at the following table:


TypeAlignment in BytesSize in Bytes
scalar (i32, u32, f32)44
vec2<T>88
vec3<T>1612
vec4<T>1616

You can see for vec3 the alignment is the next power of 2 from the size, 16. This can catch beginners (and even veterans) off guard as it's not the most intuitive. This becomes especially important when we start laying out structs. Take the light struct from the lighting tutorial:

You can see the full table of the alignments in section 4.3.7.1 of the WGSL spec

rust
struct Light {
+    position: vec3f,
+    color: vec3f,
+}

So what's the alignment of this struct? Your first guess would be that it's the sum of the alignments of the individual fields. That might make sense if we were in Rust-land, but in shader-land, it's a little more involved. The alignment for a given struct is given by the following equation:

// S is the struct in question
+// M is a member of the struct
+AlignOf(S) = max(AlignOfMember(S, M1), ... , AlignOfMember(S, Mn))

Basically, the alignment of the struct is the maximum of the alignments of the members of the struct. This means that:

AlignOf(Light)
+    = max(AlignOfMember(Light, position), AlignOfMember(Light, color))
+    = max(16, 16)
+    = 16

This is why the LightUniform has those padding fields. WGPU won't accept it if the data is not aligned correctly.

How to deal with alignment issues

In general, 16 is the max alignment you'll see. In that case, you might think that we should be able to do something like the following:

rust
#[repr(C, align(16))]
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct LightUniform {
+    position: [f32; 3],
+    color: [f32; 3],
+}

But this won't compile. The bytemuck crate doesn't work with structs with implicit padding bytes. Rust can't guarantee that the memory between the fields has been initialized properly. This gave me an error when I tried it:

error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
+   --> code/intermediate/tutorial10-lighting/src/main.rs:246:8
+    |
+246 | struct LightUniform {
+    |        ^^^^^^^^^^^^
+    |
+    = note: source type: `LightUniform` (256 bits)
+    = note: target type: `_::{closure#0}::TypeWithoutPadding` (192 bits)

Additional resources

If you're looking for more information check out the write-up by @teoxoy.

+ \ No newline at end of file diff --git a/showcase/compute/index.html b/showcase/compute/index.html index dda563085..b4b3c75f0 100644 --- a/showcase/compute/index.html +++ b/showcase/compute/index.html @@ -5,113 +5,115 @@ Compute Example: Tangents and Bitangents | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

Compute Example: Tangents and Bitangents

This proved more difficult than I anticipated. The first problem I encountered was some vertex data corruption due to the shader reading my vertex data incorrectly. I was using the ModelVertex struct I used in the normal mapping tutorial.

rust
#[repr(C)]
-#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
-pub struct ModelVertex {
-    position: [f32; 3],
-    tex_coords: [f32; 2],
-    normal: [f32; 3],
-    tangent: [f32; 3],
-    bitangent: [f32; 3],
-}

This structure works perfectly fine when used as a vertex buffer. Using it as a storage buffer proved less convenient. My previous code used a GLSL struct similar to my ModelVertex.

shader
struct ModelVertex {
-    vec3 position;
-    vec2 tex_coords;
-    vec3 normal;
-    vec3 tangent;
-    vec3 bitangent;
-};

At first glance, this seems just fine, but OpenGL experts would likely see a problem with the structure. Our fields aren't aligned properly to support the std430 alignment that storage buffers require. I won't get into detail but you can check out the alignment showcase if you want to know more. To summarize, the vec2 for the tex_coords was messing up the byte alignment, corrupting the vertex data resulting in the following:

./corruption.png

I could have fixed this by adding a padding field after tex_coords on the Rust side, but that would require modifying the VertexBufferLayout. I ended up solving this problem by using the components of the vectors directly which resulted in a struct like this:

shader
struct ModelVertex {
-    float x; float y; float z;
-    float uv; float uw;
-    float nx; float ny; float nz;
-    float tx; float ty; float tz;
-    float bx; float by; float bz;
-};

Since std430 will use the alignment of the largest element of the struct, using all floats means the struct will be aligned to 4 bytes. This is alignment matches what ModelVertex uses in Rust. This was kind of a pain to work with, but it fixed the corruption issue.

The second problem required me to rethink how I was computing the tangent and bitangent. The previous algorithm I was using only computed the tangent and bitangent for each triangle and set all the vertices in that triangle to use the same tangent and bitangent. While this is fine in a single-threaded context, the code breaks down when trying to compute the triangles in parallel. The reason is that multiple triangles can share the same vertices. This means that when we go to save the resulting tangents, we inevitably end up trying to write to the same vertex from multiple different threads which is a big no no. You can see the issue with this method below:

./black_triangles.png

Those black triangles were the result of multiple GPU threads trying to modify the same vertices. Looking at the data in Render Doc I could see that the tangents and bitangents were garbage numbers such as NaN.

./render_doc_output.png

While on the CPU we could introduce a synchronization primitive such as a Mutex to fix this issue, AFAIK there isn't really such a thing on the GPU. Instead, I decided to swap my code to work with each vertex individually. There are some hurdles with that, but those will be easier to explain in code. Let's start with the main function.

shader
void main() {
-    uint vertexIndex = gl_GlobalInvocationID.x;
-    ModelVertex result = calcTangentBitangent(vertexIndex);
-    dstVertices[vertexIndex] = result;
-}

We use the gl_GlobalInvocationID.x to get the index of the vertex we want to compute the tangents for. I opted to put the actual calculation into its own method. Let's take a look at that.

shader
ModelVertex calcTangentBitangent(uint vertexIndex) {
-    ModelVertex v = srcVertices[vertexIndex];
+    
Skip to content

Compute Example: Tangents and Bitangents

This proved more difficult than I anticipated. The first problem I encountered was some vertex data corruption due to the shader reading my vertex data incorrectly. I was using the ModelVertex struct I used in the normal mapping tutorial.

rust
#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+pub struct ModelVertex {
+    position: [f32; 3],
+    tex_coords: [f32; 2],
+    normal: [f32; 3],
+    tangent: [f32; 3],
+    bitangent: [f32; 3],
+}

This structure works perfectly fine when used as a vertex buffer. Using it as a storage buffer proved less convenient. My previous code used a GLSL struct similar to my ModelVertex.

shader
struct ModelVertex {
+    vec3 position;
+    vec2 tex_coords;
+    vec3 normal;
+    vec3 tangent;
+    vec3 bitangent;
+};

At first glance, this seems just fine, but OpenGL experts would likely see a problem with the structure. Our fields aren't aligned properly to support the std430 alignment that storage buffers require. I won't get into detail but you can check out the alignment showcase if you want to know more. To summarize, the vec2 for the tex_coords was messing up the byte alignment, corrupting the vertex data resulting in the following:

./corruption.png

I could have fixed this by adding a padding field after tex_coords on the Rust side, but that would require modifying the VertexBufferLayout. I ended up solving this problem by using the components of the vectors directly which resulted in a struct like this:

shader
struct ModelVertex {
+    float x; float y; float z;
+    float uv; float uw;
+    float nx; float ny; float nz;
+    float tx; float ty; float tz;
+    float bx; float by; float bz;
+};

Since std430 will use the alignment of the largest element of the struct, using all floats means the struct will be aligned to 4 bytes. This is alignment matches what ModelVertex uses in Rust. This was kind of a pain to work with, but it fixed the corruption issue.

The second problem required me to rethink how I was computing the tangent and bitangent. The previous algorithm I was using only computed the tangent and bitangent for each triangle and set all the vertices in that triangle to use the same tangent and bitangent. While this is fine in a single-threaded context, the code breaks down when trying to compute the triangles in parallel. The reason is that multiple triangles can share the same vertices. This means that when we go to save the resulting tangents, we inevitably end up trying to write to the same vertex from multiple different threads which is a big no no. You can see the issue with this method below:

./black_triangles.png

Those black triangles were the result of multiple GPU threads trying to modify the same vertices. Looking at the data in Render Doc I could see that the tangents and bitangents were garbage numbers such as NaN.

./render_doc_output.png

While on the CPU we could introduce a synchronization primitive such as a Mutex to fix this issue, AFAIK there isn't really such a thing on the GPU. Instead, I decided to swap my code to work with each vertex individually. There are some hurdles with that, but those will be easier to explain in code. Let's start with the main function.

shader
void main() {
+    uint vertexIndex = gl_GlobalInvocationID.x;
+    ModelVertex result = calcTangentBitangent(vertexIndex);
+    dstVertices[vertexIndex] = result;
+}

We use the gl_GlobalInvocationID.x to get the index of the vertex we want to compute the tangents for. I opted to put the actual calculation into its own method. Let's take a look at that.

shader
ModelVertex calcTangentBitangent(uint vertexIndex) {
+    ModelVertex v = srcVertices[vertexIndex];
 
-    vec3 tangent = vec3(0);
-    vec3 bitangent = vec3(0);
-    uint trianglesIncluded = 0;
+    vec3 tangent = vec3(0);
+    vec3 bitangent = vec3(0);
+    uint trianglesIncluded = 0;
 
-    // Find the triangles that use v
-    //  * Loop over every triangle (i + 3)
-    for (uint i = 0; i < numIndices; i += 3) {
-        uint index0 = indices[i];
-        uint index1 = indices[i+1];
-        uint index2 = indices[i+2];
+    // Find the triangles that use v
+    //  * Loop over every triangle (i + 3)
+    for (uint i = 0; i < numIndices; i += 3) {
+        uint index0 = indices[i];
+        uint index1 = indices[i+1];
+        uint index2 = indices[i+2];
 
-        // Only perform the calculation if one of the indices
-        // matches our vertexIndex
-        if (index0 == vertexIndex || index1 == vertexIndex || index2 == vertexIndex) {
-            ModelVertex v0 = srcVertices[index0];
-            ModelVertex v1 = srcVertices[index1];
-            ModelVertex v2 = srcVertices[index2];
+        // Only perform the calculation if one of the indices
+        // matches our vertexIndex
+        if (index0 == vertexIndex || index1 == vertexIndex || index2 == vertexIndex) {
+            ModelVertex v0 = srcVertices[index0];
+            ModelVertex v1 = srcVertices[index1];
+            ModelVertex v2 = srcVertices[index2];
 
-            vec3 pos0 = getPos(v0);
-            vec3 pos1 = getPos(v1);
-            vec3 pos2 = getPos(v2);
+            vec3 pos0 = getPos(v0);
+            vec3 pos1 = getPos(v1);
+            vec3 pos2 = getPos(v2);
 
-            vec2 uv0 = getUV(v0);
-            vec2 uv1 = getUV(v1);
-            vec2 uv2 = getUV(v2);
+            vec2 uv0 = getUV(v0);
+            vec2 uv1 = getUV(v1);
+            vec2 uv2 = getUV(v2);
 
-            vec3 delta_pos1 = pos1 - pos0;
-            vec3 delta_pos2 = pos2 - pos0;
+            vec3 delta_pos1 = pos1 - pos0;
+            vec3 delta_pos2 = pos2 - pos0;
 
-            vec2 delta_uv1 = uv1 - uv0;
-            vec2 delta_uv2 = uv2 - uv0;
+            vec2 delta_uv1 = uv1 - uv0;
+            vec2 delta_uv2 = uv2 - uv0;
 
-            float r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
-            tangent += (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
-            bitangent += (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r; 
-            trianglesIncluded += 1;
-        }
-        
-    }
+            float r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
+            tangent += (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
+            bitangent += (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r; 
+            trianglesIncluded += 1;
+        }
+        
+    }
 
-    // Average the tangent and bitangents
-    if (trianglesIncluded > 0) {
-        tangent /= trianglesIncluded;
-        bitangent /= trianglesIncluded;
-        tangent = normalize(tangent);
-        bitangent = normalize(bitangent);
-    }
+    // Average the tangent and bitangents
+    if (trianglesIncluded > 0) {
+        tangent /= trianglesIncluded;
+        bitangent /= trianglesIncluded;
+        tangent = normalize(tangent);
+        bitangent = normalize(bitangent);
+    }
 
-    // Save the results
-    v.tx = tangent.x;
-    v.ty = tangent.y;
-    v.tz = tangent.z;
-    v.bx = bitangent.x;
-    v.by = bitangent.y;
-    v.bz = bitangent.z;
+    // Save the results
+    v.tx = tangent.x;
+    v.ty = tangent.y;
+    v.tz = tangent.z;
+    v.bx = bitangent.x;
+    v.by = bitangent.y;
+    v.bz = bitangent.z;
 
-    return v;
-}

Possible Improvements

Looping over every triangle for every vertex is likely raising some red flags for some of you. In a single-threaded context, this algorithm would end up being O(N*M). As we are utilizing the high number of threads available to our GPU, this is less of an issue, but it still means our GPU is burning more cycles than it needs to.

One way I came up with to possibly improve performance is to store the index of each triangle in a hash map like structure with the vertex index as keys. Here's some pseudo code:

rust
for t in 0..indices.len() / 3 {
-    triangle_map[indices[t * 3]].push(t);
-    triangle_map.push((indices[t * 3 + 1], t);
-    triangle_map.push((indices[t * 3 + 2], t);
-}

We'd then need to flatten this structure to pass it to the GPU. We'd also need a second array to index the first.

rust
for (i, (_v, t_list)) in triangle_map.iter().enumerate() {
-    triangle_map_indices.push(TriangleMapIndex { 
-        start: i,
-        len: t_list.len(),
-    });
-    flat_triangle_map.extend(t_list);
-}

I ultimately decided against this method as it was more complicated, and I haven't had time to benchmark it to see if it's faster than the simple method.

Results

The tangents and bitangents are now getting calculated correctly and on the GPU!

./results.png

- + return v; +}

Possible Improvements

Looping over every triangle for every vertex is likely raising some red flags for some of you. In a single-threaded context, this algorithm would end up being O(N*M). As we are utilizing the high number of threads available to our GPU, this is less of an issue, but it still means our GPU is burning more cycles than it needs to.

One way I came up with to possibly improve performance is to store the index of each triangle in a hash map like structure with the vertex index as keys. Here's some pseudo code:

rust
for t in 0..indices.len() / 3 {
+    triangle_map[indices[t * 3]].push(t);
+    triangle_map.push((indices[t * 3 + 1], t);
+    triangle_map.push((indices[t * 3 + 2], t);
+}

We'd then need to flatten this structure to pass it to the GPU. We'd also need a second array to index the first.

rust
for (i, (_v, t_list)) in triangle_map.iter().enumerate() {
+    triangle_map_indices.push(TriangleMapIndex { 
+        start: i,
+        len: t_list.len(),
+    });
+    flat_triangle_map.extend(t_list);
+}

I ultimately decided against this method as it was more complicated, and I haven't had time to benchmark it to see if it's faster than the simple method.

Results

The tangents and bitangents are now getting calculated correctly and on the GPU!

./results.png

+ \ No newline at end of file diff --git a/showcase/gifs/index.html b/showcase/gifs/index.html index 4ad554ea8..0e89c537c 100644 --- a/showcase/gifs/index.html +++ b/showcase/gifs/index.html @@ -5,142 +5,144 @@ 生成 GIF 动图 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

生成 GIF 动图

假如你想要展示一个自己实现的,漂亮的 WebGPU 模拟动画,当然可以录制一个视频,但如果只是想在微博或朋友圈以九宫格来展示呢?

这,就是 GIF 的用武之地。

另外,GIF 的发音是 GHIF,而不是 JIF,因为 JIF 不仅是花生酱,它也是一种不同的图像格式

如何制作 GIF?

我们使用 gif 包创建一个函数来对渲染的图像进行编码:

rust
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
-    use gif::{Frame, Encoder, Repeat, SetParameter};
+    
Skip to content

生成 GIF 动图

假如你想要展示一个自己实现的,漂亮的 WebGPU 模拟动画,当然可以录制一个视频,但如果只是想在微博或朋友圈以九宫格来展示呢?

这,就是 GIF 的用武之地。

另外,GIF 的发音是 GHIF,而不是 JIF,因为 JIF 不仅是花生酱,它也是一种不同的图像格式

如何制作 GIF?

我们使用 gif 包创建一个函数来对渲染的图像进行编码:

rust
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
+    use gif::{Frame, Encoder, Repeat, SetParameter};
 
-    let mut image = std::fs::File::create(path)?;
-    let mut encoder = Encoder::new(&mut image, size, size, &[])?;
-    encoder.set(Repeat::Infinite)?;
+    let mut image = std::fs::File::create(path)?;
+    let mut encoder = Encoder::new(&mut image, size, size, &[])?;
+    encoder.set(Repeat::Infinite)?;
 
-    for mut frame in frames {
-        encoder.write_frame(&Frame::from_rgba_speed(size, size, &mut frame, speed))?;
-    }
+    for mut frame in frames {
+        encoder.write_frame(&Frame::from_rgba_speed(size, size, &mut frame, speed))?;
+    }
 
-    Ok(())
-}

上面的函数所需要的参数是 GIF 的帧数,它应该运行多快,以及 GIF 的大小。

如何生成帧数据?

如果看过离屏渲染案例,你就知道我们可以直接渲染到一个纹理。我们将创建一个用于渲染的纹理和一个用于复制纹理的纹素数据的缓冲区

rust
// 创建一个用于渲染的纹理
-let texture_size = 256u32;
-let rt_desc = wgpu::TextureDescriptor {
-    size: wgpu::Extent3d {
-        width: texture_size,
-        height: texture_size,
-        depth_or_array_layers: 1,
-    },
-    mip_level_count: 1,
-    sample_count: 1,
-    dimension: wgpu::TextureDimension::D2,
-    format: wgpu::TextureFormat::Rgba8UnormSrgb,
-    usage: wgpu::TextureUsages::COPY_SRC
-        | wgpu::TextureUsages::RENDER_ATTACHMENT,
-    label: None,
-    view_formats: &[],
-};
-let render_target = framework::Texture::from_descriptor(&device, rt_desc);
+    Ok(())
+}

上面的函数所需要的参数是 GIF 的帧数,它应该运行多快,以及 GIF 的大小。

如何生成帧数据?

如果看过离屏渲染案例,你就知道我们可以直接渲染到一个纹理。我们将创建一个用于渲染的纹理和一个用于复制纹理的纹素数据的缓冲区

rust
// 创建一个用于渲染的纹理
+let texture_size = 256u32;
+let rt_desc = wgpu::TextureDescriptor {
+    size: wgpu::Extent3d {
+        width: texture_size,
+        height: texture_size,
+        depth_or_array_layers: 1,
+    },
+    mip_level_count: 1,
+    sample_count: 1,
+    dimension: wgpu::TextureDimension::D2,
+    format: wgpu::TextureFormat::Rgba8UnormSrgb,
+    usage: wgpu::TextureUsages::COPY_SRC
+        | wgpu::TextureUsages::RENDER_ATTACHMENT,
+    label: None,
+    view_formats: &[],
+};
+let render_target = framework::Texture::from_descriptor(&device, rt_desc);
 
-// wgpu 需要使用 wgpu::COPY_BYTES_PER_ROW_ALIGNMENT 对齐纹理 -> 缓冲区的复制
-// 因此,我们需要同时保存 padded_bytes_per_row 和 unpadded_bytes_per_row
-let pixel_size = mem::size_of::<[u8;4]>() as u32;
-let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
-let unpadded_bytes_per_row = pixel_size * texture_size;
-let padding = (align - unpadded_bytes_per_row % align) % align;
-let padded_bytes_per_row = unpadded_bytes_per_row + padding;
+// wgpu 需要使用 wgpu::COPY_BYTES_PER_ROW_ALIGNMENT 对齐纹理 -> 缓冲区的复制
+// 因此,我们需要同时保存 padded_bytes_per_row 和 unpadded_bytes_per_row
+let pixel_size = mem::size_of::<[u8;4]>() as u32;
+let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
+let unpadded_bytes_per_row = pixel_size * texture_size;
+let padding = (align - unpadded_bytes_per_row % align) % align;
+let padded_bytes_per_row = unpadded_bytes_per_row + padding;
 
-// 创建一个用于复制纹素数据的缓冲区
-let buffer_size = (padded_bytes_per_row * texture_size) as wgpu::BufferAddress;
-let buffer_desc = wgpu::BufferDescriptor {
-    size: buffer_size,
-    usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
-    label: Some("Output Buffer"),
-    mapped_at_creation: false,
-};
-let output_buffer = device.create_buffer(&buffer_desc);

现在,我们可以渲染一帧了,然后把这个帧缓冲区数据(也就是我们上面创建的纹理的纹素数据)复制到一个 Vec<u8> 数组。

rust
let mut frames = Vec::new();
+// 创建一个用于复制纹素数据的缓冲区
+let buffer_size = (padded_bytes_per_row * texture_size) as wgpu::BufferAddress;
+let buffer_desc = wgpu::BufferDescriptor {
+    size: buffer_size,
+    usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
+    label: Some("Output Buffer"),
+    mapped_at_creation: false,
+};
+let output_buffer = device.create_buffer(&buffer_desc);

现在,我们可以渲染一帧了,然后把这个帧缓冲区数据(也就是我们上面创建的纹理的纹素数据)复制到一个 Vec<u8> 数组。

rust
let mut frames = Vec::new();
 
-for c in &colors {
-    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-        label: None,
-    });
+for c in &colors {
+    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+        label: None,
+    });
 
-    let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-        label: Some("GIF Pass"),
-        color_attachments: &[
-            wgpu::RenderPassColorAttachment {
-                view: &render_target.view,
-                resolve_target: None,
-                ops: wgpu::Operations {
-                    load: wgpu::LoadOp::Clear(
-                        wgpu::Color {
-                            r: c[0],
-                            g: c[1],
-                            b: c[2],
-                            a: 1.0,
-                        }
-                    ),
-                    store: wgpu::StoreOp::Store
-                },
-            }
-        ],
-        ..Default::default()
-    });
+    let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+        label: Some("GIF Pass"),
+        color_attachments: &[
+            wgpu::RenderPassColorAttachment {
+                view: &render_target.view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(
+                        wgpu::Color {
+                            r: c[0],
+                            g: c[1],
+                            b: c[2],
+                            a: 1.0,
+                        }
+                    ),
+                    store: wgpu::StoreOp::Store
+                },
+            }
+        ],
+        ..Default::default()
+    });
 
-    rpass.set_pipeline(&render_pipeline);
-    rpass.draw(0..3, 0..1);
+    rpass.set_pipeline(&render_pipeline);
+    rpass.draw(0..3, 0..1);
 
-    drop(rpass);
+    drop(rpass);
 
-    encoder.copy_texture_to_buffer(
-        wgpu::ImageCopyTexture {
-            texture: &render_target.texture,
-            mip_level: 0,
-            origin: wgpu::Origin3d::ZERO,
-        },
-        wgpu::ImageCopyBuffer {
-            buffer: &output_buffer,
-            layout: wgpu::ImageDataLayout {
-                offset: 0,
-                bytes_per_row: padded_bytes_per_row,
-                rows_per_image: texture_size,
-            }
-        },
-        render_target.desc.size
-    );
+    encoder.copy_texture_to_buffer(
+        wgpu::ImageCopyTexture {
+            texture: &render_target.texture,
+            mip_level: 0,
+            origin: wgpu::Origin3d::ZERO,
+        },
+        wgpu::ImageCopyBuffer {
+            buffer: &output_buffer,
+            layout: wgpu::ImageDataLayout {
+                offset: 0,
+                bytes_per_row: padded_bytes_per_row,
+                rows_per_image: texture_size,
+            }
+        },
+        render_target.desc.size
+    );
 
-    queue.submit(std::iter::once(encoder.finish()));
+    queue.submit(std::iter::once(encoder.finish()));
 
-    // 创建一个缓冲区数据异步映射
-    let buffer_slice = output_buffer.slice(..);
-    let request = buffer_slice.map_async(wgpu::MapMode::Read);
-    // 等待 GPU 完成上面的任务
-    device.poll(wgpu::Maintain::Wait);
-    let result = request.await;
+    // 创建一个缓冲区数据异步映射
+    let buffer_slice = output_buffer.slice(..);
+    let request = buffer_slice.map_async(wgpu::MapMode::Read);
+    // 等待 GPU 完成上面的任务
+    device.poll(wgpu::Maintain::Wait);
+    let result = request.await;
 
-    match result {
-        Ok(()) => {
-            let padded_data = buffer_slice.get_mapped_range();
-            let data = padded_data
-                .chunks(padded_bytes_per_row as _)
-                .map(|chunk| { &chunk[..unpadded_bytes_per_row as _]})
-                .flatten()
-                .map(|x| { *x })
-                .collect::<Vec<_>>();
-            drop(padded_data);
-            output_buffer.unmap();
-            frames.push(data);
-        }
-        _ => { eprintln!("Something went wrong") }
-    }
+    match result {
+        Ok(()) => {
+            let padded_data = buffer_slice.get_mapped_range();
+            let data = padded_data
+                .chunks(padded_bytes_per_row as _)
+                .map(|chunk| { &chunk[..unpadded_bytes_per_row as _]})
+                .flatten()
+                .map(|x| { *x })
+                .collect::<Vec<_>>();
+            drop(padded_data);
+            output_buffer.unmap();
+            frames.push(data);
+        }
+        _ => { eprintln!("Something went wrong") }
+    }
 
-}

完成后,就可以将我们的帧数据传递给 save_gif() 函数了:

rust
save_gif("output.gif", &mut frames, 1, texture_size as u16).unwrap();

我们还可以使用纹理数组来做优化,并一次发送所有绘制命令。 但上面的简单程序就是生成 GIF 动图的全部要点了,运行示例代码将得到以下 GIF 图:

./output.gif

- +}

完成后,就可以将我们的帧数据传递给 save_gif() 函数了:

rust
save_gif("output.gif", &mut frames, 1, texture_size as u16).unwrap();

我们还可以使用纹理数组来做优化,并一次发送所有绘制命令。 但上面的简单程序就是生成 GIF 动图的全部要点了,运行示例代码将得到以下 GIF 图:

./output.gif

+ \ No newline at end of file diff --git a/showcase/pong/index.html b/showcase/pong/index.html index 37c5dfd5c..fa1f686bd 100644 --- a/showcase/pong/index.html +++ b/showcase/pong/index.html @@ -5,278 +5,280 @@ Pong | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

Pong

Practically the "Hello World!" of games. Pong has been remade thousands of times. I know Pong. You know Pong. We all know Pong. That being said, this time I wanted to put in a little more effort than most people do. This showcase has a basic menu system, sounds, and different game states.

The architecture is not the best as I prescribed to the "get things done" mentality. If I were to redo this project, I'd change a lot of things. Regardless, let's get into the postmortem.

The Architecture

I was messing around with separating state from the render code. It ended up similar to an Entity Component System model.

I had a State class with all of the objects in the scene. This included the ball and the paddles, as well as the text for the scores and even the menu. State also included a game_state field of type GameState.

rust
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-pub enum GameState {
-    MainMenu,
-    Serving,
-    Playing,
-    GameOver,
-    Quiting,
-}

The State class didn't have any methods on it as I was taking a more data-oriented approach. Instead, I created a System trait and created multiple structs that implemented it.

rust
pub trait System {
-    #[allow(unused_variables)]
-    fn start(&mut self, state: &mut state::State) {}
-    fn update_state(
-        &self,
-        input: &input::Input,
-        state: &mut state::State,
-        events: &mut Vec<state::Event>,
-    );
-}

The systems would be in charge of controlling updating the different objects' states (position, visibility, etc), as well as updating the game_state field. I created all the systems on startup and used a match on game_state to determine which ones should be allowed to run (the visiblity_system always runs as it is always needed).

rust
visiblity_system.update_state(&input, &mut state, &mut events);
-match state.game_state {
-    state::GameState::MainMenu => {
-        menu_system.update_state(&input, &mut state, &mut events);
-        if state.game_state == state::GameState::Serving {
-            serving_system.start(&mut state);
-        }
-    },
-    state::GameState::Serving => {
-        serving_system.update_state(&input, &mut state, &mut events);
-        play_system.update_state(&input, &mut state, &mut events);
-        if state.game_state == state::GameState::Playing {
-            play_system.start(&mut state);
-        }
-    },
-    state::GameState::Playing => {
-        ball_system.update_state(&input, &mut state, &mut events);
-        play_system.update_state(&input, &mut state, &mut events);
-        if state.game_state == state::GameState::Serving {
-            serving_system.start(&mut state);
-        } else if state.game_state == state::GameState::GameOver {
-            game_over_system.start(&mut state);
-        }
-    },
-    state::GameState::GameOver => {
-        game_over_system.update_state(&input, &mut state, &mut events);
-        if state.game_state == state::GameState::MainMenu {
-            menu_system.start(&mut state);
-        }
-    },
-    state::GameState::Quiting => {},
-}

It's definitely not the cleanest code, but it works.

I ended up having 6 systems in total.

  1. I added the VisibilitySystem near the end of development. Up to that point, all the systems had to set the visible field of the objects. That was a pain and cluttered the logic. Instead, I decided to create the VisiblitySystem to handle that.

  2. The MenuSystem handled controlling what text was focused, and what would happen when the user pressed the enter key. If the Play button was focused, pressing enter would change game_state to GameState::Serving which would start the game. The Quit button would shift to GameState::Quiting.

  3. The ServingSystem sets the ball's position to (0.0, 0.0), updates the score texts, and shifts into GameState::Playing after a timer.

  4. The PlaySystem controls the players. It allows them to move and keeps them from leaving the play space. This system runs on both GameState::Playing as well as GameState::Serving. I did this to allow the players to reposition themselves before the serve. The PlaySystem also will shift into GameState::GameOver when one of the players' scores is greater than 2.

  5. The BallSystem system controls the ball's movement as well as its bouncing of walls/players. It also updates the score and shifts to GameState::Serving when the ball goes off the side of the screen.

  6. The GameOver system updates the win_text and shifts to GameState::MainMenu after a delay.

I found the system approach quite nice to work with. My implementation wasn't the best, but I would like to work with it again. I might even implement my own ECS.

Input

The System trait, originally had a process_input method. This became a problem when I was implementing allowing players to move between serves. The players would get stuck when the game_state switched from Serving to Playing as the inputs were getting stuck. I only called process_input on systems that were currently in use. Changing that would be finicky, so I decided to move all the input code into its own struct.

rust
use winit::event::{VirtualKeyCode, ElementState};
+    
Skip to content

Pong

Practically the "Hello World!" of games. Pong has been remade thousands of times. I know Pong. You know Pong. We all know Pong. That being said, this time I wanted to put in a little more effort than most people do. This showcase has a basic menu system, sounds, and different game states.

The architecture is not the best as I prescribed to the "get things done" mentality. If I were to redo this project, I'd change a lot of things. Regardless, let's get into the postmortem.

The Architecture

I was messing around with separating state from the render code. It ended up similar to an Entity Component System model.

I had a State class with all of the objects in the scene. This included the ball and the paddles, as well as the text for the scores and even the menu. State also included a game_state field of type GameState.

rust
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum GameState {
+    MainMenu,
+    Serving,
+    Playing,
+    GameOver,
+    Quiting,
+}

The State class didn't have any methods on it as I was taking a more data-oriented approach. Instead, I created a System trait and created multiple structs that implemented it.

rust
pub trait System {
+    #[allow(unused_variables)]
+    fn start(&mut self, state: &mut state::State) {}
+    fn update_state(
+        &self,
+        input: &input::Input,
+        state: &mut state::State,
+        events: &mut Vec<state::Event>,
+    );
+}

The systems would be in charge of controlling updating the different objects' states (position, visibility, etc), as well as updating the game_state field. I created all the systems on startup and used a match on game_state to determine which ones should be allowed to run (the visiblity_system always runs as it is always needed).

rust
visiblity_system.update_state(&input, &mut state, &mut events);
+match state.game_state {
+    state::GameState::MainMenu => {
+        menu_system.update_state(&input, &mut state, &mut events);
+        if state.game_state == state::GameState::Serving {
+            serving_system.start(&mut state);
+        }
+    },
+    state::GameState::Serving => {
+        serving_system.update_state(&input, &mut state, &mut events);
+        play_system.update_state(&input, &mut state, &mut events);
+        if state.game_state == state::GameState::Playing {
+            play_system.start(&mut state);
+        }
+    },
+    state::GameState::Playing => {
+        ball_system.update_state(&input, &mut state, &mut events);
+        play_system.update_state(&input, &mut state, &mut events);
+        if state.game_state == state::GameState::Serving {
+            serving_system.start(&mut state);
+        } else if state.game_state == state::GameState::GameOver {
+            game_over_system.start(&mut state);
+        }
+    },
+    state::GameState::GameOver => {
+        game_over_system.update_state(&input, &mut state, &mut events);
+        if state.game_state == state::GameState::MainMenu {
+            menu_system.start(&mut state);
+        }
+    },
+    state::GameState::Quiting => {},
+}

It's definitely not the cleanest code, but it works.

I ended up having 6 systems in total.

  1. I added the VisibilitySystem near the end of development. Up to that point, all the systems had to set the visible field of the objects. That was a pain and cluttered the logic. Instead, I decided to create the VisiblitySystem to handle that.

  2. The MenuSystem handled controlling what text was focused, and what would happen when the user pressed the enter key. If the Play button was focused, pressing enter would change game_state to GameState::Serving which would start the game. The Quit button would shift to GameState::Quiting.

  3. The ServingSystem sets the ball's position to (0.0, 0.0), updates the score texts, and shifts into GameState::Playing after a timer.

  4. The PlaySystem controls the players. It allows them to move and keeps them from leaving the play space. This system runs on both GameState::Playing as well as GameState::Serving. I did this to allow the players to reposition themselves before the serve. The PlaySystem also will shift into GameState::GameOver when one of the players' scores is greater than 2.

  5. The BallSystem system controls the ball's movement as well as its bouncing of walls/players. It also updates the score and shifts to GameState::Serving when the ball goes off the side of the screen.

  6. The GameOver system updates the win_text and shifts to GameState::MainMenu after a delay.

I found the system approach quite nice to work with. My implementation wasn't the best, but I would like to work with it again. I might even implement my own ECS.

Input

The System trait, originally had a process_input method. This became a problem when I was implementing allowing players to move between serves. The players would get stuck when the game_state switched from Serving to Playing as the inputs were getting stuck. I only called process_input on systems that were currently in use. Changing that would be finicky, so I decided to move all the input code into its own struct.

rust
use winit::event::{VirtualKeyCode, ElementState};
 
-#[derive(Debug, Default)]
-pub struct Input {
-    pub p1_up_pressed: bool,
-    pub p1_down_pressed: bool,
-    pub p2_up_pressed: bool,
-    pub p2_down_pressed: bool,
-    pub enter_pressed: bool,
-}
+#[derive(Debug, Default)]
+pub struct Input {
+    pub p1_up_pressed: bool,
+    pub p1_down_pressed: bool,
+    pub p2_up_pressed: bool,
+    pub p2_down_pressed: bool,
+    pub enter_pressed: bool,
+}
 
-impl Input {
-    pub fn new() -> Self {
-        Default::default()
-    }
+impl Input {
+    pub fn new() -> Self {
+        Default::default()
+    }
 
-    pub fn update(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
-        let pressed = state == ElementState::Pressed;
-        match key {
-            VirtualKeyCode::Up => {
-                self.p2_up_pressed = pressed;
-                true
-            }
-            VirtualKeyCode::Down => {
-                self.p2_down_pressed = pressed;
-                true
-            }
-            VirtualKeyCode::W => {
-                self.p1_up_pressed = pressed;
-                true
-            }
-            VirtualKeyCode::S => {
-                self.p1_down_pressed = pressed;
-                true
-            }
-            VirtualKeyCode::Return => {
-                self.enter_pressed = pressed;
-                true
-            }
-            _ => false
-        }
-    }
+    pub fn update(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
+        let pressed = state == ElementState::Pressed;
+        match key {
+            VirtualKeyCode::Up => {
+                self.p2_up_pressed = pressed;
+                true
+            }
+            VirtualKeyCode::Down => {
+                self.p2_down_pressed = pressed;
+                true
+            }
+            VirtualKeyCode::W => {
+                self.p1_up_pressed = pressed;
+                true
+            }
+            VirtualKeyCode::S => {
+                self.p1_down_pressed = pressed;
+                true
+            }
+            VirtualKeyCode::Return => {
+                self.enter_pressed = pressed;
+                true
+            }
+            _ => false
+        }
+    }
 
-    pub fn ui_up_pressed(&self) -> bool {
-        self.p1_up_pressed || self.p2_up_pressed
-    }
+    pub fn ui_up_pressed(&self) -> bool {
+        self.p1_up_pressed || self.p2_up_pressed
+    }
 
-    pub fn ui_down_pressed(&self) -> bool {
-        self.p1_down_pressed || self.p2_down_pressed
-    }
-}

This works really well. I simply pass this struct into the update_state method.

Render

I used wgpu_glyph for the text and white quads for the ball and paddles. There's not much to say here, it's Pong after all.

I did mess around with batching, however. It was totally overkill for this project, but it was a good learning experience. Here's the code if you're interested.

rust
pub struct QuadBufferBuilder {
-    vertex_data: Vec<Vertex>,
-    index_data: Vec<u32>,
-    current_quad: u32,
-}
+    pub fn ui_down_pressed(&self) -> bool {
+        self.p1_down_pressed || self.p2_down_pressed
+    }
+}

This works really well. I simply pass this struct into the update_state method.

Render

I used wgpu_glyph for the text and white quads for the ball and paddles. There's not much to say here, it's Pong after all.

I did mess around with batching, however. It was totally overkill for this project, but it was a good learning experience. Here's the code if you're interested.

rust
pub struct QuadBufferBuilder {
+    vertex_data: Vec<Vertex>,
+    index_data: Vec<u32>,
+    current_quad: u32,
+}
 
-impl QuadBufferBuilder {
-    pub fn new() -> Self {
-        Self {
-            vertex_data: Vec::new(),
-            index_data: Vec::new(),
-            current_quad: 0,
-        }
-    }
+impl QuadBufferBuilder {
+    pub fn new() -> Self {
+        Self {
+            vertex_data: Vec::new(),
+            index_data: Vec::new(),
+            current_quad: 0,
+        }
+    }
 
-    pub fn push_ball(self, ball: &state::Ball) -> Self {
-        if ball.visible {
-            let min_x = ball.position.x - ball.radius;
-            let min_y = ball.position.y - ball.radius;
-            let max_x = ball.position.x + ball.radius;
-            let max_y = ball.position.y + ball.radius;
+    pub fn push_ball(self, ball: &state::Ball) -> Self {
+        if ball.visible {
+            let min_x = ball.position.x - ball.radius;
+            let min_y = ball.position.y - ball.radius;
+            let max_x = ball.position.x + ball.radius;
+            let max_y = ball.position.y + ball.radius;
 
-            self.push_quad(min_x, min_y, max_x, max_y)
-        } else {
-            self
-        }
-    }
+            self.push_quad(min_x, min_y, max_x, max_y)
+        } else {
+            self
+        }
+    }
 
-    pub fn push_player(self, player: &state::Player) -> Self {
-        if player.visible {
-            self.push_quad(
-                player.position.x - player.size.x * 0.5,
-                player.position.y - player.size.y * 0.5,
-                player.position.x + player.size.x * 0.5,
-                player.position.y + player.size.y * 0.5,
-            )
-        } else {
-            self
-        }
-    }
+    pub fn push_player(self, player: &state::Player) -> Self {
+        if player.visible {
+            self.push_quad(
+                player.position.x - player.size.x * 0.5,
+                player.position.y - player.size.y * 0.5,
+                player.position.x + player.size.x * 0.5,
+                player.position.y + player.size.y * 0.5,
+            )
+        } else {
+            self
+        }
+    }
 
-    pub fn push_quad(mut self, min_x: f32, min_y: f32, max_x: f32, max_y: f32) -> Self {
-        self.vertex_data.extend(&[
-            Vertex {
-                position: (min_x, min_y).into(),
-            },
-            Vertex {
-                position: (max_x, min_y).into(),
-            },
-            Vertex {
-                position: (max_x, max_y).into(),
-            },
-            Vertex {
-                position: (min_x, max_y).into(),
-            },
-        ]);
-        self.index_data.extend(&[
-            self.current_quad * 4 + 0,
-            self.current_quad * 4 + 1,
-            self.current_quad * 4 + 2,
-            self.current_quad * 4 + 0,
-            self.current_quad * 4 + 2,
-            self.current_quad * 4 + 3,
-        ]);
-        self.current_quad += 1;
-        self
-    }
+    pub fn push_quad(mut self, min_x: f32, min_y: f32, max_x: f32, max_y: f32) -> Self {
+        self.vertex_data.extend(&[
+            Vertex {
+                position: (min_x, min_y).into(),
+            },
+            Vertex {
+                position: (max_x, min_y).into(),
+            },
+            Vertex {
+                position: (max_x, max_y).into(),
+            },
+            Vertex {
+                position: (min_x, max_y).into(),
+            },
+        ]);
+        self.index_data.extend(&[
+            self.current_quad * 4 + 0,
+            self.current_quad * 4 + 1,
+            self.current_quad * 4 + 2,
+            self.current_quad * 4 + 0,
+            self.current_quad * 4 + 2,
+            self.current_quad * 4 + 3,
+        ]);
+        self.current_quad += 1;
+        self
+    }
 
-    pub fn build(self, device: &wgpu::Device) -> (StagingBuffer, StagingBuffer, u32) {
-        (
-            StagingBuffer::new(device, &self.vertex_data),
-            StagingBuffer::new(device, &self.index_data),
-            self.index_data.len() as u32,
-        )
-    }
-}

Sound

I used rodio for sound. I created a SoundPack class to store the sounds. Deciding how to get the sounds to play took some thinking. I chose to pass in a Vec<state::Event> into the update_state method. The system would then push an event to the Vec. The Event enum is listed below.

rust
#[derive(Debug, Copy, Clone)]
-pub enum Event {
-    ButtonPressed,
-    FocusChanged,
-    BallBounce(cgmath::Vector2<f32>),
-    Score(u32),
-}

I was going to have BallBounce play a positioned sound using a SpatialSink, but I was getting clipping issues, and I wanted to be done with the project. Aside from that, the events system worked nicely.

WASM Support

This example works on the web, but there are a few steps that I needed to take to make things work. The first one was that I needed to switch to using a lib.rs instead of just main.rs. I opted to use wasm-pack to create the web assembly. I could have kept the old format by using wasm-bindgen directly, but I ran into issues with using the wrong version of wasm-bindgen, so I elected to stick with wasm-pack.

In order for wasm-pack to work properly I first needed to add some dependencies:

toml
[dependencies]
-cfg-if = "1"
-env_logger = "0.10"
-winit = "0.28.7"
-anyhow = "1.0"
-bytemuck = { version = "1.14", features = [ "derive" ] }
-cgmath = "0.18"
-pollster = "0.3"
-wgpu = { version = "0.16", features = ["spirv"]}
-wgpu_glyph = "0.17"
-rand = "0.8"
-rodio = { version = "0.15", default-features = false, features = ["wav"] }
-log = "0.4"
-instant = "0.1"
+    pub fn build(self, device: &wgpu::Device) -> (StagingBuffer, StagingBuffer, u32) {
+        (
+            StagingBuffer::new(device, &self.vertex_data),
+            StagingBuffer::new(device, &self.index_data),
+            self.index_data.len() as u32,
+        )
+    }
+}

Sound

I used rodio for sound. I created a SoundPack class to store the sounds. Deciding how to get the sounds to play took some thinking. I chose to pass in a Vec<state::Event> into the update_state method. The system would then push an event to the Vec. The Event enum is listed below.

rust
#[derive(Debug, Copy, Clone)]
+pub enum Event {
+    ButtonPressed,
+    FocusChanged,
+    BallBounce(cgmath::Vector2<f32>),
+    Score(u32),
+}

I was going to have BallBounce play a positioned sound using a SpatialSink, but I was getting clipping issues, and I wanted to be done with the project. Aside from that, the events system worked nicely.

WASM Support

This example works on the web, but there are a few steps that I needed to take to make things work. The first one was that I needed to switch to using a lib.rs instead of just main.rs. I opted to use wasm-pack to create the web assembly. I could have kept the old format by using wasm-bindgen directly, but I ran into issues with using the wrong version of wasm-bindgen, so I elected to stick with wasm-pack.

In order for wasm-pack to work properly I first needed to add some dependencies:

toml
[dependencies]
+cfg-if = "1"
+env_logger = "0.10"
+winit = "0.28.7"
+anyhow = "1.0"
+bytemuck = { version = "1.14", features = [ "derive" ] }
+cgmath = "0.18"
+pollster = "0.3"
+wgpu = { version = "0.16", features = ["spirv"]}
+wgpu_glyph = "0.17"
+rand = "0.8"
+rodio = { version = "0.15", default-features = false, features = ["wav"] }
+log = "0.4"
+instant = "0.1"
 
-[target.'cfg(target_arch = "wasm32")'.dependencies]
-console_error_panic_hook = "0.1.6"
-console_log = "1.0"
-getrandom = { version = "0.2", features = ["js"] }
-rodio = { version = "0.15", default-features = false, features = ["wasm-bindgen", "wav"] }
-wasm-bindgen-futures = "0.4.20"
-wasm-bindgen = "0.2.87"
-web-sys = { version = "0.3.64", features = [
-    "Document",
-    "Window",
-    "Element",
-]}
-wgpu = { version = "0.16", features = ["spirv", "webgl"]}
+[target.'cfg(target_arch = "wasm32")'.dependencies]
+console_error_panic_hook = "0.1.6"
+console_log = "1.0"
+getrandom = { version = "0.2", features = ["js"] }
+rodio = { version = "0.15", default-features = false, features = ["wasm-bindgen", "wav"] }
+wasm-bindgen-futures = "0.4.20"
+wasm-bindgen = "0.2.87"
+web-sys = { version = "0.3.64", features = [
+    "Document",
+    "Window",
+    "Element",
+]}
+wgpu = { version = "0.16", features = ["spirv", "webgl"]}
 
-[build-dependencies]
-anyhow = "1.0"
-fs_extra = "1.3"
-glob = "0.3"
-rayon = "1.8"
-naga = { version = "0.11", features = ["glsl-in", "spv-out", "wgsl-out"]}

I'll highlight a few of these:

  • rand: If you want to use rand on the web, you need to include getrandom directly and enable its js feature.
  • rodio: I had to disable all of the features for the WASM build, and then enabled them separately. The mp3 feature specifically wasn't working for me. There might have been a workaround, but since I'm not using mp3 in this example I just elected to only use wav.
  • instant: This crate is basically just a wrapper around std::time::Instant. In a normal build, it's just a type alias. In web builds it uses the browser's time functions.
  • cfg-if: This is a convenient crate for making platform-specific code less horrible to write.
  • env_logger and console_log: env_logger doesn't work on web assembly so we need to use a different logger. console_log is the one used in the web assembly tutorials, so I went with that one.
  • wasm-bindgen: This crate is the glue that makes Rust code work on the web. If you are building using the wasm-bindgen command you need to make sure that the command version of wasm-bindgen matches the version in Cargo.toml exactly otherwise you'll have problems. If you use wasm-pack it will download the appropriate wasm-bindgen binary to use for your crate.
  • web-sys: This has functions and types that allow you to use different methods available in js such as "getElementById()".

Now that that's out of the way let's talk about some code. First, we need to create a function that will start our event loop.

rust
#[cfg(target_arch="wasm32")]
-use wasm_bindgen::prelude::*;
+[build-dependencies]
+anyhow = "1.0"
+fs_extra = "1.3"
+glob = "0.3"
+rayon = "1.8"
+naga = { version = "0.11", features = ["glsl-in", "spv-out", "wgsl-out"]}

I'll highlight a few of these:

  • rand: If you want to use rand on the web, you need to include getrandom directly and enable its js feature.
  • rodio: I had to disable all of the features for the WASM build, and then enabled them separately. The mp3 feature specifically wasn't working for me. There might have been a workaround, but since I'm not using mp3 in this example I just elected to only use wav.
  • instant: This crate is basically just a wrapper around std::time::Instant. In a normal build, it's just a type alias. In web builds it uses the browser's time functions.
  • cfg-if: This is a convenient crate for making platform-specific code less horrible to write.
  • env_logger and console_log: env_logger doesn't work on web assembly so we need to use a different logger. console_log is the one used in the web assembly tutorials, so I went with that one.
  • wasm-bindgen: This crate is the glue that makes Rust code work on the web. If you are building using the wasm-bindgen command you need to make sure that the command version of wasm-bindgen matches the version in Cargo.toml exactly otherwise you'll have problems. If you use wasm-pack it will download the appropriate wasm-bindgen binary to use for your crate.
  • web-sys: This has functions and types that allow you to use different methods available in js such as "getElementById()".

Now that that's out of the way let's talk about some code. First, we need to create a function that will start our event loop.

rust
#[cfg(target_arch="wasm32")]
+use wasm_bindgen::prelude::*;
 
-#[cfg_attr(target_arch="wasm32", wasm_bindgen(start))]
-pub fn start() {
-    // Snipped...
-}

The wasm_bindgen(start) tell's wasm-bindgen that this function should be started as soon as the web assembly module is loaded by javascript. Most of the code inside this function is the same as what you'd find in other examples on this site, but there is some specific stuff we need to do on the web.

rust
cfg_if::cfg_if! {
-    if #[cfg(target_arch = "wasm32")] {
-        console_log::init_with_level(log::Level::Warn).expect("Could't initialize logger");
-        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
-    } else {
-        env_logger::init();
-    }
-}

This code should run before you try to do anything significant. It sets up the logger based on what architecture you're building for. Most architectures will use env_logger. The wasm32 architecture will use console_log. It's also important that we tell Rust to forward panics to javascript. If we didn't do this we would have no idea when our Rust code panics.

Next, we create a window. Much of it is like we've done before, but since we are supporting fullscreen we need to do some extra steps.

rust
let event_loop = EventLoop::new();
-let monitor = event_loop.primary_monitor().unwrap();
-let video_mode = monitor.video_modes().next();
-let size = video_mode.clone().map_or(PhysicalSize::new(800, 600), |vm| vm.size());
-let window = WindowBuilder::new()
-    .with_visible(false)
-    .with_title("Pong")
-    .with_fullscreen(video_mode.map(|vm| Fullscreen::Exclusive(vm)))
-    .build(&event_loop)
-    .unwrap();
+#[cfg_attr(target_arch="wasm32", wasm_bindgen(start))]
+pub fn start() {
+    // Snipped...
+}

The wasm_bindgen(start) tell's wasm-bindgen that this function should be started as soon as the web assembly module is loaded by javascript. Most of the code inside this function is the same as what you'd find in other examples on this site, but there is some specific stuff we need to do on the web.

rust
cfg_if::cfg_if! {
+    if #[cfg(target_arch = "wasm32")] {
+        console_log::init_with_level(log::Level::Warn).expect("Could't initialize logger");
+        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
+    } else {
+        env_logger::init();
+    }
+}

This code should run before you try to do anything significant. It sets up the logger based on what architecture you're building for. Most architectures will use env_logger. The wasm32 architecture will use console_log. It's also important that we tell Rust to forward panics to javascript. If we didn't do this we would have no idea when our Rust code panics.

Next, we create a window. Much of it is like we've done before, but since we are supporting fullscreen we need to do some extra steps.

rust
let event_loop = EventLoop::new();
+let monitor = event_loop.primary_monitor().unwrap();
+let video_mode = monitor.video_modes().next();
+let size = video_mode.clone().map_or(PhysicalSize::new(800, 600), |vm| vm.size());
+let window = WindowBuilder::new()
+    .with_visible(false)
+    .with_title("Pong")
+    .with_fullscreen(video_mode.map(|vm| Fullscreen::Exclusive(vm)))
+    .build(&event_loop)
+    .unwrap();
 
-// WASM builds don't have access to monitor information, so
-// we should specify a fallback resolution
-if window.fullscreen().is_none() {
-    window.set_inner_size(PhysicalSize::new(512, 512));
-}

We then have to do some web-specific stuff if we are on that platform.

rust
#[cfg(target_arch = "wasm32")]
-{
-    use winit::platform::web::WindowExtWebSys;
-    web_sys::window()
-        .and_then(|win| win.document())
-        .and_then(|doc| {
-            let dst = doc.get_element_by_id("wasm-example")?;
-            let canvas = web_sys::Element::from(window.canvas());
-            dst.append_child(&canvas).ok()?;
+// WASM builds don't have access to monitor information, so
+// we should specify a fallback resolution
+if window.fullscreen().is_none() {
+    window.set_inner_size(PhysicalSize::new(512, 512));
+}

We then have to do some web-specific stuff if we are on that platform.

rust
#[cfg(target_arch = "wasm32")]
+{
+    use winit::platform::web::WindowExtWebSys;
+    web_sys::window()
+        .and_then(|win| win.document())
+        .and_then(|doc| {
+            let dst = doc.get_element_by_id("wasm-example")?;
+            let canvas = web_sys::Element::from(window.canvas());
+            dst.append_child(&canvas).ok()?;
 
-            // Request fullscreen, if denied, continue as normal
-            match canvas.request_fullscreen() {
-                Ok(_) => {},
-                Err(_) => ()
-            }
+            // Request fullscreen, if denied, continue as normal
+            match canvas.request_fullscreen() {
+                Ok(_) => {},
+                Err(_) => ()
+            }
 
-            Some(())
-        })
-        .expect("Couldn't append canvas to document body.");
-}

Everything else works the same.

Summary

A fun project to work on. It was overly architected, and kinda hard to make changes, but a good experience nonetheless.

Try the code down below! (Controls currently require a keyboard.)

- + Some(()) + }) + .expect("Couldn't append canvas to document body."); +}

Everything else works the same.

Summary

A fun project to work on. It was overly architected, and kinda hard to make changes, but a good experience nonetheless.

Try the code down below! (Controls currently require a keyboard.)

+ \ No newline at end of file diff --git a/showcase/threading.html b/showcase/threading.html index 2fab14224..9b8441651 100644 --- a/showcase/threading.html +++ b/showcase/threading.html @@ -5,106 +5,108 @@ Multi-threading with Wgpu and Rayon | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

Multi-threading with Wgpu and Rayon

This example has not been tested on WASM. Rayon has support for multi threading on WASM via wasm-bindgen-rayon, though that implementation is only currently working on Chrome-based browsers. Because of this I've elected not to make a WASM version of this tutorial at this time.

The main selling point of Vulkan, DirectX 12, Metal, and by extension Wgpu is that these APIs is that they designed from the ground up to be thread safe. Up to this point, we have been doing everything on a single thread. That's about to change.

This example is based on the code for tutorial12-camera

I won't go into what threads are in this tutorial. That is a full CS course in and of itself. All we'll be covering is using threading to make loading resources faster.

We won't go over multithreading rendering as we don't have enough different types of objects to justify that yet. This will change in a coming tutorial

Parallelizing loading models and textures

Currently, we load the materials and meshes of our model one at a time. This is a perfect opportunity for multithreading! All our changes will be in model.rs. Let's first start with the materials. We'll convert the regular for loop into a par_iter().map().

rust
// resources.rs
+    
Skip to content

Multi-threading with Wgpu and Rayon

This example has not been tested on WASM. Rayon has support for multi threading on WASM via wasm-bindgen-rayon, though that implementation is only currently working on Chrome-based browsers. Because of this I've elected not to make a WASM version of this tutorial at this time.

The main selling point of Vulkan, DirectX 12, Metal, and by extension Wgpu is that these APIs is that they designed from the ground up to be thread safe. Up to this point, we have been doing everything on a single thread. That's about to change.

This example is based on the code for tutorial12-camera

I won't go into what threads are in this tutorial. That is a full CS course in and of itself. All we'll be covering is using threading to make loading resources faster.

We won't go over multithreading rendering as we don't have enough different types of objects to justify that yet. This will change in a coming tutorial

Parallelizing loading models and textures

Currently, we load the materials and meshes of our model one at a time. This is a perfect opportunity for multithreading! All our changes will be in model.rs. Let's first start with the materials. We'll convert the regular for loop into a par_iter().map().

rust
// resources.rs
 
-#[cfg(not(target_arch="wasm32"))]
-use rayon::iter::IntoParallelIterator;
+#[cfg(not(target_arch="wasm32"))]
+use rayon::iter::IntoParallelIterator;
 
-impl Model {
-    pub fn load<P: AsRef<Path>>(
-        device: &wgpu::Device,
-        queue: &wgpu::Queue,
-        layout: &wgpu::BindGroupLayout,
-        path: P,
-    ) -> Result<Self> {
-        // ...
-        // UPDATED!
-        let materials = obj_materials.par_iter().map(|mat| {
-            // We can also parallelize loading the textures!
-            let mut textures = [
-                (containing_folder.join(&mat.diffuse_texture), false),
-                (containing_folder.join(&mat.normal_texture), true),
-            ].par_iter().map(|(texture_path, is_normal_map)| {
-                texture::Texture::load(device, queue, texture_path, *is_normal_map)
-            }).collect::<Result<Vec<_>>>()?;
-            
-            // Pop removes from the end of the list.
-            let normal_texture = textures.pop().unwrap();
-            let diffuse_texture = textures.pop().unwrap();
+impl Model {
+    pub fn load<P: AsRef<Path>>(
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        layout: &wgpu::BindGroupLayout,
+        path: P,
+    ) -> Result<Self> {
+        // ...
+        // UPDATED!
+        let materials = obj_materials.par_iter().map(|mat| {
+            // We can also parallelize loading the textures!
+            let mut textures = [
+                (containing_folder.join(&mat.diffuse_texture), false),
+                (containing_folder.join(&mat.normal_texture), true),
+            ].par_iter().map(|(texture_path, is_normal_map)| {
+                texture::Texture::load(device, queue, texture_path, *is_normal_map)
+            }).collect::<Result<Vec<_>>>()?;
+            
+            // Pop removes from the end of the list.
+            let normal_texture = textures.pop().unwrap();
+            let diffuse_texture = textures.pop().unwrap();
 
-            Ok(Material::new(
-                device,
-                &mat.name,
-                diffuse_texture,
-                normal_texture,
-                layout,
-            ))
-        }).collect::<Result<Vec<Material>>>()?;
-        // ...
-    }
-    // ...
-}

Next, we can update the meshes to be loaded in parallel.

rust
impl Model {
-    pub fn load<P: AsRef<Path>>(
-        device: &wgpu::Device,
-        queue: &wgpu::Queue,
-        layout: &wgpu::BindGroupLayout,
-        path: P,
-    ) -> Result<Self> {
-        // ...
-        // UPDATED!
-        let meshes = obj_models.par_iter().map(|m| {
-            let mut vertices = (0..m.mesh.positions.len() / 3).into_par_iter().map(|i| {
-                ModelVertex {
-                    position: [
-                        m.mesh.positions[i * 3],
-                        m.mesh.positions[i * 3 + 1],
-                        m.mesh.positions[i * 3 + 2],
-                    ].into(),
-                    tex_coords: [
-                        m.mesh.texcoords[i * 2], 
-                        m.mesh.texcoords[i * 2 + 1]
-                    ].into(),
-                    normal: [
-                        m.mesh.normals[i * 3],
-                        m.mesh.normals[i * 3 + 1],
-                        m.mesh.normals[i * 3 + 2],
-                    ].into(),
-                    // We'll calculate these later
-                    tangent: [0.0; 3].into(),
-                    bitangent: [0.0; 3].into(),
-                }
-            }).collect::<Vec<_>>();
-            // ...
-            let index_buffer = device.create_buffer_init(
-                &wgpu::util::BufferInitDescriptor {
-                    label: Some(&format!("{:?} Index Buffer", m.name)), // UPDATED!
-                    contents: bytemuck::cast_slice(&m.mesh.indices),
-                    usage: wgpu::BufferUsages::INDEX,
-                }
-            );
-            // ...
-            // UPDATED!
-            Ok(Mesh {
-                // ...
-            })
-        }).collect::<Result<Vec<_>>>()?;
-        // ...
-    }
-    // ...
-}

We've parallelized loading the meshes, and making the vertex array for them. Probably a bit overkill, but rayon should prevent us from using too many threads.

You'll notice that we didn't use rayon for calculating the tangent, and bitangent. I tried to get it to work, but I was having trouble finding a way to do it without multiple mutable references to vertices. I don't feel like introducing a std::sync::Mutex, so I'll leave it for now.

This is honestly a better job for a compute shader, as the model data is going to get loaded into a buffer anyway.

It's that easy!

Most of the wgpu types are Send + Sync, so we can use them in threads without much trouble. It was so easy, that I feel like this tutorial is too short! I'll just leave off with a speed comparison between the previous model loading code and the current code.

Elapsed (Original): 309.596382ms
-Elapsed (Threaded): 199.645027ms

We're not loading that many resources, so the speedup is minimal. We'll be doing more stuff with threading, but this is a good introduction.

- + Ok(Material::new( + device, + &mat.name, + diffuse_texture, + normal_texture, + layout, + )) + }).collect::<Result<Vec<Material>>>()?; + // ... + } + // ... +}

Next, we can update the meshes to be loaded in parallel.

rust
impl Model {
+    pub fn load<P: AsRef<Path>>(
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        layout: &wgpu::BindGroupLayout,
+        path: P,
+    ) -> Result<Self> {
+        // ...
+        // UPDATED!
+        let meshes = obj_models.par_iter().map(|m| {
+            let mut vertices = (0..m.mesh.positions.len() / 3).into_par_iter().map(|i| {
+                ModelVertex {
+                    position: [
+                        m.mesh.positions[i * 3],
+                        m.mesh.positions[i * 3 + 1],
+                        m.mesh.positions[i * 3 + 2],
+                    ].into(),
+                    tex_coords: [
+                        m.mesh.texcoords[i * 2], 
+                        m.mesh.texcoords[i * 2 + 1]
+                    ].into(),
+                    normal: [
+                        m.mesh.normals[i * 3],
+                        m.mesh.normals[i * 3 + 1],
+                        m.mesh.normals[i * 3 + 2],
+                    ].into(),
+                    // We'll calculate these later
+                    tangent: [0.0; 3].into(),
+                    bitangent: [0.0; 3].into(),
+                }
+            }).collect::<Vec<_>>();
+            // ...
+            let index_buffer = device.create_buffer_init(
+                &wgpu::util::BufferInitDescriptor {
+                    label: Some(&format!("{:?} Index Buffer", m.name)), // UPDATED!
+                    contents: bytemuck::cast_slice(&m.mesh.indices),
+                    usage: wgpu::BufferUsages::INDEX,
+                }
+            );
+            // ...
+            // UPDATED!
+            Ok(Mesh {
+                // ...
+            })
+        }).collect::<Result<Vec<_>>>()?;
+        // ...
+    }
+    // ...
+}

We've parallelized loading the meshes, and making the vertex array for them. Probably a bit overkill, but rayon should prevent us from using too many threads.

You'll notice that we didn't use rayon for calculating the tangent, and bitangent. I tried to get it to work, but I was having trouble finding a way to do it without multiple mutable references to vertices. I don't feel like introducing a std::sync::Mutex, so I'll leave it for now.

This is honestly a better job for a compute shader, as the model data is going to get loaded into a buffer anyway.

It's that easy!

Most of the wgpu types are Send + Sync, so we can use them in threads without much trouble. It was so easy, that I feel like this tutorial is too short! I'll just leave off with a speed comparison between the previous model loading code and the current code.

Elapsed (Original): 309.596382ms
+Elapsed (Threaded): 199.645027ms

We're not loading that many resources, so the speedup is minimal. We'll be doing more stuff with threading, but this is a good introduction.

+ \ No newline at end of file diff --git a/showcase/windowless/index.html b/showcase/windowless/index.html index e83767885..1503a7d29 100644 --- a/showcase/windowless/index.html +++ b/showcase/windowless/index.html @@ -5,192 +5,194 @@ 离屏渲染 | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
本章内容

离屏渲染

有时我们只是想利用 gpu,也许是要并行地计算一组大的数字; 也许是正在制作一部 3D 电影,并需要用路径追踪来创建一个看起来很真实的场景; 也许正在挖掘一种加密货币。在所有这些情况下,我们可能 不需要 从窗口看到正在发生的事情。

如何使用?

离屏渲染(Off-Screen Rendering, 也叫做 Windowless Rendering)其实很简单:事实上,我们不需要一个窗口(Window)来创建一个GPU 实例,不需要一个窗口来选择适配器,也不需要一个窗口来创建逻辑设备。我们只需要窗口来创建一个展示平面交换链(SwapChain)。所以,只要有了逻辑设备,就可以开始向 GPU 发送命令。

rust
let adapter = instance
-    .request_adapter(&wgpu::RequestAdapterOptions {
-        compatible_surface: Some(&surface),
-        ..Default::default()
-    })
-    .await
-    .unwrap();
-let (device, queue) = adapter
-    .request_device(&Default::default(), None)
-    .await
-    .unwrap();

离屏绘制一个三角形

虽然我们已经说过不需要看到 gpu 在做什么,但确实需要在某些时候看到结果。如果回顾一下关于 surface 的讨论,会发现我们是使用 surface.get_current_texture() 获取一个纹理来绘制。

现在,我们跳过这一步,自己创建纹理。这里需要注意的是,需要指定 TextureFormat::Rgba8UnormSrgb 为纹理像素格式而不是 surface.get_capabilities(&adapter).formats[0],因为 PNG 使用 RGBA 而不是 BGRA 像素格式:

rust
let texture_size = 256u32;
+    
Skip to content

离屏渲染

有时我们只是想利用 gpu,也许是要并行地计算一组大的数字; 也许是正在制作一部 3D 电影,并需要用路径追踪来创建一个看起来很真实的场景; 也许正在挖掘一种加密货币。在所有这些情况下,我们可能 不需要 从窗口看到正在发生的事情。

如何使用?

离屏渲染(Off-Screen Rendering, 也叫做 Windowless Rendering)其实很简单:事实上,我们不需要一个窗口(Window)来创建一个GPU 实例,不需要一个窗口来选择适配器,也不需要一个窗口来创建逻辑设备。我们只需要窗口来创建一个展示平面交换链(SwapChain)。所以,只要有了逻辑设备,就可以开始向 GPU 发送命令。

rust
let adapter = instance
+    .request_adapter(&wgpu::RequestAdapterOptions {
+        compatible_surface: Some(&surface),
+        ..Default::default()
+    })
+    .await
+    .unwrap();
+let (device, queue) = adapter
+    .request_device(&Default::default(), None)
+    .await
+    .unwrap();

离屏绘制一个三角形

虽然我们已经说过不需要看到 gpu 在做什么,但确实需要在某些时候看到结果。如果回顾一下关于 surface 的讨论,会发现我们是使用 surface.get_current_texture() 获取一个纹理来绘制。

现在,我们跳过这一步,自己创建纹理。这里需要注意的是,需要指定 TextureFormat::Rgba8UnormSrgb 为纹理像素格式而不是 surface.get_capabilities(&adapter).formats[0],因为 PNG 使用 RGBA 而不是 BGRA 像素格式:

rust
let texture_size = 256u32;
 
-let texture_desc = wgpu::TextureDescriptor {
-    size: wgpu::Extent3d {
-        width: texture_size,
-        height: texture_size,
-        depth_or_array_layers: 1,
-    },
-    mip_level_count: 1,
-    sample_count: 1,
-    dimension: wgpu::TextureDimension::D2,
-    format: wgpu::TextureFormat::Rgba8UnormSrgb,
-    usage: wgpu::TextureUsages::COPY_SRC
-        | wgpu::TextureUsages::RENDER_ATTACHMENT,
-    label: None,
-    view_formats: &[],
-};
-let texture = device.create_texture(&texture_desc);
-let texture_view = texture.create_view(&Default::default());

usage 字段的 RENDER_ATTACHMENT 位令 wgpu 可以渲染到此纹理,COPY_SRC 位令我们能够从纹理中提取数据,以便能够将其保存到文件中。

虽然我们可以使用这个纹理来绘制三角形,但还需要一些方法来获取它里面的像素。在纹理教程中,我们用一个缓冲区从一个文件中加载颜色数据,然后复制到另一个缓冲区。

我们要做的是反过来:从纹理中把数据复制到缓冲区,然后保存到文件中。我们得创建一个足够大的缓冲区来容纳数据:

rust
let u32_size = std::mem::size_of::<u32>() as u32;
+let texture_desc = wgpu::TextureDescriptor {
+    size: wgpu::Extent3d {
+        width: texture_size,
+        height: texture_size,
+        depth_or_array_layers: 1,
+    },
+    mip_level_count: 1,
+    sample_count: 1,
+    dimension: wgpu::TextureDimension::D2,
+    format: wgpu::TextureFormat::Rgba8UnormSrgb,
+    usage: wgpu::TextureUsages::COPY_SRC
+        | wgpu::TextureUsages::RENDER_ATTACHMENT,
+    label: None,
+    view_formats: &[],
+};
+let texture = device.create_texture(&texture_desc);
+let texture_view = texture.create_view(&Default::default());

usage 字段的 RENDER_ATTACHMENT 位令 wgpu 可以渲染到此纹理,COPY_SRC 位令我们能够从纹理中提取数据,以便能够将其保存到文件中。

虽然我们可以使用这个纹理来绘制三角形,但还需要一些方法来获取它里面的像素。在纹理教程中,我们用一个缓冲区从一个文件中加载颜色数据,然后复制到另一个缓冲区。

我们要做的是反过来:从纹理中把数据复制到缓冲区,然后保存到文件中。我们得创建一个足够大的缓冲区来容纳数据:

rust
let u32_size = std::mem::size_of::<u32>() as u32;
 
-let output_buffer_size = (u32_size * texture_size * texture_size) as wgpu::BufferAddress;
-let output_buffer_desc = wgpu::BufferDescriptor {
-    size: output_buffer_size,
-    usage: wgpu::BufferUsages::COPY_DST
-        // MAP_READ 告诉 wpgu 我们要在 cpu 端读取此缓冲区
-        | wgpu::BufferUsages::MAP_READ,
-    label: None,
-    mapped_at_creation: false,
-};
-let output_buffer = device.create_buffer(&output_buffer_desc);

现在已经做好了离屏绘制的准备,让我们来绘制点东西试试。由于只是画一个三角形,可以重用管线教程中的着色器代码:

rust
// 顶点着色器
+let output_buffer_size = (u32_size * texture_size * texture_size) as wgpu::BufferAddress;
+let output_buffer_desc = wgpu::BufferDescriptor {
+    size: output_buffer_size,
+    usage: wgpu::BufferUsages::COPY_DST
+        // MAP_READ 告诉 wpgu 我们要在 cpu 端读取此缓冲区
+        | wgpu::BufferUsages::MAP_READ,
+    label: None,
+    mapped_at_creation: false,
+};
+let output_buffer = device.create_buffer(&output_buffer_desc);

现在已经做好了离屏绘制的准备,让我们来绘制点东西试试。由于只是画一个三角形,可以重用管线教程中的着色器代码:

rust
// 顶点着色器
 
-struct VertexOutput {
-    @builtin(position) clip_position: vec4f,
-};
+struct VertexOutput {
+    @builtin(position) clip_position: vec4f,
+};
 
-@vertex
-fn vs_main(
-    @builtin(vertex_index) in_vertex_index: u32,
-) -> VertexOutput {
-    var out: VertexOutput;
-    let x = f32(1 - i32(in_vertex_index)) * 0.5;
-    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
-    out.clip_position = vec4f(x, y, 0.0, 1.0);
-    return out;
-}
+@vertex
+fn vs_main(
+    @builtin(vertex_index) in_vertex_index: u32,
+) -> VertexOutput {
+    var out: VertexOutput;
+    let x = f32(1 - i32(in_vertex_index)) * 0.5;
+    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
+    out.clip_position = vec4f(x, y, 0.0, 1.0);
+    return out;
+}
 
-// 片元着色器
+// 片元着色器
 
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4f {
-    return vec4f(0.3, 0.2, 0.1, 1.0);
-}

然后用着色器来创建一个简单的渲染管线 RenderPipeline

rust
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
-            label: Some("Shader"),
-            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
-        });
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4f {
+    return vec4f(0.3, 0.2, 0.1, 1.0);
+}

然后用着色器来创建一个简单的渲染管线 RenderPipeline

rust
 let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
+            label: Some("Shader"),
+            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
+        });
 
-let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-    label: Some("Render Pipeline Layout"),
-    bind_group_layouts: &[],
-    push_constant_ranges: &[],
-});
+let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+    label: Some("Render Pipeline Layout"),
+    bind_group_layouts: &[],
+    push_constant_ranges: &[],
+});
 
-let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-    label: Some("Render Pipeline"),
-    layout: Some(&render_pipeline_layout),
-    vertex: wgpu::VertexState {
-        module: &shader,
-        entry_point: "vs_main",
-        buffers: &[],
-    },
-    fragment: Some(wgpu::FragmentState {
-        module: &fs_module,
-        entry_point: "main",
-        targets: &[Some(wgpu::ColorTargetState {
-            format: texture_desc.format,
-            alpha_blend: wgpu::BlendState::REPLACE,
-            color_blend: wgpu::BlendState::REPLACE,
-            write_mask: wgpu::ColorWrites::ALL,
-        })],
-    }),
-    primitive: wgpu::PrimitiveState {
-        topology: wgpu::PrimitiveTopology::TriangleList,
-        strip_index_format: None,
-        front_face: wgpu::FrontFace::Ccw,
-        cull_mode: Some(wgpu::Face::Back),
-        polygon_mode: wgpu::PolygonMode::Fill,
-    },
-    depth_stencil: None,
-    multisample: wgpu::MultisampleState {
-        count: 1,
-        mask: !0,
-        alpha_to_coverage_enabled: false,
-    },
-});

接着创建一个命令编码器 CommandEncoder

rust
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-    label: None,
-});

离屏渲染最关键的地方就是渲染通道 的设置了。一个渲染通道至少需要一个颜色附件,一个颜色附件需要绑定一个纹理视图。前面的教程我们一直使用的是交换链SwapChain)的纹理视图,但事实上任何纹理视图都可以,包括我们自己创建的 texture_view

rust
{
-    let render_pass_desc = wgpu::RenderPassDescriptor {
-        label: Some("Render Pass"),
-        color_attachments: &[
-            wgpu::RenderPassColorAttachment {
-                view: &texture_view,
-                resolve_target: None,
-                ops: wgpu::Operations {
-                    load: wgpu::LoadOp::Clear(wgpu::Color {
-                        r: 0.1,
-                        g: 0.2,
-                        b: 0.3,
-                        a: 1.0,
-                    }),
-                    store: wgpu::StoreOp::Store
-                },
-            }
-        ],
-        ..Default::default()
-    };
-    let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
+let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+    label: Some("Render Pipeline"),
+    layout: Some(&render_pipeline_layout),
+    vertex: wgpu::VertexState {
+        module: &shader,
+        entry_point: "vs_main",
+        buffers: &[],
+    },
+    fragment: Some(wgpu::FragmentState {
+        module: &fs_module,
+        entry_point: "main",
+        targets: &[Some(wgpu::ColorTargetState {
+            format: texture_desc.format,
+            alpha_blend: wgpu::BlendState::REPLACE,
+            color_blend: wgpu::BlendState::REPLACE,
+            write_mask: wgpu::ColorWrites::ALL,
+        })],
+    }),
+    primitive: wgpu::PrimitiveState {
+        topology: wgpu::PrimitiveTopology::TriangleList,
+        strip_index_format: None,
+        front_face: wgpu::FrontFace::Ccw,
+        cull_mode: Some(wgpu::Face::Back),
+        polygon_mode: wgpu::PolygonMode::Fill,
+    },
+    depth_stencil: None,
+    multisample: wgpu::MultisampleState {
+        count: 1,
+        mask: !0,
+        alpha_to_coverage_enabled: false,
+    },
+});

接着创建一个命令编码器 CommandEncoder

rust
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+    label: None,
+});

离屏渲染最关键的地方就是渲染通道 的设置了。一个渲染通道至少需要一个颜色附件,一个颜色附件需要绑定一个纹理视图。前面的教程我们一直使用的是交换链SwapChain)的纹理视图,但事实上任何纹理视图都可以,包括我们自己创建的 texture_view

rust
{
+    let render_pass_desc = wgpu::RenderPassDescriptor {
+        label: Some("Render Pass"),
+        color_attachments: &[
+            wgpu::RenderPassColorAttachment {
+                view: &texture_view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(wgpu::Color {
+                        r: 0.1,
+                        g: 0.2,
+                        b: 0.3,
+                        a: 1.0,
+                    }),
+                    store: wgpu::StoreOp::Store
+                },
+            }
+        ],
+        ..Default::default()
+    };
+    let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
 
-    render_pass.set_pipeline(&render_pipeline);
-    render_pass.draw(0..3, 0..1);
-}

让我们把绘制在纹理Texture)中的像素数据复制到 output_buffer 缓冲区

rust
encoder.copy_texture_to_buffer(
-    wgpu::ImageCopyTexture {
-        aspect: wgpu::TextureAspect::All,
-                texture: &texture,
-        mip_level: 0,
-        origin: wgpu::Origin3d::ZERO,
-    },
-    wgpu::ImageCopyBuffer {
-        buffer: &output_buffer,
-        layout: wgpu::ImageDataLayout {
-            offset: 0,
-            bytes_per_row: u32_size * texture_size,
-            rows_per_image: texture_size,
-        },
-    },
-    texture_desc.size,
-);

上面已经编码(Encode)好了所有的命令(Command),现在把它们提交给 GPU 来执行:

rust
queue.submit(Some(encoder.finish()));

从缓冲区中读取数据

为了从缓冲区中读取数据,首先需要对它进行映射(Map),然后执行 get_mapped_range() 就可以得到一个缓冲区视图BufferView)实例,它实质上就是一个 &[u8] 类型数据的容器:

rust
// 需要对映射变量设置范围,以便我们能够解除缓冲区的映射
-{
-    let buffer_slice = output_buffer.slice(..);
+    render_pass.set_pipeline(&render_pipeline);
+    render_pass.draw(0..3, 0..1);
+}

让我们把绘制在纹理Texture)中的像素数据复制到 output_buffer 缓冲区

rust
encoder.copy_texture_to_buffer(
+    wgpu::ImageCopyTexture {
+        aspect: wgpu::TextureAspect::All,
+                texture: &texture,
+        mip_level: 0,
+        origin: wgpu::Origin3d::ZERO,
+    },
+    wgpu::ImageCopyBuffer {
+        buffer: &output_buffer,
+        layout: wgpu::ImageDataLayout {
+            offset: 0,
+            bytes_per_row: u32_size * texture_size,
+            rows_per_image: texture_size,
+        },
+    },
+    texture_desc.size,
+);

上面已经编码(Encode)好了所有的命令(Command),现在把它们提交给 GPU 来执行:

rust
queue.submit(Some(encoder.finish()));

从缓冲区中读取数据

为了从缓冲区中读取数据,首先需要对它进行映射(Map),然后执行 get_mapped_range() 就可以得到一个缓冲区视图BufferView)实例,它实质上就是一个 &[u8] 类型数据的容器:

rust
// 需要对映射变量设置范围,以便我们能够解除缓冲区的映射
+{
+    let buffer_slice = output_buffer.slice(..);
 
-    // 注意:我们必须在 await future 之前先创建映射,然后再调用 device.poll()。
-    // 否则,应用程序将停止响应。
-    let (tx, rx) = futures_intrusive::channel::shared::oneshot_channel();
-    buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
-        tx.send(result).unwrap();
-    });
-    device.poll(wgpu::Maintain::Wait);
-    rx.receive().await.unwrap().unwrap();
+    // 注意:我们必须在 await future 之前先创建映射,然后再调用 device.poll()。
+    // 否则,应用程序将停止响应。
+    let (tx, rx) = futures_intrusive::channel::shared::oneshot_channel();
+    buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
+        tx.send(result).unwrap();
+    });
+    device.poll(wgpu::Maintain::Wait);
+    rx.receive().await.unwrap().unwrap();
 
-    let data = buffer_slice.get_mapped_range();
+    let data = buffer_slice.get_mapped_range();
 
-    use image::{ImageBuffer, Rgba};
-    let buffer =
-        ImageBuffer::<Rgba<u8>, _>::from_raw(texture_size, texture_size, data).unwrap();
-    buffer.save("image.png").unwrap();
+    use image::{ImageBuffer, Rgba};
+    let buffer =
+        ImageBuffer::<Rgba<u8>, _>::from_raw(texture_size, texture_size, data).unwrap();
+    buffer.save("image.png").unwrap();
 
-}
-// 解除缓冲区映射
-output_buffer.unmap();

这个程序使用了 futures-intrusive,那也是 wgpu 的 demo 中使用的

Main 函数不能异步化

main() 做为程序的入口函数,它默认无法返回一个 Future(异步任务抽象单元),所以不能使用 async 关键字。我们将通过把代码封装到另一个函数中来解决此问题,这样就可以在 main()阻塞它(也就是等待函数真正执行完成)。异步函数被调用时会立即返回一个 Future 对象,此时函数内的任务可能还没有真正开始执行, 我们需要使用一个可以轮询 Future 的,比如pollster crate

有一些可以用来标注 main() 函数为异步,如 async-stdtokio。我选择不这样做,因为这两个包对咱们的项目来说都有点儿重了。当然,你可以使用你喜欢的任何异步包和设置。

rust
async fn run() {
-    // 离屏绘制代码...
-}
+}
+// 解除缓冲区映射
+output_buffer.unmap();

这个程序使用了 futures-intrusive,那也是 wgpu 的 demo 中使用的

Main 函数不能异步化

main() 做为程序的入口函数,它默认无法返回一个 Future(异步任务抽象单元),所以不能使用 async 关键字。我们将通过把代码封装到另一个函数中来解决此问题,这样就可以在 main()阻塞它(也就是等待函数真正执行完成)。异步函数被调用时会立即返回一个 Future 对象,此时函数内的任务可能还没有真正开始执行, 我们需要使用一个可以轮询 Future 的,比如pollster crate

有一些可以用来标注 main() 函数为异步,如 async-stdtokio。我选择不这样做,因为这两个包对咱们的项目来说都有点儿重了。当然,你可以使用你喜欢的任何异步包和设置。

rust
async fn run() {
+    // 离屏绘制代码...
+}
 
-fn main() {
-    pollster::block_on(run());
-}

现在运行代码,就会在项目根目录输出这样一张名为 image.png 的图像:

a brown triangle

- +fn main() { + pollster::block_on(run()); +}

现在运行代码,就会在项目根目录输出这样一张名为 image.png 的图像:

a brown triangle

+ \ No newline at end of file diff --git a/simuverse.html b/simuverse.html index 26c3bc94b..ef6c0f9be 100644 --- a/simuverse.html +++ b/simuverse.html @@ -5,19 +5,21 @@ 学习 wgpu | 学习 wgpu - + + - - - - - + + + + + - + + -
Skip to content
WASM 加载中...
- +
Skip to content
WASM 加载中...
+ \ No newline at end of file