From 75e57f5012a7a0a18a00294c601221cf413210ec Mon Sep 17 00:00:00 2001 From: shrekshao <5031596+shrekshao@users.noreply.github.com> Date: Mon, 27 Nov 2023 22:16:02 +0000 Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20=20@=2078b9c?= =?UTF-8?q?c18a958dc0e8ff18494f1433b4509ba56d3=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 404.html | 4 ++-- .../samples/A-buffer.json | 0 .../samples/animometer.json | 0 .../samples/bitonicSort.json | 0 .../samples/cameras.json | 0 .../samples/computeBoids.json | 0 .../samples/cornell.json | 0 .../samples/cubemap.json | 0 .../samples/deferredRendering.json | 0 .../samples/fractalCube.json | 0 .../samples/gameOfLife.json | 0 .../samples/helloTriangle.json | 0 .../samples/helloTriangleMSAA.json | 0 .../samples/imageBlur.json | 0 .../samples/instancedCube.json | 0 .../samples/normalMap.json | 0 .../samples/particles.json | 0 .../samples/renderBundles.json | 0 .../samples/resizeCanvas.json | 0 .../samples/reversedZ.json | 0 .../samples/rotatingCube.json | 0 .../samples/samplerParameters.json | 0 .../samples/shadowMapping.json | 0 .../samples/texturedCube.json | 0 .../samples/twoCubes.json | 0 .../samples/videoUploading.json | 0 .../samples/videoUploadingWebCodecs.json | 0 .../samples/worker.json | 0 .../_buildManifest.js | 0 .../_ssgManifest.js | 0 _next/static/chunks/342.4bfb1b924854d81d.js | 1 - _next/static/chunks/342.cb68a51cc6637deb.js | 1 + _next/static/chunks/704.31a64194a8c6952d.js | 1 - _next/static/chunks/704.5ca5973fd6c78894.js | 1 + ...ebpack-af8ade16c1360358.js => webpack-21fda3d42cf1e62d.js} | 2 +- index.html | 2 +- samples/A-buffer.html | 4 ++-- samples/animometer.html | 2 +- samples/bitonicSort.html | 2 +- samples/cameras.html | 2 +- samples/computeBoids.html | 2 +- samples/cornell.html | 2 +- samples/cubemap.html | 2 +- samples/deferredRendering.html | 4 ++-- samples/fractalCube.html | 2 +- samples/gameOfLife.html | 2 +- samples/helloTriangle.html | 2 +- samples/helloTriangleMSAA.html | 2 +- samples/imageBlur.html | 2 +- samples/instancedCube.html | 2 +- samples/normalMap.html | 2 +- samples/particles.html | 2 +- samples/renderBundles.html | 4 ++-- samples/resizeCanvas.html | 2 +- samples/reversedZ.html | 4 ++-- samples/rotatingCube.html | 2 +- samples/samplerParameters.html | 2 +- samples/shadowMapping.html | 2 +- samples/texturedCube.html | 2 +- samples/twoCubes.html | 2 +- samples/videoUploading.html | 2 +- samples/videoUploadingWebCodecs.html | 2 +- samples/worker.html | 4 ++-- 63 files changed, 38 insertions(+), 38 deletions(-) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/A-buffer.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/animometer.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/bitonicSort.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/cameras.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/computeBoids.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/cornell.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/cubemap.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/deferredRendering.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/fractalCube.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => 
bijUc7FuTasWzpIFR90RI}/samples/gameOfLife.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/helloTriangle.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/helloTriangleMSAA.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/imageBlur.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/instancedCube.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/normalMap.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/particles.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/renderBundles.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/resizeCanvas.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/reversedZ.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/rotatingCube.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/samplerParameters.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/shadowMapping.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/texturedCube.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/twoCubes.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/videoUploading.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/videoUploadingWebCodecs.json (100%) rename _next/data/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/samples/worker.json (100%) rename _next/static/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/_buildManifest.js (100%) rename _next/static/{ui8IQ4Jl_-GiewJ4GZre6 => bijUc7FuTasWzpIFR90RI}/_ssgManifest.js (100%) delete mode 100644 _next/static/chunks/342.4bfb1b924854d81d.js create mode 100644 _next/static/chunks/342.cb68a51cc6637deb.js delete mode 100644 _next/static/chunks/704.31a64194a8c6952d.js create mode 100644 _next/static/chunks/704.5ca5973fd6c78894.js rename _next/static/chunks/{webpack-af8ade16c1360358.js => webpack-21fda3d42cf1e62d.js} (97%) diff --git a/404.html b/404.html index 63f25ddf..c9ea630d 100644 --- a/404.html +++ b/404.html @@ -1,4 +1,4 @@ -404: This page could not be found

[404.html hunk: the rendered text ("404: This page could not be found.") is unchanged; the 2-line change swaps the hashed _next build asset references (ui8IQ4Jl_-GiewJ4GZre6 → bijUc7FuTasWzpIFR90RI); the surrounding minified markup was lost in extraction.]
\ No newline at end of file diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/A-buffer.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/A-buffer.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/A-buffer.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/A-buffer.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/animometer.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/animometer.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/animometer.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/animometer.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/bitonicSort.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/bitonicSort.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/bitonicSort.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/bitonicSort.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cameras.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/cameras.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cameras.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/cameras.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/computeBoids.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/computeBoids.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/computeBoids.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/computeBoids.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cornell.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/cornell.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cornell.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/cornell.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cubemap.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/cubemap.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cubemap.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/cubemap.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/deferredRendering.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/deferredRendering.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/deferredRendering.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/deferredRendering.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/fractalCube.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/fractalCube.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/fractalCube.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/fractalCube.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/gameOfLife.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/gameOfLife.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/gameOfLife.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/gameOfLife.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/helloTriangle.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/helloTriangle.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/helloTriangle.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/helloTriangle.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/helloTriangleMSAA.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/helloTriangleMSAA.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/helloTriangleMSAA.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/helloTriangleMSAA.json diff --git 
a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/imageBlur.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/imageBlur.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/imageBlur.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/imageBlur.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/instancedCube.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/instancedCube.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/instancedCube.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/instancedCube.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/normalMap.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/normalMap.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/normalMap.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/normalMap.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/particles.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/particles.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/particles.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/particles.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/renderBundles.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/renderBundles.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/renderBundles.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/renderBundles.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/resizeCanvas.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/resizeCanvas.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/resizeCanvas.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/resizeCanvas.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/reversedZ.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/reversedZ.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/reversedZ.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/reversedZ.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/rotatingCube.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/rotatingCube.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/rotatingCube.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/rotatingCube.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/samplerParameters.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/samplerParameters.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/samplerParameters.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/samplerParameters.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/shadowMapping.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/shadowMapping.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/shadowMapping.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/shadowMapping.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/texturedCube.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/texturedCube.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/texturedCube.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/texturedCube.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/twoCubes.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/twoCubes.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/twoCubes.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/twoCubes.json diff --git 
a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/videoUploading.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/videoUploading.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/videoUploading.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/videoUploading.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/videoUploadingWebCodecs.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/videoUploadingWebCodecs.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/videoUploadingWebCodecs.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/videoUploadingWebCodecs.json diff --git a/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/worker.json b/_next/data/bijUc7FuTasWzpIFR90RI/samples/worker.json similarity index 100% rename from _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/worker.json rename to _next/data/bijUc7FuTasWzpIFR90RI/samples/worker.json diff --git a/_next/static/ui8IQ4Jl_-GiewJ4GZre6/_buildManifest.js b/_next/static/bijUc7FuTasWzpIFR90RI/_buildManifest.js similarity index 100% rename from _next/static/ui8IQ4Jl_-GiewJ4GZre6/_buildManifest.js rename to _next/static/bijUc7FuTasWzpIFR90RI/_buildManifest.js diff --git a/_next/static/ui8IQ4Jl_-GiewJ4GZre6/_ssgManifest.js b/_next/static/bijUc7FuTasWzpIFR90RI/_ssgManifest.js similarity index 100% rename from _next/static/ui8IQ4Jl_-GiewJ4GZre6/_ssgManifest.js rename to _next/static/bijUc7FuTasWzpIFR90RI/_ssgManifest.js diff --git a/_next/static/chunks/342.4bfb1b924854d81d.js b/_next/static/chunks/342.4bfb1b924854d81d.js deleted file mode 100644 index 0f986cfc..00000000 --- a/_next/static/chunks/342.4bfb1b924854d81d.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[342],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return f}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),u=t(9147),l=t.n(u);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),u=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376),r=new n.GUI({autoPlace:!1});return r.domElement.style.position="relative",r.domElement.style.zIndex="1000",r}},[]),d=(0,s.useRef)(null),f=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),p=(0,o.useRouter)(),m=p.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[h,g]=(0,s.useState)(null),[v,x]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(m?x(m[1]):x(a[0].name),c&&u.current)for(u.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();f&&d.current&&(f.dom.style.position="absolute",f.showPanel(1),d.current.appendChild(f.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:f});o instanceof Promise&&o.catch(e=>{console.error(e),g(e)})}catch(s){console.error(s),g(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU 
Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),h?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(h)})]}):null]}),(0,r.jsxs)("div",{className:l().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:l().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:l().sourceFileContainer,"data-active":v==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function f(e,n){if(!e)throw Error(n)}},6888:function(e,n,t){"use strict";t.d(n,{W:function(){return i}});var r=t(6906),a=t(9385);let i={positions:r.m,triangles:r.g,normals:[],uvs:[]};i.normals=(0,a.b)(i.positions,i.triangles),i.uvs=(0,a.q)(i.positions,"xy"),i.triangles.push([i.positions.length,i.positions.length+2,i.positions.length+1],[i.positions.length,i.positions.length+1,i.positions.length+3]),i.positions.push([-100,20,-100],[100,20,100],[-100,20,100],[100,20,-100]),i.normals.push([0,1,0],[0,1,0],[0,1,0],[0,1,0]),i.uvs.push([0,0],[1,1],[0,1],[1,0])},9385:function(e,n,t){"use strict";t.d(n,{b:function(){return a},q:function(){return o}});var r=t(6416);function a(e,n){let t=e.map(()=>[0,0,0]);return n.forEach(n=>{let[a,i,o]=n,s=e[a],u=e[i],l=e[o],c=r.R3.subtract(u,s),d=r.R3.subtract(l,s);r.R3.normalize(c,c),r.R3.normalize(d,d);let f=r.R3.cross(c,d);r.R3.add(t[a],f,t[a]),r.R3.add(t[i],f,t[i]),r.R3.add(t[o],f,t[o])}),t.forEach(e=>{r.R3.normalize(e,e)}),t}let i={xy:[0,1],xz:[0,2],yz:[1,2]};function o(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"xy",t=i[n],r=e.map(()=>[0,0]),a=[1/0,1/0],o=[-1/0,-1/0];return e.forEach((e,n)=>{r[n][0]=e[t[0]],r[n][1]=e[t[1]],a[0]=Math.min(e[t[0]],a[0]),a[1]=Math.min(e[t[1]],a[1]),o[0]=Math.max(e[t[0]],o[0]),o[1]=Math.max(e[t[1]],o[1])}),r.forEach(e=>{e[0]=(e[0]-a[0])/(o[0]-a[0]),e[1]=(e[1]-a[1])/(o[1]-a[1])}),r}},2342:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return f}});var r=t(6416),a=t(5671),i=t(6888),o="struct Scene {\n lightViewProjMatrix: mat4x4,\n cameraViewProjMatrix: mat4x4,\n lightPos: vec3,\n}\n\nstruct Model {\n modelMatrix: mat4x4,\n}\n\n@group(0) @binding(0) var scene : Scene;\n@group(1) @binding(0) var model : Model;\n\n@vertex\nfn main(\n @location(0) position: vec3\n) -> @builtin(position) vec4 {\n return scene.lightViewProjMatrix * model.modelMatrix * vec4(position, 1.0);\n}\n",s="struct Scene {\n lightViewProjMatrix: mat4x4,\n cameraViewProjMatrix: mat4x4,\n lightPos: vec3,\n}\n\nstruct Model {\n modelMatrix: mat4x4,\n}\n\n@group(0) @binding(0) var scene : Scene;\n@group(1) @binding(0) var model : Model;\n\nstruct VertexOutput {\n @location(0) shadowPos: vec3,\n @location(1) fragPos: vec3,\n @location(2) fragNorm: vec3,\n\n @builtin(position) Position: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position: vec3,\n @location(1) 
normal: vec3\n) -> VertexOutput {\n var output : VertexOutput;\n\n // XY is in (-1, 1) space, Z is in (0, 1) space\n let posFromLight = scene.lightViewProjMatrix * model.modelMatrix * vec4(position, 1.0);\n\n // Convert XY to (0, 1)\n // Y is flipped because texture coords are Y-down.\n output.shadowPos = vec3(\n posFromLight.xy * vec2(0.5, -0.5) + vec2(0.5),\n posFromLight.z\n );\n\n output.Position = scene.cameraViewProjMatrix * model.modelMatrix * vec4(position, 1.0);\n output.fragPos = output.Position.xyz;\n output.fragNorm = normal;\n return output;\n}\n",u="override shadowDepthTextureSize: f32 = 1024.0;\n\nstruct Scene {\n lightViewProjMatrix : mat4x4,\n cameraViewProjMatrix : mat4x4,\n lightPos : vec3,\n}\n\n@group(0) @binding(0) var scene : Scene;\n@group(0) @binding(1) var shadowMap: texture_depth_2d;\n@group(0) @binding(2) var shadowSampler: sampler_comparison;\n\nstruct FragmentInput {\n @location(0) shadowPos : vec3,\n @location(1) fragPos : vec3,\n @location(2) fragNorm : vec3,\n}\n\nconst albedo = vec3(0.9);\nconst ambientFactor = 0.2;\n\n@fragment\nfn main(input : FragmentInput) -> @location(0) vec4 {\n // Percentage-closer filtering. Sample texels in the region\n // to smooth the result.\n var visibility = 0.0;\n let oneOverShadowDepthTextureSize = 1.0 / shadowDepthTextureSize;\n for (var y = -1; y <= 1; y++) {\n for (var x = -1; x <= 1; x++) {\n let offset = vec2(vec2(x, y)) * oneOverShadowDepthTextureSize;\n\n visibility += textureSampleCompare(\n shadowMap, shadowSampler,\n input.shadowPos.xy + offset, input.shadowPos.z - 0.007\n );\n }\n }\n visibility /= 9.0;\n\n let lambertFactor = max(dot(normalize(scene.lightPos - input.fragPos), input.fragNorm), 0.0);\n let lightingFactor = min(ambientFactor + visibility * lambertFactor, 1.0);\n\n return vec4(lightingFactor * albedo, 1.0);\n}\n",l="src/sample/shadowMapping/main.ts";let c=async e=>{let{canvas:n,pageState:t}=e,a=await navigator.gpu.requestAdapter(),l=await a.requestDevice();if(!t.active)return;let c=n.getContext("webgpu"),d=window.devicePixelRatio;n.width=n.clientWidth*d,n.height=n.clientHeight*d;let f=n.width/n.height,p=navigator.gpu.getPreferredCanvasFormat();c.configure({device:l,format:p,alphaMode:"premultiplied"});let m=l.createBuffer({size:6*i.W.positions.length*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});{let h=new Float32Array(m.getMappedRange());for(let g=0;g(0,a.Tl)({name:"Shadow Mapping",description:"This example shows how to sample from a depth texture to render shadows.",init:c,sources:[{name:l.substring(25),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport { mesh } from '../../meshes/stanfordDragon';\n\nimport vertexShadowWGSL from './vertexShadow.wgsl';\nimport vertexWGSL from './vertex.wgsl';\nimport fragmentWGSL from './fragment.wgsl';\n\nconst shadowDepthTextureSize = 1024;\n\nconst init: SampleInit = async ({ canvas, pageState }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const aspect = canvas.width / canvas.height;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: 
presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create the model vertex buffer.\n const vertexBuffer = device.createBuffer({\n size: mesh.positions.length * 3 * 2 * Float32Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Float32Array(vertexBuffer.getMappedRange());\n for (let i = 0; i < mesh.positions.length; ++i) {\n mapping.set(mesh.positions[i], 6 * i);\n mapping.set(mesh.normals[i], 6 * i + 3);\n }\n vertexBuffer.unmap();\n }\n\n // Create the model index buffer.\n const indexCount = mesh.triangles.length * 3;\n const indexBuffer = device.createBuffer({\n size: indexCount * Uint16Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Uint16Array(indexBuffer.getMappedRange());\n for (let i = 0; i < mesh.triangles.length; ++i) {\n mapping.set(mesh.triangles[i], 3 * i);\n }\n indexBuffer.unmap();\n }\n\n // Create the depth texture for rendering/sampling the shadow map.\n const shadowDepthTexture = device.createTexture({\n size: [shadowDepthTextureSize, shadowDepthTextureSize, 1],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'depth32float',\n });\n const shadowDepthTextureView = shadowDepthTexture.createView();\n\n // Create some common descriptors used for both the shadow pipeline\n // and the color rendering pipeline.\n const vertexBuffers: Iterable = [\n {\n arrayStride: Float32Array.BYTES_PER_ELEMENT * 6,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: 0,\n format: 'float32x3',\n },\n {\n // normal\n shaderLocation: 1,\n offset: Float32Array.BYTES_PER_ELEMENT * 3,\n format: 'float32x3',\n },\n ],\n },\n ];\n\n const primitive: GPUPrimitiveState = {\n topology: 'triangle-list',\n cullMode: 'back',\n };\n\n const uniformBufferBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.VERTEX,\n buffer: {\n type: 'uniform',\n },\n },\n ],\n });\n\n const shadowPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [\n uniformBufferBindGroupLayout,\n uniformBufferBindGroupLayout,\n ],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexShadowWGSL,\n }),\n entryPoint: 'main',\n buffers: vertexBuffers,\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth32float',\n },\n primitive,\n });\n\n // Create a bind group layout which holds the scene uniforms and\n // the texture+sampler for depth. 
We create it manually because the WebPU\n // implementation doesn't infer this from the shader (yet).\n const bglForRender = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n buffer: {\n type: 'uniform',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'depth',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n sampler: {\n type: 'comparison',\n },\n },\n ],\n });\n\n const pipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [bglForRender, uniformBufferBindGroupLayout],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexWGSL,\n }),\n entryPoint: 'main',\n buffers: vertexBuffers,\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n constants: {\n shadowDepthTextureSize,\n },\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus-stencil8',\n },\n primitive,\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus-stencil8',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n // view is acquired and set in render loop.\n view: undefined,\n\n clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n stencilClearValue: 0,\n stencilLoadOp: 'clear',\n stencilStoreOp: 'store',\n },\n };\n\n const modelUniformBuffer = device.createBuffer({\n size: 4 * 16, // 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sceneUniformBuffer = device.createBuffer({\n // Two 4x4 viewProj matrices,\n // one for the camera and one for the light.\n // Then a vec3 for the light position.\n // Rounded to the nearest multiple of 16.\n size: 2 * 4 * 16 + 4 * 4,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sceneBindGroupForShadow = device.createBindGroup({\n layout: uniformBufferBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: sceneUniformBuffer,\n },\n },\n ],\n });\n\n const sceneBindGroupForRender = device.createBindGroup({\n layout: bglForRender,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: sceneUniformBuffer,\n },\n },\n {\n binding: 1,\n resource: shadowDepthTextureView,\n },\n {\n binding: 2,\n resource: device.createSampler({\n compare: 'less',\n }),\n },\n ],\n });\n\n const modelBindGroup = device.createBindGroup({\n layout: uniformBufferBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: modelUniformBuffer,\n },\n },\n ],\n });\n\n const eyePosition = vec3.fromValues(0, 50, -100);\n const upVector = vec3.fromValues(0, 1, 0);\n const origin = vec3.fromValues(0, 0, 0);\n\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 2000.0\n );\n\n const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);\n\n const lightPosition = vec3.fromValues(50, 100, -100);\n const lightViewMatrix = mat4.lookAt(lightPosition, origin, upVector);\n const lightProjectionMatrix = mat4.create();\n {\n const left = -80;\n const right = 80;\n const bottom = -80;\n const 
top = 80;\n const near = -200;\n const far = 300;\n mat4.ortho(left, right, bottom, top, near, far, lightProjectionMatrix);\n }\n\n const lightViewProjMatrix = mat4.multiply(\n lightProjectionMatrix,\n lightViewMatrix\n );\n\n const viewProjMatrix = mat4.multiply(projectionMatrix, viewMatrix);\n\n // Move the model so it's centered.\n const modelMatrix = mat4.translation([0, -45, 0]);\n\n // The camera/light aren't moving, so write them into buffers now.\n {\n const lightMatrixData = lightViewProjMatrix as Float32Array;\n device.queue.writeBuffer(\n sceneUniformBuffer,\n 0,\n lightMatrixData.buffer,\n lightMatrixData.byteOffset,\n lightMatrixData.byteLength\n );\n\n const cameraMatrixData = viewProjMatrix as Float32Array;\n device.queue.writeBuffer(\n sceneUniformBuffer,\n 64,\n cameraMatrixData.buffer,\n cameraMatrixData.byteOffset,\n cameraMatrixData.byteLength\n );\n\n const lightData = lightPosition as Float32Array;\n device.queue.writeBuffer(\n sceneUniformBuffer,\n 128,\n lightData.buffer,\n lightData.byteOffset,\n lightData.byteLength\n );\n\n const modelData = modelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 0,\n modelData.buffer,\n modelData.byteOffset,\n modelData.byteLength\n );\n }\n\n // Rotates the camera around the origin based on time.\n function getCameraViewProjMatrix() {\n const eyePosition = vec3.fromValues(0, 50, -100);\n\n const rad = Math.PI * (Date.now() / 2000);\n const rotation = mat4.rotateY(mat4.translation(origin), rad);\n vec3.transformMat4(eyePosition, rotation, eyePosition);\n\n const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);\n\n mat4.multiply(projectionMatrix, viewMatrix, viewProjMatrix);\n return viewProjMatrix as Float32Array;\n }\n\n const shadowPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [],\n depthStencilAttachment: {\n view: shadowDepthTextureView,\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const cameraViewProj = getCameraViewProjMatrix();\n device.queue.writeBuffer(\n sceneUniformBuffer,\n 64,\n cameraViewProj.buffer,\n cameraViewProj.byteOffset,\n cameraViewProj.byteLength\n );\n\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n {\n const shadowPass = commandEncoder.beginRenderPass(shadowPassDescriptor);\n shadowPass.setPipeline(shadowPipeline);\n shadowPass.setBindGroup(0, sceneBindGroupForShadow);\n shadowPass.setBindGroup(1, modelBindGroup);\n shadowPass.setVertexBuffer(0, vertexBuffer);\n shadowPass.setIndexBuffer(indexBuffer, 'uint16');\n shadowPass.drawIndexed(indexCount);\n\n shadowPass.end();\n }\n {\n const renderPass = commandEncoder.beginRenderPass(renderPassDescriptor);\n renderPass.setPipeline(pipeline);\n renderPass.setBindGroup(0, sceneBindGroupForRender);\n renderPass.setBindGroup(1, modelBindGroup);\n renderPass.setVertexBuffer(0, vertexBuffer);\n renderPass.setIndexBuffer(indexBuffer, 'uint16');\n renderPass.drawIndexed(indexCount);\n\n renderPass.end();\n }\n device.queue.submit([commandEncoder.finish()]);\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst ShadowMapping: () => JSX.Element = () =>\n makeSample({\n name: 'Shadow Mapping',\n description:\n 'This example shows how to sample from a depth texture to render shadows.',\n init,\n sources: [\n {\n name: 
__filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './vertexShadow.wgsl',\n contents: vertexShadowWGSL,\n editable: true,\n },\n {\n name: './vertex.wgsl',\n contents: vertexWGSL,\n editable: true,\n },\n {\n name: './fragment.wgsl',\n contents: fragmentWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default ShadowMapping;\n"},{name:"./vertexShadow.wgsl",contents:o,editable:!0},{name:"./vertex.wgsl",contents:s,editable:!0},{name:"./fragment.wgsl",contents:u,editable:!0}],filename:l});var f=d},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/342.cb68a51cc6637deb.js b/_next/static/chunks/342.cb68a51cc6637deb.js new file mode 100644 index 00000000..93aa72a8 --- /dev/null +++ b/_next/static/chunks/342.cb68a51cc6637deb.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[342],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return f}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),u=t(9147),l=t.n(u);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),u=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376),r=new n.GUI({autoPlace:!1});return r.domElement.style.position="relative",r.domElement.style.zIndex="1000",r}},[]),d=(0,s.useRef)(null),f=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),p=(0,o.useRouter)(),m=p.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[h,g]=(0,s.useState)(null),[v,x]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(m?x(m[1]):x(a[0].name),c&&u.current)for(u.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();f&&d.current&&(f.dom.style.position="absolute",f.showPanel(1),d.current.appendChild(f.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:f});o instanceof Promise&&o.catch(e=>{console.error(e),g(e)})}catch(s){console.error(s),g(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),h?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(h)})]}):null]}),(0,r.jsxs)("div",{className:l().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:l().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:l().sourceFileContainer,"data-active":v==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function f(e,n){if(!e)throw Error(n)}},6888:function(e,n,t){"use strict";t.d(n,{W:function(){return i}});var r=t(6906),a=t(9385);let i={positions:r.m,triangles:r.g,normals:[],uvs:[]};i.normals=(0,a.b)(i.positions,i.triangles),i.uvs=(0,a.q)(i.positions,"xy"),i.triangles.push([i.positions.length,i.positions.length+2,i.positions.length+1],[i.positions.length,i.positions.length+1,i.positions.length+3]),i.positions.push([-100,20,-100],[100,20,100],[-100,20,100],[100,20,-100]),i.normals.push([0,1,0],[0,1,0],[0,1,0],[0,1,0]),i.uvs.push([0,0],[1,1],[0,1],[1,0])},9385:function(e,n,t){"use strict";t.d(n,{b:function(){return a},q:function(){return o}});var r=t(6416);function a(e,n){let t=e.map(()=>[0,0,0]);return n.forEach(n=>{let[a,i,o]=n,s=e[a],u=e[i],l=e[o],c=r.R3.subtract(u,s),d=r.R3.subtract(l,s);r.R3.normalize(c,c),r.R3.normalize(d,d);let f=r.R3.cross(c,d);r.R3.add(t[a],f,t[a]),r.R3.add(t[i],f,t[i]),r.R3.add(t[o],f,t[o])}),t.forEach(e=>{r.R3.normalize(e,e)}),t}let i={xy:[0,1],xz:[0,2],yz:[1,2]};function o(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"xy",t=i[n],r=e.map(()=>[0,0]),a=[1/0,1/0],o=[-1/0,-1/0];return e.forEach((e,n)=>{r[n][0]=e[t[0]],r[n][1]=e[t[1]],a[0]=Math.min(e[t[0]],a[0]),a[1]=Math.min(e[t[1]],a[1]),o[0]=Math.max(e[t[0]],o[0]),o[1]=Math.max(e[t[1]],o[1])}),r.forEach(e=>{e[0]=(e[0]-a[0])/(o[0]-a[0]),e[1]=(e[1]-a[1])/(o[1]-a[1])}),r}},2342:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return f}});var r=t(6416),a=t(5671),i=t(6888),o="struct Scene {\n lightViewProjMatrix: mat4x4,\n cameraViewProjMatrix: mat4x4,\n lightPos: vec3,\n}\n\nstruct Model {\n modelMatrix: mat4x4,\n}\n\n@group(0) @binding(0) var scene : Scene;\n@group(1) @binding(0) var model : Model;\n\n@vertex\nfn main(\n @location(0) position: vec3\n) -> @builtin(position) vec4 {\n return scene.lightViewProjMatrix * model.modelMatrix * vec4(position, 1.0);\n}\n",s="struct Scene {\n lightViewProjMatrix: mat4x4,\n cameraViewProjMatrix: mat4x4,\n lightPos: vec3,\n}\n\nstruct Model {\n modelMatrix: mat4x4,\n}\n\n@group(0) @binding(0) var scene : Scene;\n@group(1) @binding(0) var model : Model;\n\nstruct VertexOutput {\n @location(0) shadowPos: vec3,\n @location(1) fragPos: vec3,\n @location(2) fragNorm: vec3,\n\n @builtin(position) Position: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position: vec3,\n @location(1) normal: vec3\n) -> VertexOutput {\n var output : VertexOutput;\n\n // XY is in (-1, 1) space, Z is in (0, 1) space\n let posFromLight = scene.lightViewProjMatrix * model.modelMatrix * vec4(position, 1.0);\n\n // Convert XY to (0, 1)\n // Y is flipped because texture coords are Y-down.\n output.shadowPos = vec3(\n posFromLight.xy * vec2(0.5, -0.5) + vec2(0.5),\n posFromLight.z\n );\n\n output.Position = scene.cameraViewProjMatrix * model.modelMatrix * vec4(position, 1.0);\n output.fragPos = 
output.Position.xyz;\n output.fragNorm = normal;\n return output;\n}\n",u="override shadowDepthTextureSize: f32 = 1024.0;\n\nstruct Scene {\n lightViewProjMatrix : mat4x4,\n cameraViewProjMatrix : mat4x4,\n lightPos : vec3,\n}\n\n@group(0) @binding(0) var scene : Scene;\n@group(0) @binding(1) var shadowMap: texture_depth_2d;\n@group(0) @binding(2) var shadowSampler: sampler_comparison;\n\nstruct FragmentInput {\n @location(0) shadowPos : vec3,\n @location(1) fragPos : vec3,\n @location(2) fragNorm : vec3,\n}\n\nconst albedo = vec3(0.9);\nconst ambientFactor = 0.2;\n\n@fragment\nfn main(input : FragmentInput) -> @location(0) vec4 {\n // Percentage-closer filtering. Sample texels in the region\n // to smooth the result.\n var visibility = 0.0;\n let oneOverShadowDepthTextureSize = 1.0 / shadowDepthTextureSize;\n for (var y = -1; y <= 1; y++) {\n for (var x = -1; x <= 1; x++) {\n let offset = vec2(vec2(x, y)) * oneOverShadowDepthTextureSize;\n\n visibility += textureSampleCompare(\n shadowMap, shadowSampler,\n input.shadowPos.xy + offset, input.shadowPos.z - 0.007\n );\n }\n }\n visibility /= 9.0;\n\n let lambertFactor = max(dot(normalize(scene.lightPos - input.fragPos), normalize(input.fragNorm)), 0.0);\n let lightingFactor = min(ambientFactor + visibility * lambertFactor, 1.0);\n\n return vec4(lightingFactor * albedo, 1.0);\n}\n",l="src/sample/shadowMapping/main.ts";let c=async e=>{let{canvas:n,pageState:t}=e,a=await navigator.gpu.requestAdapter(),l=await a.requestDevice();if(!t.active)return;let c=n.getContext("webgpu"),d=window.devicePixelRatio;n.width=n.clientWidth*d,n.height=n.clientHeight*d;let f=n.width/n.height,p=navigator.gpu.getPreferredCanvasFormat();c.configure({device:l,format:p,alphaMode:"premultiplied"});let m=l.createBuffer({size:6*i.W.positions.length*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});{let h=new Float32Array(m.getMappedRange());for(let g=0;g(0,a.Tl)({name:"Shadow Mapping",description:"This example shows how to sample from a depth texture to render shadows.",init:c,sources:[{name:l.substring(25),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport { mesh } from '../../meshes/stanfordDragon';\n\nimport vertexShadowWGSL from './vertexShadow.wgsl';\nimport vertexWGSL from './vertex.wgsl';\nimport fragmentWGSL from './fragment.wgsl';\n\nconst shadowDepthTextureSize = 1024;\n\nconst init: SampleInit = async ({ canvas, pageState }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const aspect = canvas.width / canvas.height;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create the model vertex buffer.\n const vertexBuffer = device.createBuffer({\n size: mesh.positions.length * 3 * 2 * Float32Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Float32Array(vertexBuffer.getMappedRange());\n for (let i = 0; i < mesh.positions.length; ++i) {\n mapping.set(mesh.positions[i], 6 * i);\n mapping.set(mesh.normals[i], 6 * i + 3);\n }\n 
vertexBuffer.unmap();\n }\n\n // Create the model index buffer.\n const indexCount = mesh.triangles.length * 3;\n const indexBuffer = device.createBuffer({\n size: indexCount * Uint16Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Uint16Array(indexBuffer.getMappedRange());\n for (let i = 0; i < mesh.triangles.length; ++i) {\n mapping.set(mesh.triangles[i], 3 * i);\n }\n indexBuffer.unmap();\n }\n\n // Create the depth texture for rendering/sampling the shadow map.\n const shadowDepthTexture = device.createTexture({\n size: [shadowDepthTextureSize, shadowDepthTextureSize, 1],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'depth32float',\n });\n const shadowDepthTextureView = shadowDepthTexture.createView();\n\n // Create some common descriptors used for both the shadow pipeline\n // and the color rendering pipeline.\n const vertexBuffers: Iterable = [\n {\n arrayStride: Float32Array.BYTES_PER_ELEMENT * 6,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: 0,\n format: 'float32x3',\n },\n {\n // normal\n shaderLocation: 1,\n offset: Float32Array.BYTES_PER_ELEMENT * 3,\n format: 'float32x3',\n },\n ],\n },\n ];\n\n const primitive: GPUPrimitiveState = {\n topology: 'triangle-list',\n cullMode: 'back',\n };\n\n const uniformBufferBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.VERTEX,\n buffer: {\n type: 'uniform',\n },\n },\n ],\n });\n\n const shadowPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [\n uniformBufferBindGroupLayout,\n uniformBufferBindGroupLayout,\n ],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexShadowWGSL,\n }),\n entryPoint: 'main',\n buffers: vertexBuffers,\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth32float',\n },\n primitive,\n });\n\n // Create a bind group layout which holds the scene uniforms and\n // the texture+sampler for depth. 
We create it manually because the WebPU\n // implementation doesn't infer this from the shader (yet).\n const bglForRender = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n buffer: {\n type: 'uniform',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'depth',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n sampler: {\n type: 'comparison',\n },\n },\n ],\n });\n\n const pipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [bglForRender, uniformBufferBindGroupLayout],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexWGSL,\n }),\n entryPoint: 'main',\n buffers: vertexBuffers,\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n constants: {\n shadowDepthTextureSize,\n },\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus-stencil8',\n },\n primitive,\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus-stencil8',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n // view is acquired and set in render loop.\n view: undefined,\n\n clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n stencilClearValue: 0,\n stencilLoadOp: 'clear',\n stencilStoreOp: 'store',\n },\n };\n\n const modelUniformBuffer = device.createBuffer({\n size: 4 * 16, // 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sceneUniformBuffer = device.createBuffer({\n // Two 4x4 viewProj matrices,\n // one for the camera and one for the light.\n // Then a vec3 for the light position.\n // Rounded to the nearest multiple of 16.\n size: 2 * 4 * 16 + 4 * 4,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sceneBindGroupForShadow = device.createBindGroup({\n layout: uniformBufferBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: sceneUniformBuffer,\n },\n },\n ],\n });\n\n const sceneBindGroupForRender = device.createBindGroup({\n layout: bglForRender,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: sceneUniformBuffer,\n },\n },\n {\n binding: 1,\n resource: shadowDepthTextureView,\n },\n {\n binding: 2,\n resource: device.createSampler({\n compare: 'less',\n }),\n },\n ],\n });\n\n const modelBindGroup = device.createBindGroup({\n layout: uniformBufferBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: modelUniformBuffer,\n },\n },\n ],\n });\n\n const eyePosition = vec3.fromValues(0, 50, -100);\n const upVector = vec3.fromValues(0, 1, 0);\n const origin = vec3.fromValues(0, 0, 0);\n\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 2000.0\n );\n\n const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);\n\n const lightPosition = vec3.fromValues(50, 100, -100);\n const lightViewMatrix = mat4.lookAt(lightPosition, origin, upVector);\n const lightProjectionMatrix = mat4.create();\n {\n const left = -80;\n const right = 80;\n const bottom = -80;\n const 
top = 80;\n const near = -200;\n const far = 300;\n mat4.ortho(left, right, bottom, top, near, far, lightProjectionMatrix);\n }\n\n const lightViewProjMatrix = mat4.multiply(\n lightProjectionMatrix,\n lightViewMatrix\n );\n\n const viewProjMatrix = mat4.multiply(projectionMatrix, viewMatrix);\n\n // Move the model so it's centered.\n const modelMatrix = mat4.translation([0, -45, 0]);\n\n // The camera/light aren't moving, so write them into buffers now.\n {\n const lightMatrixData = lightViewProjMatrix as Float32Array;\n device.queue.writeBuffer(\n sceneUniformBuffer,\n 0,\n lightMatrixData.buffer,\n lightMatrixData.byteOffset,\n lightMatrixData.byteLength\n );\n\n const cameraMatrixData = viewProjMatrix as Float32Array;\n device.queue.writeBuffer(\n sceneUniformBuffer,\n 64,\n cameraMatrixData.buffer,\n cameraMatrixData.byteOffset,\n cameraMatrixData.byteLength\n );\n\n const lightData = lightPosition as Float32Array;\n device.queue.writeBuffer(\n sceneUniformBuffer,\n 128,\n lightData.buffer,\n lightData.byteOffset,\n lightData.byteLength\n );\n\n const modelData = modelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 0,\n modelData.buffer,\n modelData.byteOffset,\n modelData.byteLength\n );\n }\n\n // Rotates the camera around the origin based on time.\n function getCameraViewProjMatrix() {\n const eyePosition = vec3.fromValues(0, 50, -100);\n\n const rad = Math.PI * (Date.now() / 2000);\n const rotation = mat4.rotateY(mat4.translation(origin), rad);\n vec3.transformMat4(eyePosition, rotation, eyePosition);\n\n const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);\n\n mat4.multiply(projectionMatrix, viewMatrix, viewProjMatrix);\n return viewProjMatrix as Float32Array;\n }\n\n const shadowPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [],\n depthStencilAttachment: {\n view: shadowDepthTextureView,\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const cameraViewProj = getCameraViewProjMatrix();\n device.queue.writeBuffer(\n sceneUniformBuffer,\n 64,\n cameraViewProj.buffer,\n cameraViewProj.byteOffset,\n cameraViewProj.byteLength\n );\n\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n {\n const shadowPass = commandEncoder.beginRenderPass(shadowPassDescriptor);\n shadowPass.setPipeline(shadowPipeline);\n shadowPass.setBindGroup(0, sceneBindGroupForShadow);\n shadowPass.setBindGroup(1, modelBindGroup);\n shadowPass.setVertexBuffer(0, vertexBuffer);\n shadowPass.setIndexBuffer(indexBuffer, 'uint16');\n shadowPass.drawIndexed(indexCount);\n\n shadowPass.end();\n }\n {\n const renderPass = commandEncoder.beginRenderPass(renderPassDescriptor);\n renderPass.setPipeline(pipeline);\n renderPass.setBindGroup(0, sceneBindGroupForRender);\n renderPass.setBindGroup(1, modelBindGroup);\n renderPass.setVertexBuffer(0, vertexBuffer);\n renderPass.setIndexBuffer(indexBuffer, 'uint16');\n renderPass.drawIndexed(indexCount);\n\n renderPass.end();\n }\n device.queue.submit([commandEncoder.finish()]);\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst ShadowMapping: () => JSX.Element = () =>\n makeSample({\n name: 'Shadow Mapping',\n description:\n 'This example shows how to sample from a depth texture to render shadows.',\n init,\n sources: [\n {\n name: 
__filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './vertexShadow.wgsl',\n contents: vertexShadowWGSL,\n editable: true,\n },\n {\n name: './vertex.wgsl',\n contents: vertexWGSL,\n editable: true,\n },\n {\n name: './fragment.wgsl',\n contents: fragmentWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default ShadowMapping;\n"},{name:"./vertexShadow.wgsl",contents:o,editable:!0},{name:"./vertex.wgsl",contents:s,editable:!0},{name:"./fragment.wgsl",contents:u,editable:!0}],filename:l});var f=d},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/704.31a64194a8c6952d.js b/_next/static/chunks/704.31a64194a8c6952d.js deleted file mode 100644 index 2423cfeb..00000000 --- a/_next/static/chunks/704.31a64194a8c6952d.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[704],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return l},hu:function(){return c}});var r=t(5893),i=t(9008),a=t.n(i),o=t(1163),s=t(7294),u=t(9147),f=t.n(u);t(7319);let d=e=>{let n=(0,s.useRef)(null),i=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:i}=e;return{name:n,...function(e){let n;let i=null;{i=document.createElement("div");let a=t(4631);n=a(i,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){i&&t&&(t.appendChild(i),n.setOption("value",e))}})})}}}(i)}}),e.sources),u=(0,s.useRef)(null),d=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376),r=new n.GUI({autoPlace:!1});return r.domElement.style.position="relative",r.domElement.style.zIndex="1000",r}},[]),l=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),g=(0,o.useRouter)(),m=g.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[p,h]=(0,s.useState)(null),[v,x]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(m?x(m[1]):x(i[0].name),d&&u.current)for(u.current.appendChild(d.domElement);d.__controllers.length>0;)d.__controllers[0].remove();c&&l.current&&(c.dom.style.position="absolute",c.showPanel(1),l.current.appendChild(c.dom));let t={active:!0},r=()=>{t.active=!1};try{let a=n.current;if(!a)throw Error("The canvas is not available");let o=e.init({canvas:a,pageState:t,gui:d,stats:c});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(s){console.error(s),h(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(a(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),p?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(p)})]}):null]}),(0,r.jsxs)("div",{className:f().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:l}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:f().sourceFileNav,children:(0,r.jsx)("ul",{children:i.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},n))})}),i.map((e,n)=>(0,r.jsx)(e.Container,{className:f().sourceFileContainer,"data-active":v==e.name},n))]})]})},l=e=>(0,r.jsx)(d,{...e});function c(e,n){if(!e)throw Error(n)}},6888:function(e,n,t){"use strict";t.d(n,{W:function(){return a}});var r=t(6906),i=t(9385);let a={positions:r.m,triangles:r.g,normals:[],uvs:[]};a.normals=(0,i.b)(a.positions,a.triangles),a.uvs=(0,i.q)(a.positions,"xy"),a.triangles.push([a.positions.length,a.positions.length+2,a.positions.length+1],[a.positions.length,a.positions.length+1,a.positions.length+3]),a.positions.push([-100,20,-100],[100,20,100],[-100,20,100],[100,20,-100]),a.normals.push([0,1,0],[0,1,0],[0,1,0],[0,1,0]),a.uvs.push([0,0],[1,1],[0,1],[1,0])},9385:function(e,n,t){"use strict";t.d(n,{b:function(){return i},q:function(){return o}});var r=t(6416);function i(e,n){let t=e.map(()=>[0,0,0]);return n.forEach(n=>{let[i,a,o]=n,s=e[i],u=e[a],f=e[o],d=r.R3.subtract(u,s),l=r.R3.subtract(f,s);r.R3.normalize(d,d),r.R3.normalize(l,l);let c=r.R3.cross(d,l);r.R3.add(t[i],c,t[i]),r.R3.add(t[a],c,t[a]),r.R3.add(t[o],c,t[o])}),t.forEach(e=>{r.R3.normalize(e,e)}),t}let a={xy:[0,1],xz:[0,2],yz:[1,2]};function o(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"xy",t=a[n],r=e.map(()=>[0,0]),i=[1/0,1/0],o=[-1/0,-1/0];return e.forEach((e,n)=>{r[n][0]=e[t[0]],r[n][1]=e[t[1]],i[0]=Math.min(e[t[0]],i[0]),i[1]=Math.min(e[t[1]],i[1]),o[0]=Math.max(e[t[0]],o[0]),o[1]=Math.max(e[t[1]],o[1])}),r.forEach(e=>{e[0]=(e[0]-i[0])/(o[0]-i[0]),e[1]=(e[1]-i[1])/(o[1]-i[1])}),r}},9704:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return v}});var r=t(5671),i=t(6416),a=t(6888),o="struct LightData {\n position : vec4,\n color : vec3,\n radius : f32,\n}\nstruct LightsBuffer {\n lights: array,\n}\n@group(0) @binding(0) var lightsBuffer: LightsBuffer;\n\nstruct Config {\n numLights : u32,\n}\n@group(0) @binding(1) var config: Config;\n\nstruct LightExtent {\n min : vec4,\n max : vec4,\n}\n@group(0) @binding(2) var lightExtent: LightExtent;\n\n@compute @workgroup_size(64, 1, 1)\nfn main(@builtin(global_invocation_id) GlobalInvocationID : vec3) {\n var index = GlobalInvocationID.x;\n if (index >= config.numLights) {\n return;\n }\n\n lightsBuffer.lights[index].position.y = lightsBuffer.lights[index].position.y - 0.5 - 0.003 * (f32(index) - 64.0 * floor(f32(index) / 64.0));\n\n if (lightsBuffer.lights[index].position.y < lightExtent.min.y) {\n lightsBuffer.lights[index].position.y = lightExtent.max.y;\n }\n}\n",s="struct Uniforms {\n modelMatrix : mat4x4,\n normalModelMatrix : mat4x4,\n}\nstruct Camera {\n viewProjectionMatrix : mat4x4,\n invViewProjectionMatrix : mat4x4,\n}\n@group(0) @binding(0) var uniforms : Uniforms;\n@group(0) @binding(1) var camera : Camera;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragNormal: vec3, // normal in world space\n @location(1) fragUV: vec2,\n}\n\n@vertex\nfn main(\n @location(0) position : vec3,\n @location(1) normal : vec3,\n @location(2) uv : 
vec2\n) -> VertexOutput {\n var output : VertexOutput;\n let worldPosition = (uniforms.modelMatrix * vec4(position, 1.0)).xyz;\n output.Position = camera.viewProjectionMatrix * vec4(worldPosition, 1.0);\n output.fragNormal = normalize((uniforms.normalModelMatrix * vec4(normal, 1.0)).xyz);\n output.fragUV = uv;\n return output;\n}\n",u="struct GBufferOutput {\n @location(0) normal : vec4,\n\n // Textures: diffuse color, specular color, smoothness, emissive etc. could go here\n @location(1) albedo : vec4,\n}\n\n@fragment\nfn main(\n @location(0) fragNormal: vec3,\n @location(1) fragUV : vec2\n) -> GBufferOutput {\n // faking some kind of checkerboard texture\n let uv = floor(30.0 * fragUV);\n let c = 0.2 + 0.5 * ((uv.x + uv.y) - 2.0 * floor((uv.x + uv.y) / 2.0));\n\n var output : GBufferOutput;\n output.normal = vec4(fragNormal, 1.0);\n output.albedo = vec4(c, c, c, 1.0);\n\n return output;\n}\n",f="@vertex\nfn main(\n @builtin(vertex_index) VertexIndex : u32\n) -> @builtin(position) vec4 {\n const pos = array(\n vec2(-1.0, -1.0), vec2(1.0, -1.0), vec2(-1.0, 1.0),\n vec2(-1.0, 1.0), vec2(1.0, -1.0), vec2(1.0, 1.0),\n );\n\n return vec4(pos[VertexIndex], 0.0, 1.0);\n}\n",d="\n@group(0) @binding(0) var gBufferNormal: texture_2d;\n@group(0) @binding(1) var gBufferAlbedo: texture_2d;\n@group(0) @binding(2) var gBufferDepth: texture_depth_2d;\n\noverride canvasSizeWidth: f32;\noverride canvasSizeHeight: f32;\n\n@fragment\nfn main(\n @builtin(position) coord : vec4\n) -> @location(0) vec4 {\n var result : vec4;\n let c = coord.xy / vec2(canvasSizeWidth, canvasSizeHeight);\n if (c.x < 0.33333) {\n let rawDepth = textureLoad(\n gBufferDepth,\n vec2(floor(coord.xy)),\n 0\n );\n // remap depth into something a bit more visible\n let depth = (1.0 - rawDepth) * 50.0;\n result = vec4(depth);\n } else if (c.x < 0.66667) {\n result = textureLoad(\n gBufferNormal,\n vec2(floor(coord.xy)),\n 0\n );\n result.x = (result.x + 1.0) * 0.5;\n result.y = (result.y + 1.0) * 0.5;\n result.z = (result.z + 1.0) * 0.5;\n } else {\n result = textureLoad(\n gBufferAlbedo,\n vec2(floor(coord.xy)),\n 0\n );\n }\n return result;\n}\n",l="\n@group(0) @binding(0) var gBufferNormal: texture_2d;\n@group(0) @binding(1) var gBufferAlbedo: texture_2d;\n@group(0) @binding(2) var gBufferDepth: texture_depth_2d;\n\nstruct LightData {\n position : vec4,\n color : vec3,\n radius : f32,\n}\nstruct LightsBuffer {\n lights: array,\n}\n@group(1) @binding(0) var lightsBuffer: LightsBuffer;\n\nstruct Config {\n numLights : u32,\n}\nstruct Camera {\n viewProjectionMatrix : mat4x4,\n invViewProjectionMatrix : mat4x4,\n}\n@group(1) @binding(1) var config: Config;\n@group(1) @binding(2) var camera: Camera;\n\nfn world_from_screen_coord(coord : vec2, depth_sample: f32) -> vec3 {\n // reconstruct world-space position from the screen coordinate.\n let posClip = vec4(coord.x * 2.0 - 1.0, (1.0 - coord.y) * 2.0 - 1.0, depth_sample, 1.0);\n let posWorldW = camera.invViewProjectionMatrix * posClip;\n let posWorld = posWorldW.xyz / posWorldW.www;\n return posWorld;\n}\n\n@fragment\nfn main(\n @builtin(position) coord : vec4\n) -> @location(0) vec4 {\n var result : vec3;\n\n let depth = textureLoad(\n gBufferDepth,\n vec2(floor(coord.xy)),\n 0\n );\n\n // Don't light the sky.\n if (depth >= 1.0) {\n discard;\n }\n\n let bufferSize = textureDimensions(gBufferDepth);\n let coordUV = coord.xy / vec2(bufferSize);\n let position = world_from_screen_coord(coordUV, depth);\n\n let normal = textureLoad(\n gBufferNormal,\n vec2(floor(coord.xy)),\n 0\n ).xyz;\n\n 
let albedo = textureLoad(\n gBufferAlbedo,\n vec2(floor(coord.xy)),\n 0\n ).rgb;\n\n for (var i = 0u; i < config.numLights; i++) {\n let L = lightsBuffer.lights[i].position.xyz - position;\n let distance = length(L);\n if (distance > lightsBuffer.lights[i].radius) {\n continue;\n }\n let lambert = max(dot(normal, normalize(L)), 0.0);\n result += vec3(\n lambert * pow(1.0 - distance / lightsBuffer.lights[i].radius, 2.0) * lightsBuffer.lights[i].color * albedo\n );\n }\n\n // some manual ambient\n result += vec3(0.2);\n\n return vec4(result, 1.0);\n}\n",c="src/sample/deferredRendering/main.ts";let g=i.R3.fromValues(-50,-30,-50),m=i.R3.fromValues(50,50,50),p=async e=>{let{canvas:n,pageState:t,gui:r}=e,c=await navigator.gpu.requestAdapter(),p=await c.requestDevice();if(!t.active)return;let h=n.getContext("webgpu"),v=window.devicePixelRatio;n.width=n.clientWidth*v,n.height=n.clientHeight*v;let x=n.width/n.height,b=navigator.gpu.getPreferredCanvasFormat();h.configure({device:p,format:b,alphaMode:"premultiplied"});let B=p.createBuffer({size:8*a.W.positions.length*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});{let P=new Float32Array(B.getMappedRange());for(let y=0;y{let e=p.createBuffer({size:Uint32Array.BYTES_PER_ELEMENT,mappedAtCreation:!0,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});return new Uint32Array(e.getMappedRange())[0]=z.numLights,e.unmap(),e})();r.add(z,"mode",["rendering","gBuffers view"]),r.add(z,"numLights",1,1024).step(1).onChange(()=>{p.queue.writeBuffer(I,0,new Uint32Array([z.numLights]))});let W=p.createBuffer({size:128,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),j=p.createBuffer({size:128,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),k=p.createBindGroup({layout:V.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:W}},{binding:1,resource:{buffer:j}}]}),q=p.createBindGroup({layout:D,entries:[{binding:0,resource:S[0]},{binding:1,resource:S[1]},{binding:2,resource:S[2]}]}),Y=i.R3.sub(m,g),H=8192*Float32Array.BYTES_PER_ELEMENT,Q=p.createBuffer({size:H,usage:GPUBufferUsage.STORAGE,mappedAtCreation:!0}),X=new Float32Array(Q.getMappedRange()),J=i.vh.create(),Z=0;for(let K=0;K<1024;K++){Z=8*K;for(let $=0;$<3;$++)J[$]=Math.random()*Y[$]+g[$];J[3]=1,X.set(J,Z),J[0]=2*Math.random(),J[1]=2*Math.random(),J[2]=2*Math.random(),J[3]=20,X.set(J,Z+4)}Q.unmap();let ee=p.createBuffer({size:32,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),en=new Float32Array(8);en.set(g,0),en.set(m,4),p.queue.writeBuffer(ee,0,en.buffer,en.byteOffset,en.byteLength);let et=p.createComputePipeline({layout:"auto",compute:{module:p.createShaderModule({code:o}),entryPoint:"main"}}),er=p.createBindGroup({layout:A,entries:[{binding:0,resource:{buffer:Q}},{binding:1,resource:{buffer:I}},{binding:2,resource:{buffer:j}}]}),ei=p.createBindGroup({layout:et.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:Q}},{binding:1,resource:{buffer:I}},{binding:2,resource:{buffer:ee}}]}),ea=i.R3.fromValues(0,50,-100),eo=i.R3.fromValues(0,1,0),es=i.R3.fromValues(0,0,0),eu=i._E.perspective(2*Math.PI/5,x,1,2e3),ef=i._E.translation([0,-45,0]);p.queue.writeBuffer(W,0,ef.buffer,ef.byteOffset,ef.byteLength);let ed=i._E.invert(ef);i._E.transpose(ed,ed),p.queue.writeBuffer(W,64,ed.buffer,ed.byteOffset,ed.byteLength),requestAnimationFrame(function e(){if(!t.active)return;let n=function(){let e=Math.PI*(Date.now()/5e3),n=i._E.rotateY(i._E.translation(es),e),t=i.R3.transformMat4(ea,n),r=i._E.lookAt(t,es,eo);return 
i._E.multiply(eu,r)}();p.queue.writeBuffer(j,0,n.buffer,n.byteOffset,n.byteLength);let r=i._E.invert(n);p.queue.writeBuffer(j,64,r.buffer,r.byteOffset,r.byteLength);let a=p.createCommandEncoder();{let o=a.beginRenderPass(F);o.setPipeline(V),o.setBindGroup(0,k),o.setVertexBuffer(0,B),o.setIndexBuffer(E,"uint16"),o.drawIndexed(w),o.end()}{let s=a.beginComputePass();s.setPipeline(et),s.setBindGroup(0,ei),s.dispatchWorkgroups(Math.ceil(16)),s.end()}if("gBuffers view"===z.mode){O.colorAttachments[0].view=h.getCurrentTexture().createView();let u=a.beginRenderPass(O);u.setPipeline(C),u.setBindGroup(0,q),u.draw(6),u.end()}else{O.colorAttachments[0].view=h.getCurrentTexture().createView();let f=a.beginRenderPass(O);f.setPipeline(N),f.setBindGroup(0,q),f.setBindGroup(1,er),f.draw(6),f.end()}p.queue.submit([a.finish()]),requestAnimationFrame(e)})},h=()=>(0,r.Tl)({name:"Deferred Rendering",description:"This example shows how to do deferred rendering with webgpu.\n Render geometry info to multiple targets in the gBuffers in the first pass.\n In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.\n And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.\n World-space positions are reconstructed from the depth texture and camera matrix.\n We also update light position in a compute shader, where further operations like tile/cluster culling could happen.\n The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer\n in the middle, and the albedo G-buffer on the right side of the screen.\n ",gui:!0,init:p,sources:[{name:c.substring(29),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\nimport { mat4, vec3, vec4 } from 'wgpu-matrix';\nimport { mesh } from '../../meshes/stanfordDragon';\n\nimport lightUpdate from './lightUpdate.wgsl';\nimport vertexWriteGBuffers from './vertexWriteGBuffers.wgsl';\nimport fragmentWriteGBuffers from './fragmentWriteGBuffers.wgsl';\nimport vertexTextureQuad from './vertexTextureQuad.wgsl';\nimport fragmentGBuffersDebugView from './fragmentGBuffersDebugView.wgsl';\nimport fragmentDeferredRendering from './fragmentDeferredRendering.wgsl';\n\nconst kMaxNumLights = 1024;\nconst lightExtentMin = vec3.fromValues(-50, -30, -50);\nconst lightExtentMax = vec3.fromValues(50, 50, 50);\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const aspect = canvas.width / canvas.height;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create the model vertex buffer.\n const kVertexStride = 8;\n const vertexBuffer = device.createBuffer({\n // position: vec3, normal: vec3, uv: vec2\n size:\n mesh.positions.length * kVertexStride * Float32Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Float32Array(vertexBuffer.getMappedRange());\n for (let i = 0; i < mesh.positions.length; ++i) {\n mapping.set(mesh.positions[i], kVertexStride * 
i);\n mapping.set(mesh.normals[i], kVertexStride * i + 3);\n mapping.set(mesh.uvs[i], kVertexStride * i + 6);\n }\n vertexBuffer.unmap();\n }\n\n // Create the model index buffer.\n const indexCount = mesh.triangles.length * 3;\n const indexBuffer = device.createBuffer({\n size: indexCount * Uint16Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Uint16Array(indexBuffer.getMappedRange());\n for (let i = 0; i < mesh.triangles.length; ++i) {\n mapping.set(mesh.triangles[i], 3 * i);\n }\n indexBuffer.unmap();\n }\n\n // GBuffer texture render targets\n const gBufferTexture2DFloat16 = device.createTexture({\n size: [canvas.width, canvas.height],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'rgba16float',\n });\n const gBufferTextureAlbedo = device.createTexture({\n size: [canvas.width, canvas.height],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'bgra8unorm',\n });\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n });\n\n const gBufferTextureViews = [\n gBufferTexture2DFloat16.createView(),\n gBufferTextureAlbedo.createView(),\n depthTexture.createView(),\n ];\n\n const vertexBuffers: Iterable = [\n {\n arrayStride: Float32Array.BYTES_PER_ELEMENT * 8,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: 0,\n format: 'float32x3',\n },\n {\n // normal\n shaderLocation: 1,\n offset: Float32Array.BYTES_PER_ELEMENT * 3,\n format: 'float32x3',\n },\n {\n // uv\n shaderLocation: 2,\n offset: Float32Array.BYTES_PER_ELEMENT * 6,\n format: 'float32x2',\n },\n ],\n },\n ];\n\n const primitive: GPUPrimitiveState = {\n topology: 'triangle-list',\n cullMode: 'back',\n };\n\n const writeGBuffersPipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: vertexWriteGBuffers,\n }),\n entryPoint: 'main',\n buffers: vertexBuffers,\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentWriteGBuffers,\n }),\n entryPoint: 'main',\n targets: [\n // normal\n { format: 'rgba16float' },\n // albedo\n { format: 'bgra8unorm' },\n ],\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n primitive,\n });\n\n const gBufferTexturesBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'unfilterable-float',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'unfilterable-float',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'depth',\n },\n },\n ],\n });\n\n const lightsBufferBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n buffer: {\n type: 'read-only-storage',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n buffer: {\n type: 'uniform',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.FRAGMENT,\n buffer: {\n type: 'uniform',\n },\n },\n ],\n });\n\n const gBuffersDebugViewPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [gBufferTexturesBindGroupLayout],\n }),\n vertex: {\n module: device.createShaderModule({\n code: 
vertexTextureQuad,\n }),\n entryPoint: 'main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentGBuffersDebugView,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n constants: {\n canvasSizeWidth: canvas.width,\n canvasSizeHeight: canvas.height,\n },\n },\n primitive,\n });\n\n const deferredRenderPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [\n gBufferTexturesBindGroupLayout,\n lightsBufferBindGroupLayout,\n ],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexTextureQuad,\n }),\n entryPoint: 'main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentDeferredRendering,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive,\n });\n\n const writeGBufferPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: gBufferTextureViews[0],\n\n clearValue: { r: 0.0, g: 0.0, b: 1.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n {\n view: gBufferTextureViews[1],\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const textureQuadPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n // view is acquired and set in render loop.\n view: undefined,\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n };\n\n const settings = {\n mode: 'rendering',\n numLights: 128,\n };\n const configUniformBuffer = (() => {\n const buffer = device.createBuffer({\n size: Uint32Array.BYTES_PER_ELEMENT,\n mappedAtCreation: true,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n new Uint32Array(buffer.getMappedRange())[0] = settings.numLights;\n buffer.unmap();\n return buffer;\n })();\n\n gui.add(settings, 'mode', ['rendering', 'gBuffers view']);\n gui\n .add(settings, 'numLights', 1, kMaxNumLights)\n .step(1)\n .onChange(() => {\n device.queue.writeBuffer(\n configUniformBuffer,\n 0,\n new Uint32Array([settings.numLights])\n );\n });\n\n const modelUniformBuffer = device.createBuffer({\n size: 4 * 16 * 2, // two 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const cameraUniformBuffer = device.createBuffer({\n size: 4 * 16 * 2, // two 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sceneUniformBindGroup = device.createBindGroup({\n layout: writeGBuffersPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: modelUniformBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: cameraUniformBuffer,\n },\n },\n ],\n });\n\n const gBufferTexturesBindGroup = device.createBindGroup({\n layout: gBufferTexturesBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: gBufferTextureViews[0],\n },\n {\n binding: 1,\n resource: gBufferTextureViews[1],\n },\n {\n binding: 2,\n resource: gBufferTextureViews[2],\n },\n ],\n });\n\n // Lights data are uploaded in a storage buffer\n // which could be updated/culled/etc. 
with a compute shader\n const extent = vec3.sub(lightExtentMax, lightExtentMin);\n const lightDataStride = 8;\n const bufferSizeInByte =\n Float32Array.BYTES_PER_ELEMENT * lightDataStride * kMaxNumLights;\n const lightsBuffer = device.createBuffer({\n size: bufferSizeInByte,\n usage: GPUBufferUsage.STORAGE,\n mappedAtCreation: true,\n });\n\n // We randomaly populate lights randomly in a box range\n // And simply move them along y-axis per frame to show they are\n // dynamic lightings\n const lightData = new Float32Array(lightsBuffer.getMappedRange());\n const tmpVec4 = vec4.create();\n let offset = 0;\n for (let i = 0; i < kMaxNumLights; i++) {\n offset = lightDataStride * i;\n // position\n for (let i = 0; i < 3; i++) {\n tmpVec4[i] = Math.random() * extent[i] + lightExtentMin[i];\n }\n tmpVec4[3] = 1;\n lightData.set(tmpVec4, offset);\n // color\n tmpVec4[0] = Math.random() * 2;\n tmpVec4[1] = Math.random() * 2;\n tmpVec4[2] = Math.random() * 2;\n // radius\n tmpVec4[3] = 20.0;\n lightData.set(tmpVec4, offset + 4);\n }\n lightsBuffer.unmap();\n\n const lightExtentBuffer = device.createBuffer({\n size: 4 * 8,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n const lightExtentData = new Float32Array(8);\n lightExtentData.set(lightExtentMin, 0);\n lightExtentData.set(lightExtentMax, 4);\n device.queue.writeBuffer(\n lightExtentBuffer,\n 0,\n lightExtentData.buffer,\n lightExtentData.byteOffset,\n lightExtentData.byteLength\n );\n\n const lightUpdateComputePipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({\n code: lightUpdate,\n }),\n entryPoint: 'main',\n },\n });\n const lightsBufferBindGroup = device.createBindGroup({\n layout: lightsBufferBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: lightsBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: configUniformBuffer,\n },\n },\n {\n binding: 2,\n resource: {\n buffer: cameraUniformBuffer,\n },\n },\n ],\n });\n const lightsBufferComputeBindGroup = device.createBindGroup({\n layout: lightUpdateComputePipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: lightsBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: configUniformBuffer,\n },\n },\n {\n binding: 2,\n resource: {\n buffer: lightExtentBuffer,\n },\n },\n ],\n });\n //--------------------\n\n // Scene matrices\n const eyePosition = vec3.fromValues(0, 50, -100);\n const upVector = vec3.fromValues(0, 1, 0);\n const origin = vec3.fromValues(0, 0, 0);\n\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 2000.0\n );\n\n // Move the model so it's centered.\n const modelMatrix = mat4.translation([0, -45, 0]);\n\n const modelData = modelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 0,\n modelData.buffer,\n modelData.byteOffset,\n modelData.byteLength\n );\n const invertTransposeModelMatrix = mat4.invert(modelMatrix);\n mat4.transpose(invertTransposeModelMatrix, invertTransposeModelMatrix);\n const normalModelData = invertTransposeModelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 64,\n normalModelData.buffer,\n normalModelData.byteOffset,\n normalModelData.byteLength\n );\n\n // Rotates the camera around the origin based on time.\n function getCameraViewProjMatrix() {\n const rad = Math.PI * (Date.now() / 5000);\n const rotation = mat4.rotateY(mat4.translation(origin), rad);\n const rotatedEyePosition = vec3.transformMat4(eyePosition, rotation);\n\n const 
viewMatrix = mat4.lookAt(rotatedEyePosition, origin, upVector);\n\n return mat4.multiply(projectionMatrix, viewMatrix) as Float32Array;\n }\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const cameraViewProj = getCameraViewProjMatrix();\n device.queue.writeBuffer(\n cameraUniformBuffer,\n 0,\n cameraViewProj.buffer,\n cameraViewProj.byteOffset,\n cameraViewProj.byteLength\n );\n const cameraInvViewProj = mat4.invert(cameraViewProj) as Float32Array;\n device.queue.writeBuffer(\n cameraUniformBuffer,\n 64,\n cameraInvViewProj.buffer,\n cameraInvViewProj.byteOffset,\n cameraInvViewProj.byteLength\n );\n\n const commandEncoder = device.createCommandEncoder();\n {\n // Write position, normal, albedo etc. data to gBuffers\n const gBufferPass = commandEncoder.beginRenderPass(\n writeGBufferPassDescriptor\n );\n gBufferPass.setPipeline(writeGBuffersPipeline);\n gBufferPass.setBindGroup(0, sceneUniformBindGroup);\n gBufferPass.setVertexBuffer(0, vertexBuffer);\n gBufferPass.setIndexBuffer(indexBuffer, 'uint16');\n gBufferPass.drawIndexed(indexCount);\n gBufferPass.end();\n }\n {\n // Update lights position\n const lightPass = commandEncoder.beginComputePass();\n lightPass.setPipeline(lightUpdateComputePipeline);\n lightPass.setBindGroup(0, lightsBufferComputeBindGroup);\n lightPass.dispatchWorkgroups(Math.ceil(kMaxNumLights / 64));\n lightPass.end();\n }\n {\n if (settings.mode === 'gBuffers view') {\n // GBuffers debug view\n // Left: depth\n // Middle: normal\n // Right: albedo (use uv to mimic a checkerboard texture)\n textureQuadPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n const debugViewPass = commandEncoder.beginRenderPass(\n textureQuadPassDescriptor\n );\n debugViewPass.setPipeline(gBuffersDebugViewPipeline);\n debugViewPass.setBindGroup(0, gBufferTexturesBindGroup);\n debugViewPass.draw(6);\n debugViewPass.end();\n } else {\n // Deferred rendering\n textureQuadPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n const deferredRenderingPass = commandEncoder.beginRenderPass(\n textureQuadPassDescriptor\n );\n deferredRenderingPass.setPipeline(deferredRenderPipeline);\n deferredRenderingPass.setBindGroup(0, gBufferTexturesBindGroup);\n deferredRenderingPass.setBindGroup(1, lightsBufferBindGroup);\n deferredRenderingPass.draw(6);\n deferredRenderingPass.end();\n }\n }\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst DeferredRendering: () => JSX.Element = () =>\n makeSample({\n name: 'Deferred Rendering',\n description: `This example shows how to do deferred rendering with webgpu.\n Render geometry info to multiple targets in the gBuffers in the first pass.\n In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.\n And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.\n World-space positions are reconstructed from the depth texture and camera matrix.\n We also update light position in a compute shader, where further operations like tile/cluster culling could happen.\n The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer\n in the middle, and the albedo G-buffer on the right side of the screen.\n `,\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: 
__SOURCE__,\n },\n {\n name: 'vertexWriteGBuffers.wgsl',\n contents: vertexWriteGBuffers,\n editable: true,\n },\n {\n name: 'fragmentWriteGBuffers.wgsl',\n contents: fragmentWriteGBuffers,\n editable: true,\n },\n {\n name: 'vertexTextureQuad.wgsl',\n contents: vertexTextureQuad,\n editable: true,\n },\n {\n name: 'fragmentGBuffersDebugView.wgsl',\n contents: fragmentGBuffersDebugView,\n editable: true,\n },\n {\n name: 'fragmentDeferredRendering.wgsl',\n contents: fragmentDeferredRendering,\n editable: true,\n },\n {\n name: 'lightUpdate.wgsl',\n contents: lightUpdate,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default DeferredRendering;\n"},{name:"vertexWriteGBuffers.wgsl",contents:s,editable:!0},{name:"fragmentWriteGBuffers.wgsl",contents:u,editable:!0},{name:"vertexTextureQuad.wgsl",contents:f,editable:!0},{name:"fragmentGBuffersDebugView.wgsl",contents:d,editable:!0},{name:"fragmentDeferredRendering.wgsl",contents:l,editable:!0},{name:"lightUpdate.wgsl",contents:o,editable:!0}],filename:c});var v=h},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/704.5ca5973fd6c78894.js b/_next/static/chunks/704.5ca5973fd6c78894.js new file mode 100644 index 00000000..426c63e2 --- /dev/null +++ b/_next/static/chunks/704.5ca5973fd6c78894.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[704],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return l},hu:function(){return c}});var r=t(5893),i=t(9008),a=t.n(i),o=t(1163),s=t(7294),u=t(9147),f=t.n(u);t(7319);let d=e=>{let n=(0,s.useRef)(null),i=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:i}=e;return{name:n,...function(e){let n;let i=null;{i=document.createElement("div");let a=t(4631);n=a(i,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){i&&t&&(t.appendChild(i),n.setOption("value",e))}})})}}}(i)}}),e.sources),u=(0,s.useRef)(null),d=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376),r=new n.GUI({autoPlace:!1});return r.domElement.style.position="relative",r.domElement.style.zIndex="1000",r}},[]),l=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),g=(0,o.useRouter)(),m=g.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[p,h]=(0,s.useState)(null),[v,x]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(m?x(m[1]):x(i[0].name),d&&u.current)for(u.current.appendChild(d.domElement);d.__controllers.length>0;)d.__controllers[0].remove();c&&l.current&&(c.dom.style.position="absolute",c.showPanel(1),l.current.appendChild(c.dom));let t={active:!0},r=()=>{t.active=!1};try{let a=n.current;if(!a)throw Error("The canvas is not available");let o=e.init({canvas:a,pageState:t,gui:d,stats:c});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(s){console.error(s),h(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(a(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU 
Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),p?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(p)})]}):null]}),(0,r.jsxs)("div",{className:f().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:l}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:f().sourceFileNav,children:(0,r.jsx)("ul",{children:i.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},n))})}),i.map((e,n)=>(0,r.jsx)(e.Container,{className:f().sourceFileContainer,"data-active":v==e.name},n))]})]})},l=e=>(0,r.jsx)(d,{...e});function c(e,n){if(!e)throw Error(n)}},6888:function(e,n,t){"use strict";t.d(n,{W:function(){return a}});var r=t(6906),i=t(9385);let a={positions:r.m,triangles:r.g,normals:[],uvs:[]};a.normals=(0,i.b)(a.positions,a.triangles),a.uvs=(0,i.q)(a.positions,"xy"),a.triangles.push([a.positions.length,a.positions.length+2,a.positions.length+1],[a.positions.length,a.positions.length+1,a.positions.length+3]),a.positions.push([-100,20,-100],[100,20,100],[-100,20,100],[100,20,-100]),a.normals.push([0,1,0],[0,1,0],[0,1,0],[0,1,0]),a.uvs.push([0,0],[1,1],[0,1],[1,0])},9385:function(e,n,t){"use strict";t.d(n,{b:function(){return i},q:function(){return o}});var r=t(6416);function i(e,n){let t=e.map(()=>[0,0,0]);return n.forEach(n=>{let[i,a,o]=n,s=e[i],u=e[a],f=e[o],d=r.R3.subtract(u,s),l=r.R3.subtract(f,s);r.R3.normalize(d,d),r.R3.normalize(l,l);let c=r.R3.cross(d,l);r.R3.add(t[i],c,t[i]),r.R3.add(t[a],c,t[a]),r.R3.add(t[o],c,t[o])}),t.forEach(e=>{r.R3.normalize(e,e)}),t}let a={xy:[0,1],xz:[0,2],yz:[1,2]};function o(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"xy",t=a[n],r=e.map(()=>[0,0]),i=[1/0,1/0],o=[-1/0,-1/0];return e.forEach((e,n)=>{r[n][0]=e[t[0]],r[n][1]=e[t[1]],i[0]=Math.min(e[t[0]],i[0]),i[1]=Math.min(e[t[1]],i[1]),o[0]=Math.max(e[t[0]],o[0]),o[1]=Math.max(e[t[1]],o[1])}),r.forEach(e=>{e[0]=(e[0]-i[0])/(o[0]-i[0]),e[1]=(e[1]-i[1])/(o[1]-i[1])}),r}},9704:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return v}});var r=t(5671),i=t(6416),a=t(6888),o="struct LightData {\n position : vec4,\n color : vec3,\n radius : f32,\n}\nstruct LightsBuffer {\n lights: array,\n}\n@group(0) @binding(0) var lightsBuffer: LightsBuffer;\n\nstruct Config {\n numLights : u32,\n}\n@group(0) @binding(1) var config: Config;\n\nstruct LightExtent {\n min : vec4,\n max : vec4,\n}\n@group(0) @binding(2) var lightExtent: LightExtent;\n\n@compute @workgroup_size(64, 1, 1)\nfn main(@builtin(global_invocation_id) GlobalInvocationID : vec3) {\n var index = GlobalInvocationID.x;\n if (index >= config.numLights) {\n return;\n }\n\n lightsBuffer.lights[index].position.y = lightsBuffer.lights[index].position.y - 0.5 - 0.003 * (f32(index) - 64.0 * floor(f32(index) / 64.0));\n\n if (lightsBuffer.lights[index].position.y < lightExtent.min.y) {\n lightsBuffer.lights[index].position.y = lightExtent.max.y;\n }\n}\n",s="struct Uniforms 
{\n modelMatrix : mat4x4,\n normalModelMatrix : mat4x4,\n}\nstruct Camera {\n viewProjectionMatrix : mat4x4,\n invViewProjectionMatrix : mat4x4,\n}\n@group(0) @binding(0) var uniforms : Uniforms;\n@group(0) @binding(1) var camera : Camera;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragNormal: vec3, // normal in world space\n @location(1) fragUV: vec2,\n}\n\n@vertex\nfn main(\n @location(0) position : vec3,\n @location(1) normal : vec3,\n @location(2) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n let worldPosition = (uniforms.modelMatrix * vec4(position, 1.0)).xyz;\n output.Position = camera.viewProjectionMatrix * vec4(worldPosition, 1.0);\n output.fragNormal = normalize((uniforms.normalModelMatrix * vec4(normal, 1.0)).xyz);\n output.fragUV = uv;\n return output;\n}\n",u="struct GBufferOutput {\n @location(0) normal : vec4,\n\n // Textures: diffuse color, specular color, smoothness, emissive etc. could go here\n @location(1) albedo : vec4,\n}\n\n@fragment\nfn main(\n @location(0) fragNormal: vec3,\n @location(1) fragUV : vec2\n) -> GBufferOutput {\n // faking some kind of checkerboard texture\n let uv = floor(30.0 * fragUV);\n let c = 0.2 + 0.5 * ((uv.x + uv.y) - 2.0 * floor((uv.x + uv.y) / 2.0));\n\n var output : GBufferOutput;\n output.normal = vec4(normalize(fragNormal), 1.0);\n output.albedo = vec4(c, c, c, 1.0);\n\n return output;\n}\n",f="@vertex\nfn main(\n @builtin(vertex_index) VertexIndex : u32\n) -> @builtin(position) vec4 {\n const pos = array(\n vec2(-1.0, -1.0), vec2(1.0, -1.0), vec2(-1.0, 1.0),\n vec2(-1.0, 1.0), vec2(1.0, -1.0), vec2(1.0, 1.0),\n );\n\n return vec4(pos[VertexIndex], 0.0, 1.0);\n}\n",d="\n@group(0) @binding(0) var gBufferNormal: texture_2d;\n@group(0) @binding(1) var gBufferAlbedo: texture_2d;\n@group(0) @binding(2) var gBufferDepth: texture_depth_2d;\n\noverride canvasSizeWidth: f32;\noverride canvasSizeHeight: f32;\n\n@fragment\nfn main(\n @builtin(position) coord : vec4\n) -> @location(0) vec4 {\n var result : vec4;\n let c = coord.xy / vec2(canvasSizeWidth, canvasSizeHeight);\n if (c.x < 0.33333) {\n let rawDepth = textureLoad(\n gBufferDepth,\n vec2(floor(coord.xy)),\n 0\n );\n // remap depth into something a bit more visible\n let depth = (1.0 - rawDepth) * 50.0;\n result = vec4(depth);\n } else if (c.x < 0.66667) {\n result = textureLoad(\n gBufferNormal,\n vec2(floor(coord.xy)),\n 0\n );\n result.x = (result.x + 1.0) * 0.5;\n result.y = (result.y + 1.0) * 0.5;\n result.z = (result.z + 1.0) * 0.5;\n } else {\n result = textureLoad(\n gBufferAlbedo,\n vec2(floor(coord.xy)),\n 0\n );\n }\n return result;\n}\n",l="\n@group(0) @binding(0) var gBufferNormal: texture_2d;\n@group(0) @binding(1) var gBufferAlbedo: texture_2d;\n@group(0) @binding(2) var gBufferDepth: texture_depth_2d;\n\nstruct LightData {\n position : vec4,\n color : vec3,\n radius : f32,\n}\nstruct LightsBuffer {\n lights: array,\n}\n@group(1) @binding(0) var lightsBuffer: LightsBuffer;\n\nstruct Config {\n numLights : u32,\n}\nstruct Camera {\n viewProjectionMatrix : mat4x4,\n invViewProjectionMatrix : mat4x4,\n}\n@group(1) @binding(1) var config: Config;\n@group(1) @binding(2) var camera: Camera;\n\nfn world_from_screen_coord(coord : vec2, depth_sample: f32) -> vec3 {\n // reconstruct world-space position from the screen coordinate.\n let posClip = vec4(coord.x * 2.0 - 1.0, (1.0 - coord.y) * 2.0 - 1.0, depth_sample, 1.0);\n let posWorldW = camera.invViewProjectionMatrix * posClip;\n let posWorld = posWorldW.xyz / posWorldW.www;\n return 
posWorld;\n}\n\n@fragment\nfn main(\n @builtin(position) coord : vec4\n) -> @location(0) vec4 {\n var result : vec3;\n\n let depth = textureLoad(\n gBufferDepth,\n vec2(floor(coord.xy)),\n 0\n );\n\n // Don't light the sky.\n if (depth >= 1.0) {\n discard;\n }\n\n let bufferSize = textureDimensions(gBufferDepth);\n let coordUV = coord.xy / vec2(bufferSize);\n let position = world_from_screen_coord(coordUV, depth);\n\n let normal = textureLoad(\n gBufferNormal,\n vec2(floor(coord.xy)),\n 0\n ).xyz;\n\n let albedo = textureLoad(\n gBufferAlbedo,\n vec2(floor(coord.xy)),\n 0\n ).rgb;\n\n for (var i = 0u; i < config.numLights; i++) {\n let L = lightsBuffer.lights[i].position.xyz - position;\n let distance = length(L);\n if (distance > lightsBuffer.lights[i].radius) {\n continue;\n }\n let lambert = max(dot(normal, normalize(L)), 0.0);\n result += vec3(\n lambert * pow(1.0 - distance / lightsBuffer.lights[i].radius, 2.0) * lightsBuffer.lights[i].color * albedo\n );\n }\n\n // some manual ambient\n result += vec3(0.2);\n\n return vec4(result, 1.0);\n}\n",c="src/sample/deferredRendering/main.ts";let g=i.R3.fromValues(-50,-30,-50),m=i.R3.fromValues(50,50,50),p=async e=>{let{canvas:n,pageState:t,gui:r}=e,c=await navigator.gpu.requestAdapter(),p=await c.requestDevice();if(!t.active)return;let h=n.getContext("webgpu"),v=window.devicePixelRatio;n.width=n.clientWidth*v,n.height=n.clientHeight*v;let x=n.width/n.height,b=navigator.gpu.getPreferredCanvasFormat();h.configure({device:p,format:b,alphaMode:"premultiplied"});let B=p.createBuffer({size:8*a.W.positions.length*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});{let P=new Float32Array(B.getMappedRange());for(let y=0;y{let e=p.createBuffer({size:Uint32Array.BYTES_PER_ELEMENT,mappedAtCreation:!0,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});return new Uint32Array(e.getMappedRange())[0]=z.numLights,e.unmap(),e})();r.add(z,"mode",["rendering","gBuffers view"]),r.add(z,"numLights",1,1024).step(1).onChange(()=>{p.queue.writeBuffer(I,0,new Uint32Array([z.numLights]))});let W=p.createBuffer({size:128,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),j=p.createBuffer({size:128,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),k=p.createBindGroup({layout:V.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:W}},{binding:1,resource:{buffer:j}}]}),q=p.createBindGroup({layout:D,entries:[{binding:0,resource:S[0]},{binding:1,resource:S[1]},{binding:2,resource:S[2]}]}),Y=i.R3.sub(m,g),H=8192*Float32Array.BYTES_PER_ELEMENT,Q=p.createBuffer({size:H,usage:GPUBufferUsage.STORAGE,mappedAtCreation:!0}),X=new Float32Array(Q.getMappedRange()),J=i.vh.create(),Z=0;for(let K=0;K<1024;K++){Z=8*K;for(let $=0;$<3;$++)J[$]=Math.random()*Y[$]+g[$];J[3]=1,X.set(J,Z),J[0]=2*Math.random(),J[1]=2*Math.random(),J[2]=2*Math.random(),J[3]=20,X.set(J,Z+4)}Q.unmap();let ee=p.createBuffer({size:32,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),en=new Float32Array(8);en.set(g,0),en.set(m,4),p.queue.writeBuffer(ee,0,en.buffer,en.byteOffset,en.byteLength);let 
et=p.createComputePipeline({layout:"auto",compute:{module:p.createShaderModule({code:o}),entryPoint:"main"}}),er=p.createBindGroup({layout:A,entries:[{binding:0,resource:{buffer:Q}},{binding:1,resource:{buffer:I}},{binding:2,resource:{buffer:j}}]}),ei=p.createBindGroup({layout:et.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:Q}},{binding:1,resource:{buffer:I}},{binding:2,resource:{buffer:ee}}]}),ea=i.R3.fromValues(0,50,-100),eo=i.R3.fromValues(0,1,0),es=i.R3.fromValues(0,0,0),eu=i._E.perspective(2*Math.PI/5,x,1,2e3),ef=i._E.translation([0,-45,0]);p.queue.writeBuffer(W,0,ef.buffer,ef.byteOffset,ef.byteLength);let ed=i._E.invert(ef);i._E.transpose(ed,ed),p.queue.writeBuffer(W,64,ed.buffer,ed.byteOffset,ed.byteLength),requestAnimationFrame(function e(){if(!t.active)return;let n=function(){let e=Math.PI*(Date.now()/5e3),n=i._E.rotateY(i._E.translation(es),e),t=i.R3.transformMat4(ea,n),r=i._E.lookAt(t,es,eo);return i._E.multiply(eu,r)}();p.queue.writeBuffer(j,0,n.buffer,n.byteOffset,n.byteLength);let r=i._E.invert(n);p.queue.writeBuffer(j,64,r.buffer,r.byteOffset,r.byteLength);let a=p.createCommandEncoder();{let o=a.beginRenderPass(F);o.setPipeline(V),o.setBindGroup(0,k),o.setVertexBuffer(0,B),o.setIndexBuffer(E,"uint16"),o.drawIndexed(w),o.end()}{let s=a.beginComputePass();s.setPipeline(et),s.setBindGroup(0,ei),s.dispatchWorkgroups(Math.ceil(16)),s.end()}if("gBuffers view"===z.mode){O.colorAttachments[0].view=h.getCurrentTexture().createView();let u=a.beginRenderPass(O);u.setPipeline(C),u.setBindGroup(0,q),u.draw(6),u.end()}else{O.colorAttachments[0].view=h.getCurrentTexture().createView();let f=a.beginRenderPass(O);f.setPipeline(N),f.setBindGroup(0,q),f.setBindGroup(1,er),f.draw(6),f.end()}p.queue.submit([a.finish()]),requestAnimationFrame(e)})},h=()=>(0,r.Tl)({name:"Deferred Rendering",description:"This example shows how to do deferred rendering with webgpu.\n Render geometry info to multiple targets in the gBuffers in the first pass.\n In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.\n And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.\n World-space positions are reconstructed from the depth texture and camera matrix.\n We also update light position in a compute shader, where further operations like tile/cluster culling could happen.\n The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer\n in the middle, and the albedo G-buffer on the right side of the screen.\n ",gui:!0,init:p,sources:[{name:c.substring(29),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\nimport { mat4, vec3, vec4 } from 'wgpu-matrix';\nimport { mesh } from '../../meshes/stanfordDragon';\n\nimport lightUpdate from './lightUpdate.wgsl';\nimport vertexWriteGBuffers from './vertexWriteGBuffers.wgsl';\nimport fragmentWriteGBuffers from './fragmentWriteGBuffers.wgsl';\nimport vertexTextureQuad from './vertexTextureQuad.wgsl';\nimport fragmentGBuffersDebugView from './fragmentGBuffersDebugView.wgsl';\nimport fragmentDeferredRendering from './fragmentDeferredRendering.wgsl';\n\nconst kMaxNumLights = 1024;\nconst lightExtentMin = vec3.fromValues(-50, -30, -50);\nconst lightExtentMax = vec3.fromValues(50, 50, 50);\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) 
return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const aspect = canvas.width / canvas.height;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create the model vertex buffer.\n const kVertexStride = 8;\n const vertexBuffer = device.createBuffer({\n // position: vec3, normal: vec3, uv: vec2\n size:\n mesh.positions.length * kVertexStride * Float32Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Float32Array(vertexBuffer.getMappedRange());\n for (let i = 0; i < mesh.positions.length; ++i) {\n mapping.set(mesh.positions[i], kVertexStride * i);\n mapping.set(mesh.normals[i], kVertexStride * i + 3);\n mapping.set(mesh.uvs[i], kVertexStride * i + 6);\n }\n vertexBuffer.unmap();\n }\n\n // Create the model index buffer.\n const indexCount = mesh.triangles.length * 3;\n const indexBuffer = device.createBuffer({\n size: indexCount * Uint16Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Uint16Array(indexBuffer.getMappedRange());\n for (let i = 0; i < mesh.triangles.length; ++i) {\n mapping.set(mesh.triangles[i], 3 * i);\n }\n indexBuffer.unmap();\n }\n\n // GBuffer texture render targets\n const gBufferTexture2DFloat16 = device.createTexture({\n size: [canvas.width, canvas.height],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'rgba16float',\n });\n const gBufferTextureAlbedo = device.createTexture({\n size: [canvas.width, canvas.height],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'bgra8unorm',\n });\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n });\n\n const gBufferTextureViews = [\n gBufferTexture2DFloat16.createView(),\n gBufferTextureAlbedo.createView(),\n depthTexture.createView(),\n ];\n\n const vertexBuffers: Iterable = [\n {\n arrayStride: Float32Array.BYTES_PER_ELEMENT * 8,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: 0,\n format: 'float32x3',\n },\n {\n // normal\n shaderLocation: 1,\n offset: Float32Array.BYTES_PER_ELEMENT * 3,\n format: 'float32x3',\n },\n {\n // uv\n shaderLocation: 2,\n offset: Float32Array.BYTES_PER_ELEMENT * 6,\n format: 'float32x2',\n },\n ],\n },\n ];\n\n const primitive: GPUPrimitiveState = {\n topology: 'triangle-list',\n cullMode: 'back',\n };\n\n const writeGBuffersPipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: vertexWriteGBuffers,\n }),\n entryPoint: 'main',\n buffers: vertexBuffers,\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentWriteGBuffers,\n }),\n entryPoint: 'main',\n targets: [\n // normal\n { format: 'rgba16float' },\n // albedo\n { format: 'bgra8unorm' },\n ],\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n primitive,\n });\n\n const gBufferTexturesBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 
'unfilterable-float',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'unfilterable-float',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'depth',\n },\n },\n ],\n });\n\n const lightsBufferBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n buffer: {\n type: 'read-only-storage',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n buffer: {\n type: 'uniform',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.FRAGMENT,\n buffer: {\n type: 'uniform',\n },\n },\n ],\n });\n\n const gBuffersDebugViewPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [gBufferTexturesBindGroupLayout],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexTextureQuad,\n }),\n entryPoint: 'main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentGBuffersDebugView,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n constants: {\n canvasSizeWidth: canvas.width,\n canvasSizeHeight: canvas.height,\n },\n },\n primitive,\n });\n\n const deferredRenderPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [\n gBufferTexturesBindGroupLayout,\n lightsBufferBindGroupLayout,\n ],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexTextureQuad,\n }),\n entryPoint: 'main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentDeferredRendering,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive,\n });\n\n const writeGBufferPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: gBufferTextureViews[0],\n\n clearValue: { r: 0.0, g: 0.0, b: 1.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n {\n view: gBufferTextureViews[1],\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const textureQuadPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n // view is acquired and set in render loop.\n view: undefined,\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n };\n\n const settings = {\n mode: 'rendering',\n numLights: 128,\n };\n const configUniformBuffer = (() => {\n const buffer = device.createBuffer({\n size: Uint32Array.BYTES_PER_ELEMENT,\n mappedAtCreation: true,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n new Uint32Array(buffer.getMappedRange())[0] = settings.numLights;\n buffer.unmap();\n return buffer;\n })();\n\n gui.add(settings, 'mode', ['rendering', 'gBuffers view']);\n gui\n .add(settings, 'numLights', 1, kMaxNumLights)\n .step(1)\n .onChange(() => {\n device.queue.writeBuffer(\n configUniformBuffer,\n 0,\n new Uint32Array([settings.numLights])\n );\n });\n\n const modelUniformBuffer = device.createBuffer({\n size: 4 * 16 * 2, // two 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const cameraUniformBuffer = device.createBuffer({\n size: 4 * 16 * 2, // two 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sceneUniformBindGroup = 
device.createBindGroup({\n layout: writeGBuffersPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: modelUniformBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: cameraUniformBuffer,\n },\n },\n ],\n });\n\n const gBufferTexturesBindGroup = device.createBindGroup({\n layout: gBufferTexturesBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: gBufferTextureViews[0],\n },\n {\n binding: 1,\n resource: gBufferTextureViews[1],\n },\n {\n binding: 2,\n resource: gBufferTextureViews[2],\n },\n ],\n });\n\n // Lights data are uploaded in a storage buffer\n // which could be updated/culled/etc. with a compute shader\n const extent = vec3.sub(lightExtentMax, lightExtentMin);\n const lightDataStride = 8;\n const bufferSizeInByte =\n Float32Array.BYTES_PER_ELEMENT * lightDataStride * kMaxNumLights;\n const lightsBuffer = device.createBuffer({\n size: bufferSizeInByte,\n usage: GPUBufferUsage.STORAGE,\n mappedAtCreation: true,\n });\n\n // We randomaly populate lights randomly in a box range\n // And simply move them along y-axis per frame to show they are\n // dynamic lightings\n const lightData = new Float32Array(lightsBuffer.getMappedRange());\n const tmpVec4 = vec4.create();\n let offset = 0;\n for (let i = 0; i < kMaxNumLights; i++) {\n offset = lightDataStride * i;\n // position\n for (let i = 0; i < 3; i++) {\n tmpVec4[i] = Math.random() * extent[i] + lightExtentMin[i];\n }\n tmpVec4[3] = 1;\n lightData.set(tmpVec4, offset);\n // color\n tmpVec4[0] = Math.random() * 2;\n tmpVec4[1] = Math.random() * 2;\n tmpVec4[2] = Math.random() * 2;\n // radius\n tmpVec4[3] = 20.0;\n lightData.set(tmpVec4, offset + 4);\n }\n lightsBuffer.unmap();\n\n const lightExtentBuffer = device.createBuffer({\n size: 4 * 8,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n const lightExtentData = new Float32Array(8);\n lightExtentData.set(lightExtentMin, 0);\n lightExtentData.set(lightExtentMax, 4);\n device.queue.writeBuffer(\n lightExtentBuffer,\n 0,\n lightExtentData.buffer,\n lightExtentData.byteOffset,\n lightExtentData.byteLength\n );\n\n const lightUpdateComputePipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({\n code: lightUpdate,\n }),\n entryPoint: 'main',\n },\n });\n const lightsBufferBindGroup = device.createBindGroup({\n layout: lightsBufferBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: lightsBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: configUniformBuffer,\n },\n },\n {\n binding: 2,\n resource: {\n buffer: cameraUniformBuffer,\n },\n },\n ],\n });\n const lightsBufferComputeBindGroup = device.createBindGroup({\n layout: lightUpdateComputePipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: lightsBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: configUniformBuffer,\n },\n },\n {\n binding: 2,\n resource: {\n buffer: lightExtentBuffer,\n },\n },\n ],\n });\n //--------------------\n\n // Scene matrices\n const eyePosition = vec3.fromValues(0, 50, -100);\n const upVector = vec3.fromValues(0, 1, 0);\n const origin = vec3.fromValues(0, 0, 0);\n\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 2000.0\n );\n\n // Move the model so it's centered.\n const modelMatrix = mat4.translation([0, -45, 0]);\n\n const modelData = modelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 0,\n modelData.buffer,\n modelData.byteOffset,\n 
modelData.byteLength\n );\n const invertTransposeModelMatrix = mat4.invert(modelMatrix);\n mat4.transpose(invertTransposeModelMatrix, invertTransposeModelMatrix);\n const normalModelData = invertTransposeModelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 64,\n normalModelData.buffer,\n normalModelData.byteOffset,\n normalModelData.byteLength\n );\n\n // Rotates the camera around the origin based on time.\n function getCameraViewProjMatrix() {\n const rad = Math.PI * (Date.now() / 5000);\n const rotation = mat4.rotateY(mat4.translation(origin), rad);\n const rotatedEyePosition = vec3.transformMat4(eyePosition, rotation);\n\n const viewMatrix = mat4.lookAt(rotatedEyePosition, origin, upVector);\n\n return mat4.multiply(projectionMatrix, viewMatrix) as Float32Array;\n }\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const cameraViewProj = getCameraViewProjMatrix();\n device.queue.writeBuffer(\n cameraUniformBuffer,\n 0,\n cameraViewProj.buffer,\n cameraViewProj.byteOffset,\n cameraViewProj.byteLength\n );\n const cameraInvViewProj = mat4.invert(cameraViewProj) as Float32Array;\n device.queue.writeBuffer(\n cameraUniformBuffer,\n 64,\n cameraInvViewProj.buffer,\n cameraInvViewProj.byteOffset,\n cameraInvViewProj.byteLength\n );\n\n const commandEncoder = device.createCommandEncoder();\n {\n // Write position, normal, albedo etc. data to gBuffers\n const gBufferPass = commandEncoder.beginRenderPass(\n writeGBufferPassDescriptor\n );\n gBufferPass.setPipeline(writeGBuffersPipeline);\n gBufferPass.setBindGroup(0, sceneUniformBindGroup);\n gBufferPass.setVertexBuffer(0, vertexBuffer);\n gBufferPass.setIndexBuffer(indexBuffer, 'uint16');\n gBufferPass.drawIndexed(indexCount);\n gBufferPass.end();\n }\n {\n // Update lights position\n const lightPass = commandEncoder.beginComputePass();\n lightPass.setPipeline(lightUpdateComputePipeline);\n lightPass.setBindGroup(0, lightsBufferComputeBindGroup);\n lightPass.dispatchWorkgroups(Math.ceil(kMaxNumLights / 64));\n lightPass.end();\n }\n {\n if (settings.mode === 'gBuffers view') {\n // GBuffers debug view\n // Left: depth\n // Middle: normal\n // Right: albedo (use uv to mimic a checkerboard texture)\n textureQuadPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n const debugViewPass = commandEncoder.beginRenderPass(\n textureQuadPassDescriptor\n );\n debugViewPass.setPipeline(gBuffersDebugViewPipeline);\n debugViewPass.setBindGroup(0, gBufferTexturesBindGroup);\n debugViewPass.draw(6);\n debugViewPass.end();\n } else {\n // Deferred rendering\n textureQuadPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n const deferredRenderingPass = commandEncoder.beginRenderPass(\n textureQuadPassDescriptor\n );\n deferredRenderingPass.setPipeline(deferredRenderPipeline);\n deferredRenderingPass.setBindGroup(0, gBufferTexturesBindGroup);\n deferredRenderingPass.setBindGroup(1, lightsBufferBindGroup);\n deferredRenderingPass.draw(6);\n deferredRenderingPass.end();\n }\n }\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst DeferredRendering: () => JSX.Element = () =>\n makeSample({\n name: 'Deferred Rendering',\n description: `This example shows how to do deferred rendering with webgpu.\n Render geometry info to multiple targets in the gBuffers in the first pass.\n In this sample we have 2 gBuffers for normals and albedo, 
along with a depth texture.\n And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.\n World-space positions are reconstructed from the depth texture and camera matrix.\n We also update light position in a compute shader, where further operations like tile/cluster culling could happen.\n The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer\n in the middle, and the albedo G-buffer on the right side of the screen.\n `,\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: 'vertexWriteGBuffers.wgsl',\n contents: vertexWriteGBuffers,\n editable: true,\n },\n {\n name: 'fragmentWriteGBuffers.wgsl',\n contents: fragmentWriteGBuffers,\n editable: true,\n },\n {\n name: 'vertexTextureQuad.wgsl',\n contents: vertexTextureQuad,\n editable: true,\n },\n {\n name: 'fragmentGBuffersDebugView.wgsl',\n contents: fragmentGBuffersDebugView,\n editable: true,\n },\n {\n name: 'fragmentDeferredRendering.wgsl',\n contents: fragmentDeferredRendering,\n editable: true,\n },\n {\n name: 'lightUpdate.wgsl',\n contents: lightUpdate,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default DeferredRendering;\n"},{name:"vertexWriteGBuffers.wgsl",contents:s,editable:!0},{name:"fragmentWriteGBuffers.wgsl",contents:u,editable:!0},{name:"vertexTextureQuad.wgsl",contents:f,editable:!0},{name:"fragmentGBuffersDebugView.wgsl",contents:d,editable:!0},{name:"fragmentDeferredRendering.wgsl",contents:l,editable:!0},{name:"lightUpdate.wgsl",contents:o,editable:!0}],filename:c});var v=h},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/webpack-af8ade16c1360358.js b/_next/static/chunks/webpack-21fda3d42cf1e62d.js similarity index 97% rename from _next/static/chunks/webpack-af8ade16c1360358.js rename to _next/static/chunks/webpack-21fda3d42cf1e62d.js index 5c770a8d..80207779 100644 --- a/_next/static/chunks/webpack-af8ade16c1360358.js +++ b/_next/static/chunks/webpack-21fda3d42cf1e62d.js @@ -1 +1 @@ -!function(){"use strict";var e,t,r,n,f,a,c,o,i,u,b={},d={};function l(e){var t=d[e];if(void 0!==t)return t.exports;var r=d[e]={exports:{}},n=!0;try{b[e].call(r.exports,r,r.exports,l),n=!1}finally{n&&delete d[e]}return r.exports}l.m=b,e=[],l.O=function(t,r,n,f){if(r){f=f||0;for(var a=e.length;a>0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var c=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(o=!1,f0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var c=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(o=!1,fWebGPU Samples \ No newline at end of file +WebGPU Samples \ No newline at end of file diff --git a/samples/A-buffer.html b/samples/A-buffer.html index ceb5ef26..7b927121 100644 --- a/samples/A-buffer.html +++ b/samples/A-buffer.html @@ -10,6 +10,6 @@ } A-Buffer - WebGPU Samples

A-Buffer

See it on GitHub!

Demonstrates order independent transparency using a per-pixel + limiting memory usage (when required)."/>

\ No newline at end of file + limiting memory usage (when required).
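The A-buffer description above refers to order-independent transparency built on a per-pixel linked list of translucent fragments, with an option to cap memory use. As a rough illustration (buffer names, the node layout, and the width, height, and averageFragmentsPerPixel values are assumptions, not taken from the sample's source), the technique needs two storage buffers:

```ts
// Hypothetical sketch: one "head pointer" per pixel plus a shared node pool.
const headsBuffer = device.createBuffer({
  size: width * height * Uint32Array.BYTES_PER_ELEMENT, // u32 head index per pixel
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});

const nodeByteSize = 16; // e.g. packed color (u32), depth (f32), next (u32), padding
const linkedListBuffer = device.createBuffer({
  // Capping this node count is what "limiting memory usage (when required)" refers to.
  size: width * height * averageFragmentsPerPixel * nodeByteSize,
  usage: GPUBufferUsage.STORAGE,
});
```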

\ No newline at end of file diff --git a/samples/animometer.html b/samples/animometer.html index 7d2b399b..5424e2ee 100644 --- a/samples/animometer.html +++ b/samples/animometer.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Animometer - WebGPU Samples \ No newline at end of file + Animometer - WebGPU Samples \ No newline at end of file diff --git a/samples/bitonicSort.html b/samples/bitonicSort.html index 11d66f48..ef6d2cdf 100644 --- a/samples/bitonicSort.html +++ b/samples/bitonicSort.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on GitHub!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each step of the sort runs the bitonic sort shader as a single workgroup of elements/2 invocations, one compare-and-swap per invocation. The GUI's Execution Information folder contains information about the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.

\ No newline at end of file + Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on GitHub!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each step of the sort runs the bitonic sort shader as a single workgroup of elements/2 invocations, one compare-and-swap per invocation. The GUI's Execution Information folder contains information about the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.
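To make the dispatch pattern described above concrete, here is a hedged host-side sketch of the classic bitonic pass loop. The real sample steps through passes interactively from the GUI, and the bitonicStepPipeline, bitonicBindGroup, and stepUniformBuffer names below are placeholders rather than the sample's own identifiers:

```ts
// Illustrative only: run one compute dispatch per (k, j) bitonic step.
// elements must be a power of two; each invocation compares/swaps one pair.
const workgroupSize = 256; // assumed @workgroup_size in the WGSL
const invocations = elements / 2;

for (let k = 2; k <= elements; k *= 2) {
  for (let j = k / 2; j >= 1; j /= 2) {
    // Queue ordering guarantees each dispatch sees its own (k, j) values.
    device.queue.writeBuffer(stepUniformBuffer, 0, new Uint32Array([k, j]));
    const encoder = device.createCommandEncoder();
    const pass = encoder.beginComputePass();
    pass.setPipeline(bitonicStepPipeline);
    pass.setBindGroup(0, bitonicBindGroup);
    pass.dispatchWorkgroups(Math.ceil(invocations / workgroupSize));
    pass.end();
    device.queue.submit([encoder.finish()]);
  }
}
```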

\ No newline at end of file diff --git a/samples/cameras.html b/samples/cameras.html index 14155f07..3dccf0c5 100644 --- a/samples/cameras.html +++ b/samples/cameras.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cameras - WebGPU Samples \ No newline at end of file + Cameras - WebGPU Samples \ No newline at end of file diff --git a/samples/computeBoids.html b/samples/computeBoids.html index dcd63d62..e7b16a64 100644 --- a/samples/computeBoids.html +++ b/samples/computeBoids.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Compute Boids - WebGPU Samples \ No newline at end of file + Compute Boids - WebGPU Samples \ No newline at end of file diff --git a/samples/cornell.html b/samples/cornell.html index 2e0ae2e7..a7b578ea 100644 --- a/samples/cornell.html +++ b/samples/cornell.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cornell box - WebGPU Samples \ No newline at end of file + Cornell box - WebGPU Samples \ No newline at end of file diff --git a/samples/cubemap.html b/samples/cubemap.html index d638efde..73c08691 100644 --- a/samples/cubemap.html +++ b/samples/cubemap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cubemap - WebGPU Samples \ No newline at end of file + Cubemap - WebGPU Samples \ No newline at end of file diff --git a/samples/deferredRendering.html b/samples/deferredRendering.html index 4c55f329..9d11d406 100644 --- a/samples/deferredRendering.html +++ b/samples/deferredRendering.html @@ -16,7 +16,7 @@ We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen. - "/>

Deferred Rendering

See it on GitHub!

This example shows how to do deferred rendering with webgpu. + "/>

Deferred Rendering

See it on GitHub!

This example shows how to do deferred rendering with webgpu. Render geometry info to multiple targets in the gBuffers in the first pass. In this sample we have 2 gBuffers for normals and albedo, along with a depth texture. And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity. @@ -24,4 +24,4 @@ We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen. -

\ No newline at end of file +
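The deferred-rendering description above boils down to a geometry pass with two color targets (normals and albedo) plus a depth attachment, followed by a full-screen lighting pass that reads them. A minimal sketch of creating those G-buffer textures and the multi-target pass descriptor; the texture formats and the canvas variable are assumptions, only the two-targets-plus-depth structure comes from the sample:

```ts
// Sketch of a two-target G-buffer plus depth, assuming typical formats.
const gBufferNormal = device.createTexture({
  size: [canvas.width, canvas.height],
  format: 'rgba16float', // normals
  usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
});
const gBufferAlbedo = device.createTexture({
  size: [canvas.width, canvas.height],
  format: 'bgra8unorm', // albedo
  usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
});
const depthTexture = device.createTexture({
  size: [canvas.width, canvas.height],
  format: 'depth24plus',
  usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
});

const writeGBufferPassDescriptor: GPURenderPassDescriptor = {
  colorAttachments: [
    { view: gBufferNormal.createView(), clearValue: [0, 0, 1, 1], loadOp: 'clear', storeOp: 'store' },
    { view: gBufferAlbedo.createView(), clearValue: [0, 0, 0, 1], loadOp: 'clear', storeOp: 'store' },
  ],
  depthStencilAttachment: {
    view: depthTexture.createView(),
    depthClearValue: 1.0,
    depthLoadOp: 'clear',
    depthStoreOp: 'store',
  },
};
```

Storing only normals and albedo works because, as the description notes, world-space position is reconstructed in the lighting shader from the sampled depth value and the inverse view-projection matrix.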

\ No newline at end of file diff --git a/samples/fractalCube.html b/samples/fractalCube.html index b1dbb172..132ada2d 100644 --- a/samples/fractalCube.html +++ b/samples/fractalCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Fractal Cube - WebGPU Samples \ No newline at end of file + Fractal Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/gameOfLife.html b/samples/gameOfLife.html index d92c276e..53561d97 100644 --- a/samples/gameOfLife.html +++ b/samples/gameOfLife.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Conway's Game of Life - WebGPU Samples \ No newline at end of file + Conway's Game of Life - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangle.html b/samples/helloTriangle.html index 22fd3113..26f46269 100644 --- a/samples/helloTriangle.html +++ b/samples/helloTriangle.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle - WebGPU Samples \ No newline at end of file + Hello Triangle - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangleMSAA.html b/samples/helloTriangleMSAA.html index 6e0e3c7d..12a84b70 100644 --- a/samples/helloTriangleMSAA.html +++ b/samples/helloTriangleMSAA.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle MSAA - WebGPU Samples \ No newline at end of file + Hello Triangle MSAA - WebGPU Samples \ No newline at end of file diff --git a/samples/imageBlur.html b/samples/imageBlur.html index 61306d7c..8aec1e08 100644 --- a/samples/imageBlur.html +++ b/samples/imageBlur.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Image Blur - WebGPU Samples \ No newline at end of file + Image Blur - WebGPU Samples \ No newline at end of file diff --git a/samples/instancedCube.html b/samples/instancedCube.html index 704990cf..5ccb034e 100644 --- a/samples/instancedCube.html +++ b/samples/instancedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Instanced Cube - WebGPU Samples \ No newline at end of file + Instanced Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/normalMap.html b/samples/normalMap.html index 803975dc..9fe57833 100644 --- a/samples/normalMap.html +++ b/samples/normalMap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Normal Mapping - WebGPU Samples \ No newline at end of file + Normal Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/particles.html b/samples/particles.html index 7374ffa4..7ad305c3 100644 --- a/samples/particles.html +++ b/samples/particles.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Particles - WebGPU Samples \ No newline at end of file + Particles - WebGPU Samples \ No newline at end of file diff --git a/samples/renderBundles.html b/samples/renderBundles.html index 71ee057a..ac94eed7 100644 --- a/samples/renderBundles.html +++ b/samples/renderBundles.html @@ -11,7 +11,7 @@ Render Bundles - WebGPU Samples

Render Bundles

See it on GitHub!

This example shows how to use render bundles. It renders a large number of + of instancing to reduce draw overhead.)"/>

Render Bundles

See it on GitHub!

This example shows how to use render bundles. It renders a large number of meshes individually as a proxy for a more complex scene in order to demonstrate the reduction in JavaScript time spent to issue render commands. (Typically a scene like this would make use - of instancing to reduce draw overhead.)

\ No newline at end of file + of instancing to reduce draw overhead.)
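Render bundles cut per-frame JavaScript cost by recording the draw commands once and replaying them inside each render pass. A hedged sketch of that flow, where pipeline, frameBindGroup, meshes, presentationFormat, and renderPassDescriptor are placeholder names rather than the sample's identifiers:

```ts
// Record the per-mesh draw calls once, outside the frame loop.
const bundleEncoder = device.createRenderBundleEncoder({
  colorFormats: [presentationFormat],
  depthStencilFormat: 'depth24plus',
});
bundleEncoder.setPipeline(pipeline);
bundleEncoder.setBindGroup(0, frameBindGroup); // shared camera/lighting data
for (const mesh of meshes) {
  bundleEncoder.setBindGroup(1, mesh.bindGroup);
  bundleEncoder.setVertexBuffer(0, mesh.vertexBuffer);
  bundleEncoder.setIndexBuffer(mesh.indexBuffer, 'uint16');
  bundleEncoder.drawIndexed(mesh.indexCount);
}
const renderBundle = bundleEncoder.finish();

// Each frame, replay the recorded commands with a single call.
function frame() {
  const encoder = device.createCommandEncoder();
  const pass = encoder.beginRenderPass(renderPassDescriptor);
  pass.executeBundles([renderBundle]);
  pass.end();
  device.queue.submit([encoder.finish()]);
  requestAnimationFrame(frame);
}
requestAnimationFrame(frame);
```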

\ No newline at end of file diff --git a/samples/resizeCanvas.html b/samples/resizeCanvas.html index 532477fa..f979428c 100644 --- a/samples/resizeCanvas.html +++ b/samples/resizeCanvas.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Resize Canvas - WebGPU Samples \ No newline at end of file + Resize Canvas - WebGPU Samples \ No newline at end of file diff --git a/samples/reversedZ.html b/samples/reversedZ.html index 40dae9ac..e2b2cbe9 100644 --- a/samples/reversedZ.html +++ b/samples/reversedZ.html @@ -17,7 +17,7 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://thxforthefish.com/posts/reverse_z/ - "/>

Reversed Z

See it on GitHub!

This example shows the use of reversed z technique for better utilization of depth buffer precision. + "/>

Reversed Z

See it on GitHub!

This example shows the use of reversed z technique for better utilization of depth buffer precision. The left column uses regular method, while the right one uses reversed z technique. Both are using depth32float as their depth buffer format. A set of red and green planes are positioned very close to each other. Higher sets are placed further from camera (and are scaled for better visual purpose). @@ -26,4 +26,4 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://thxforthefish.com/posts/reverse_z/ -

\ No newline at end of file +
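Reversed z maps the near plane to depth 1.0 and the far plane to 0.0, so the floating-point format's dense values near zero are spent on distant geometry, exactly where conventional z squeezes everything together. In WebGPU the switch mostly comes down to the depth compare function, the depth clear value, and a projection matrix with near and far swapped; a sketch of the two configurations the sample compares (identifiers are placeholders):

```ts
// Conventional depth: clear to 1.0, keep the nearer fragment ('less').
const regularDepthStencil: GPUDepthStencilState = {
  format: 'depth32float',
  depthWriteEnabled: true,
  depthCompare: 'less',
};
// ...paired with depthClearValue: 1.0 in the render pass descriptor.

// Reversed z: swap near/far in the projection matrix, clear to 0.0,
// and keep the fragment with the *greater* depth value.
const reversedDepthStencil: GPUDepthStencilState = {
  format: 'depth32float',
  depthWriteEnabled: true,
  depthCompare: 'greater',
};
// ...paired with depthClearValue: 0.0 in the render pass descriptor.
```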

\ No newline at end of file diff --git a/samples/rotatingCube.html b/samples/rotatingCube.html index 2069881b..d485765e 100644 --- a/samples/rotatingCube.html +++ b/samples/rotatingCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Rotating Cube - WebGPU Samples \ No newline at end of file + Rotating Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/samplerParameters.html b/samples/samplerParameters.html index 0c10973e..6c1cb878 100644 --- a/samples/samplerParameters.html +++ b/samples/samplerParameters.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on GitHub!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).

\ No newline at end of file + Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on GitHub!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).
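For reference, the knobs that description is talking about are the fields of GPUSamplerDescriptor, and the test texture is simply a small texture with four mip levels. A sketch with illustrative values; the defaults the GUI actually starts from are an assumption, not taken from the sample:

```ts
// Illustrative sampler: these are the parameters the GUI lets you vary.
const sampler = device.createSampler({
  addressModeU: 'repeat',
  addressModeV: 'repeat',
  magFilter: 'linear',
  minFilter: 'linear',
  mipmapFilter: 'linear',
  lodMinClamp: 0,
  lodMaxClamp: 4,
  maxAnisotropy: 1, // may only exceed 1 when all three filters are 'linear'
});

// A 16x16 texture with 4 mip levels: 16x16, 8x8, 4x4, and 2x2.
const checkerTexture = device.createTexture({
  size: [16, 16],
  mipLevelCount: 4,
  format: 'rgba8unorm',
  usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
});
```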

\ No newline at end of file diff --git a/samples/shadowMapping.html b/samples/shadowMapping.html index f814bbda..bcf7de57 100644 --- a/samples/shadowMapping.html +++ b/samples/shadowMapping.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Shadow Mapping - WebGPU Samples \ No newline at end of file + Shadow Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/texturedCube.html b/samples/texturedCube.html index 7eadffa5..b7cf7280 100644 --- a/samples/texturedCube.html +++ b/samples/texturedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Textured Cube - WebGPU Samples \ No newline at end of file + Textured Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/twoCubes.html b/samples/twoCubes.html index e5b2a487..5ca19943 100644 --- a/samples/twoCubes.html +++ b/samples/twoCubes.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Two Cubes - WebGPU Samples \ No newline at end of file + Two Cubes - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploading.html b/samples/videoUploading.html index 40dff0f7..66d8c8b0 100644 --- a/samples/videoUploading.html +++ b/samples/videoUploading.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading - WebGPU Samples \ No newline at end of file + Video Uploading - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploadingWebCodecs.html b/samples/videoUploadingWebCodecs.html index 8665f1f8..0a57d99f 100644 --- a/samples/videoUploadingWebCodecs.html +++ b/samples/videoUploadingWebCodecs.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file + Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file diff --git a/samples/worker.html b/samples/worker.html index cd518ab6..65f6edf9 100644 --- a/samples/worker.html +++ b/samples/worker.html @@ -10,6 +10,6 @@ } WebGPU in a Worker - WebGPU Samples

WebGPU in a Worker

See it on GitHub!

This example shows one method of using WebGPU in a web worker and presenting to + which is then transferred to the worker where all the WebGPU calls are made."/>

\ No newline at end of file + which is then transferred to the worker where all the WebGPU calls are made.
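The mechanism described above is an OffscreenCanvas handed to a dedicated worker: the main thread only forwards the canvas, while the worker owns the adapter, device, context, and render loop. A hedged two-part sketch, with the worker file name and message shape as placeholders:

```ts
// main.ts (sketch): hand control of the canvas to a worker.
const canvas = document.querySelector('canvas')!;
const offscreen = canvas.transferControlToOffscreen();
const worker = new Worker('./worker.js'); // path is a placeholder
worker.postMessage({ canvas: offscreen }, [offscreen]);

// worker.ts (sketch): all WebGPU calls happen here.
self.onmessage = async (ev: MessageEvent) => {
  const { canvas } = ev.data as { canvas: OffscreenCanvas };
  const adapter = await navigator.gpu.requestAdapter();
  const device = await adapter!.requestDevice();
  const context = canvas.getContext('webgpu') as GPUCanvasContext;
  context.configure({ device, format: navigator.gpu.getPreferredCanvasFormat() });
  // ...create pipelines and start the requestAnimationFrame loop here...
};
```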

\ No newline at end of file