<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
    <title>WebGPU SkyBox</title>
    <style>
      @import url(resources/webgpu-lesson.css);
      html, body {
        margin: 0;       /* remove the default margin */
        height: 100%;    /* make the html,body fill the page */
      }
      canvas {
        display: block;  /* make the canvas act like a block */
        width: 100%;     /* make the canvas fill its container */
        height: 100%;
      }
    </style>
  </head>
  <body>
    <canvas></canvas>
  </body>
  <script type="module">
// see https://webgpufundamentals.org/webgpu/lessons/webgpu-utils.html#wgpu-matrix
import {mat4} from '../3rdparty/wgpu-matrix.module.js';

async function main() {
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need a browser that supports WebGPU');
    return;
  }

  // Get a WebGPU context from the canvas and configure it
  const canvas = document.querySelector('canvas');
  const context = canvas.getContext('webgpu');
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    device,
    format: presentationFormat,
    alphaMode: 'premultiplied',
  });

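  // getPreferredCanvasFormat() returns the format the platform composites
  // most efficiently (typically 'bgra8unorm' or 'rgba8unorm'); using it
  // avoids a conversion when the canvas is presented.
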
  const module = device.createShaderModule({
    code: /* wgsl */ `
      struct Uniforms {
        viewDirectionProjectionInverse: mat4x4f,
      };

      struct VSOutput {
        @builtin(position) position: vec4f,
        @location(0) pos: vec4f,
      };

      @group(0) @binding(0) var<uniform> uni: Uniforms;
      @group(0) @binding(1) var ourSampler: sampler;
      @group(0) @binding(2) var ourTexture: texture_cube<f32>;

      @vertex fn vs(@builtin(vertex_index) vNdx: u32) -> VSOutput {
        let pos = array(
          vec2f(-1, 3),
          vec2f(-1,-1),
          vec2f( 3,-1),
        );
        var vsOut: VSOutput;
        vsOut.position = vec4f(pos[vNdx], 1, 1);
        vsOut.pos = vsOut.position;
        return vsOut;
      }

      @fragment fn fs(vsOut: VSOutput) -> @location(0) vec4f {
        let t = uni.viewDirectionProjectionInverse * vsOut.pos;
        return textureSample(ourTexture, ourSampler, normalize(t.xyz / t.w) * vec3f(1, 1, -1));
      }
    `,
  });

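  // How the skybox draw works: the vertex shader emits one clip-space
  // triangle at z = 1 (the far plane) that covers the whole canvas, so no
  // vertex buffers are needed. The fragment shader un-projects each
  // fragment through viewDirectionProjectionInverse to recover a
  // world-space direction and samples the cube map with it. Because the
  // triangle sits exactly at depth 1.0, the pipeline below uses
  // depthCompare: 'less-equal' so it still passes against a depth buffer
  // cleared to 1.0. The vec3f(1, 1, -1) flips z; this is assumed to match
  // the handedness convention the cube-map faces were authored with.
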
  const pipeline = device.createRenderPipeline({
    label: 'no attributes',
    layout: 'auto',
    vertex: {
      module,
    },
    fragment: {
      module,
      targets: [{ format: presentationFormat }],
    },
    depthStencil: {
      depthWriteEnabled: true,
      depthCompare: 'less-equal',
      format: 'depth24plus',
    },
  });

  const numMipLevels = (...sizes) => {
    const maxSize = Math.max(...sizes);
    return 1 + Math.log2(maxSize) | 0;
  };

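  // For example: numMipLevels(2048, 1024) = (1 + Math.log2(2048)) | 0 = 12.
  // `+` binds tighter than `|`, so the `| 0` truncates the whole sum to an
  // integer when the largest dimension is not a power of 2.
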
  function copySourcesToTexture(device, texture, sources, {flipY} = {}) {
    sources.forEach((source, layer) => {
      device.queue.copyExternalImageToTexture(
        { source, flipY, },
        { texture, origin: [0, 0, layer] },
        { width: source.width, height: source.height },
      );
    });
    if (texture.mipLevelCount > 1) {
      generateMips(device, texture);
    }
  }

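  // Each source lands in its own array layer via origin: [0, 0, layer].
  // For a cube-map view, WebGPU maps layers 0-5 to the faces in the order
  // +x, -x, +y, -y, +z, -z, which is the order the URLs are listed
  // further below.
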
  function createTextureFromSources(device, sources, options = {}) {
    // Assume all sources are the same size, so just use the first one
    // for width and height
    const source = sources[0];
    const texture = device.createTexture({
      format: 'rgba8unorm',
      mipLevelCount: options.mips ? numMipLevels(source.width, source.height) : 1,
      size: [source.width, source.height, sources.length],
      usage: GPUTextureUsage.TEXTURE_BINDING |
             GPUTextureUsage.COPY_DST |
             GPUTextureUsage.RENDER_ATTACHMENT,
    });
    copySourcesToTexture(device, texture, sources, options);
    return texture;
  }

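  // generateMips builds each mip level from the level above it: for every
  // mip level and every array layer it draws a textured quad that samples
  // the previous level with linear filtering, halving the resolution each
  // pass. Render pipelines are created lazily and cached per texture format.
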
  const generateMips = (() => {
    let sampler;
    let module;
    const pipelineByFormat = {};

    return function generateMips(device, texture) {
      if (!module) {
        module = device.createShaderModule({
          label: 'textured quad shaders for mip level generation',
          code: /* wgsl */ `
            struct VSOutput {
              @builtin(position) position: vec4f,
              @location(0) texcoord: vec2f,
            };

            @vertex fn vs(
              @builtin(vertex_index) vertexIndex : u32
            ) -> VSOutput {
              let pos = array(
                // 1st triangle
                vec2f( 0.0,  0.0),  // center
                vec2f( 1.0,  0.0),  // right, center
                vec2f( 0.0,  1.0),  // center, top

                // 2nd triangle
                vec2f( 0.0,  1.0),  // center, top
                vec2f( 1.0,  0.0),  // right, center
                vec2f( 1.0,  1.0),  // right, top
              );

              var vsOutput: VSOutput;
              let xy = pos[vertexIndex];
              vsOutput.position = vec4f(xy * 2.0 - 1.0, 0.0, 1.0);
              vsOutput.texcoord = vec2f(xy.x, 1.0 - xy.y);
              return vsOutput;
            }

            @group(0) @binding(0) var ourSampler: sampler;
            @group(0) @binding(1) var ourTexture: texture_2d<f32>;

            @fragment fn fs(fsInput: VSOutput) -> @location(0) vec4f {
              return textureSample(ourTexture, ourSampler, fsInput.texcoord);
            }
          `,
        });

        sampler = device.createSampler({
          minFilter: 'linear',
          magFilter: 'linear',
        });
      }

      if (!pipelineByFormat[texture.format]) {
        pipelineByFormat[texture.format] = device.createRenderPipeline({
          label: 'mip level generator pipeline',
          layout: 'auto',
          vertex: {
            module,
          },
          fragment: {
            module,
            targets: [{ format: texture.format }],
          },
        });
      }
      const pipeline = pipelineByFormat[texture.format];

      const encoder = device.createCommandEncoder({
        label: 'mip gen encoder',
      });

      for (let baseMipLevel = 1; baseMipLevel < texture.mipLevelCount; ++baseMipLevel) {
        for (let layer = 0; layer < texture.depthOrArrayLayers; ++layer) {
          const bindGroup = device.createBindGroup({
            layout: pipeline.getBindGroupLayout(0),
            entries: [
              { binding: 0, resource: sampler },
              {
                binding: 1,
                resource: texture.createView({
                  dimension: '2d',
                  baseMipLevel: baseMipLevel - 1,
                  mipLevelCount: 1,
                  baseArrayLayer: layer,
                  arrayLayerCount: 1,
                }),
              },
            ],
          });

          const renderPassDescriptor = {
            label: 'mip gen renderPass',
            colorAttachments: [
              {
                view: texture.createView({
                  dimension: '2d',
                  baseMipLevel: baseMipLevel,
                  mipLevelCount: 1,
                  baseArrayLayer: layer,
                  arrayLayerCount: 1,
                }),
                loadOp: 'clear',
                storeOp: 'store',
              },
            ],
          };

          const pass = encoder.beginRenderPass(renderPassDescriptor);
          pass.setPipeline(pipeline);
          pass.setBindGroup(0, bindGroup);
          pass.draw(6);  // call our vertex shader 6 times
          pass.end();
        }
      }
      const commandBuffer = encoder.finish();
      device.queue.submit([commandBuffer]);
    };
  })();

  async function loadImageBitmap(url) {
    const res = await fetch(url);
    const blob = await res.blob();
    return await createImageBitmap(blob, { colorSpaceConversion: 'none' });
  }

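  // colorSpaceConversion: 'none' asks the browser not to color-correct
  // while decoding, so the texels we upload match the file's raw values.
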
  async function createTextureFromImages(device, urls, options) {
    const images = await Promise.all(urls.map(loadImageBitmap));
    return createTextureFromSources(device, images, options);
  }

  const texture = await createTextureFromImages(
      device,
      [
        'resources/images/leadenhall_market/pos-x.jpg',  /* webgpufundamentals: url */
        'resources/images/leadenhall_market/neg-x.jpg',  /* webgpufundamentals: url */
        'resources/images/leadenhall_market/pos-y.jpg',  /* webgpufundamentals: url */
        'resources/images/leadenhall_market/neg-y.jpg',  /* webgpufundamentals: url */
        'resources/images/leadenhall_market/pos-z.jpg',  /* webgpufundamentals: url */
        'resources/images/leadenhall_market/neg-z.jpg',  /* webgpufundamentals: url */
      ],
      {mips: true, flipY: false},
  );

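  // The six faces are assumed to be square images of identical size;
  // a 'cube' texture view requires the texture's width to equal its height.
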
  const sampler = device.createSampler({
    magFilter: 'linear',
    minFilter: 'linear',
    mipmapFilter: 'linear',
  });

  // viewDirectionProjectionInverse
  const uniformBufferSize = (16) * 4;
  const uniformBuffer = device.createBuffer({
    label: 'uniforms',
    size: uniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });

  const uniformValues = new Float32Array(uniformBufferSize / 4);

  // offsets to the various uniform values in float32 indices
  const kViewDirectionProjectionInverseOffset = 0;

  const viewDirectionProjectionInverseValue = uniformValues.subarray(
      kViewDirectionProjectionInverseOffset,
      kViewDirectionProjectionInverseOffset + 16);

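  // viewDirectionProjectionInverseValue is a view into uniformValues, not
  // a copy: writing the matrix into it updates the same storage that
  // writeBuffer uploads each frame. One mat4x4f is 16 floats = 64 bytes,
  // which is exactly uniformBufferSize.
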
  const bindGroup = device.createBindGroup({
    label: 'bind group for object',
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: uniformBuffer },
      { binding: 1, resource: sampler },
      { binding: 2, resource: texture.createView({dimension: 'cube'}) },
    ],
  });

  const renderPassDescriptor = {
    label: 'our basic canvas renderPass',
    colorAttachments: [
      {
        // view: <- to be filled out when we render
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
    depthStencilAttachment: {
      // view: <- to be filled out when we render
      depthClearValue: 1.0,
      depthLoadOp: 'clear',
      depthStoreOp: 'store',
    },
  };

  let depthTexture;

  function render(time) {
    time *= 0.001;

    // Get the current texture from the canvas context and
    // set it as the texture to render to.
    const canvasTexture = context.getCurrentTexture();
    renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();

    // If we don't have a depth texture OR if its size is different
    // from the canvasTexture then make a new depth texture
    if (!depthTexture ||
        depthTexture.width !== canvasTexture.width ||
        depthTexture.height !== canvasTexture.height) {
      if (depthTexture) {
        depthTexture.destroy();
      }
      depthTexture = device.createTexture({
        size: [canvasTexture.width, canvasTexture.height],
        format: 'depth24plus',
        usage: GPUTextureUsage.RENDER_ATTACHMENT,
      });
    }
    renderPassDescriptor.depthStencilAttachment.view = depthTexture.createView();

    const encoder = device.createCommandEncoder();
    const pass = encoder.beginRenderPass(renderPassDescriptor);
    pass.setPipeline(pipeline);

    const aspect = canvas.clientWidth / canvas.clientHeight;
    const projection = mat4.perspective(
        60 * Math.PI / 180,
        aspect,
        0.1,  // zNear
        10,   // zFar
    );
    // Camera going in circle from origin looking at origin
    const cameraPosition = [Math.cos(time * .1), 0, Math.sin(time * .1)];
    const view = mat4.lookAt(
        cameraPosition,
        [0, 0, 0],  // target
        [0, 1, 0],  // up
    );
    // We only care about direction so remove the translation
    view[12] = 0;
    view[13] = 0;
    view[14] = 0;

    const viewProjection = mat4.multiply(projection, view);
    mat4.inverse(viewProjection, viewDirectionProjectionInverseValue);

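    // Indices 12-14 hold the translation column of the column-major mat4,
    // so zeroing them pins the camera to the origin: the skybox rotates
    // with the view but never gets nearer or farther, as an infinitely
    // distant background should.
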
    // upload the uniform values to the uniform buffer
    device.queue.writeBuffer(uniformBuffer, 0, uniformValues);
    pass.setBindGroup(0, bindGroup);
    pass.draw(3);

    pass.end();

    const commandBuffer = encoder.finish();
    device.queue.submit([commandBuffer]);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);

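  // Keep the canvas's drawing-buffer size in sync with its displayed size,
  // clamped to [1, maxTextureDimension2D]: a width or height of 0, or one
  // beyond the device limit, would make getCurrentTexture() fail.
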
  const observer = new ResizeObserver(entries => {
    for (const entry of entries) {
      const canvas = entry.target;
      const width = entry.contentBoxSize[0].inlineSize;
      const height = entry.contentBoxSize[0].blockSize;
      canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
      canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
    }
  });
  observer.observe(canvas);
}

function fail(msg) {
  alert(msg);
}

main();
</script>
</html>