mirror of
https://github.com/webgpu/webgpufundamentals.git
synced 2026-05-16 04:30:37 -04:00
543 lines
15 KiB
HTML
<!DOCTYPE html>
|
|
<html>
|
|
<head>
|
|
<meta charset="utf-8">
|
|
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
|
|
<title>WebGPU Camera Controls</title>
|
|
<style>
|
|
@import url(resources/webgpu-lesson.css);
|
|
html, body {
|
|
margin: 0; /* remove the default margin */
|
|
height: 100%; /* make the html,body fill the page */
|
|
}
|
|
canvas {
|
|
display: block; /* make the canvas act like a block */
|
|
width: 100%; /* make the canvas fill its container */
|
|
height: 100%;
|
|
touch-action: none;
|
|
}
|
|
</style>
|
|
</head>
|
|
<body>
|
|
<canvas></canvas>
|
|
</body>
|
|
<script type="module">
|
|
// see https://webgpufundamentals.org/webgpu/lessons/webgpu-utils.html#wgpu-matrix
|
|
import {mat4, vec3} from '../3rdparty/wgpu-matrix.module.js';
|
|
import GUI from '../3rdparty/muigui-0.x.module.js';
|
|
|
|
function createFVertices() {
  // Builds interleaved vertex data for a 3D letter "F":
  // 16 quads (front faces, back faces, and the connecting sides),
  // each quad expanded to 2 triangles via the index list below.
  // Returns { vertexData, numVertices } where vertexData is a Float32Array
  // with a 16-byte-per-vertex layout: 3 float32 xyz + 4 uint8 RGBA.
  const positions = [
    // left column
    -50,  75,  15,
    -20,  75,  15,
    -50, -75,  15,
    -20, -75,  15,

    // top rung
    -20,  75,  15,
     50,  75,  15,
    -20,  45,  15,
     50,  45,  15,

    // middle rung
    -20,  15,  15,
     20,  15,  15,
    -20, -15,  15,
     20, -15,  15,

    // left column back
    -50,  75, -15,
    -20,  75, -15,
    -50, -75, -15,
    -20, -75, -15,

    // top rung back
    -20,  75, -15,
     50,  75, -15,
    -20,  45, -15,
     50,  45, -15,

    // middle rung back
    -20,  15, -15,
     20,  15, -15,
    -20, -15, -15,
     20, -15, -15,
  ];

  const indices = [
     0,  2,  1,    2,  3,  1,   // left column
     4,  6,  5,    6,  7,  5,   // top rung
     8, 10,  9,   10, 11,  9,   // middle rung

    12, 13, 14,   14, 13, 15,   // left column back
    16, 17, 18,   18, 17, 19,   // top rung back
    20, 21, 22,   22, 21, 23,   // middle rung back

     0,  5, 12,   12,  5, 17,   // top
     5,  7, 17,   17,  7, 19,   // top rung right
     6, 18,  7,   18, 19,  7,   // top rung bottom
     6,  8, 18,   18,  8, 20,   // between top and middle rung
     8,  9, 20,   20,  9, 21,   // middle rung top
     9, 11, 21,   21, 11, 23,   // middle rung right
    10, 22, 11,   22, 23, 11,   // middle rung bottom
    10,  3, 22,   22,  3, 15,   // stem right
     2, 14,  3,   14, 15,  3,   // bottom
     0, 12,  2,   12, 14,  2,   // left
  ];

  // One RGB color per quad (6 indices), in the same order as `indices` above.
  const quadColors = [
    200,  70, 120,  // left column front
    200,  70, 120,  // top rung front
    200,  70, 120,  // middle rung front

     80,  70, 200,  // left column back
     80,  70, 200,  // top rung back
     80,  70, 200,  // middle rung back

     70, 200, 210,  // top
    160, 160, 220,  // top rung right
     90, 130, 110,  // top rung bottom
    200, 200,  70,  // between top and middle rung
    210, 100,  70,  // middle rung top
    210, 160,  70,  // middle rung right
     70, 180, 210,  // middle rung bottom
    100,  70, 210,  // stem right
     76, 210, 100,  // bottom
    140, 210,  80,  // left
  ];

  const numVertices = indices.length;
  // 4 float32 slots per vertex: xyz plus one 4-byte slot whose bytes hold RGBA.
  const vertexData = new Float32Array(numVertices * 4);
  // Byte-level view over the same buffer, used to poke the packed colors in.
  const colorData = new Uint8Array(vertexData.buffer);

  indices.forEach((vertNdx, i) => {
    const posOffset = vertNdx * 3;
    vertexData.set(positions.slice(posOffset, posOffset + 3), i * 4);

    // Every run of 6 indices (2 triangles) is one quad and shares a color.
    const colorOffset = Math.floor(i / 6) * 3;
    colorData.set(quadColors.slice(colorOffset, colorOffset + 3), i * 16 + 12);
    colorData[i * 16 + 15] = 255;  // opaque alpha
  });

  return {
    vertexData,
    numVertices,
  };
}
|
|
|
|
async function main() {
  // Initializes WebGPU, builds a pipeline plus a grid of "F" objects (each
  // with its own model-matrix uniform buffer), then wires up orbit-camera
  // controls: drag = rotate, shift/middle-drag = track, pinch/double-tap
  // drag/wheel = dolly. Rendering is on-demand (resize, GUI change, pointer
  // input), not a requestAnimationFrame loop.
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need a browser that supports WebGPU');
    return;
  }

  const degToRad = d => d * Math.PI / 180;

  // Orbit-style camera state. `matrix` (the camera's world matrix) is
  // derived from target/yRotation/xRotation/radius by computeCameraMatrix.
  const camera = {
    fieldOfView: degToRad(60),
    near: 1,
    far: 10000,
    target: [-15, 0, 0], //[10, 5, 7],
    radius: 140,
    yRotation: 0,
    xRotation: 0,
    matrix: mat4.identity(),
  };

  const gui = new GUI();
  gui.name('Drag = Rotate, shift = move, wheel = dolly');
  // `render` is a hoisted function declaration below, so this reference is safe.
  gui.onChange(render);
  gui.add(camera, 'fieldOfView', {min: 1, max: 179, converters: GUI.converters.radToDeg});

  const canvas = document.querySelector('canvas');
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  const context = canvas.getContext('webgpu');
  context.configure({
    device,
    format: presentationFormat,
  });

  const module = device.createShaderModule({
    code: /* wgsl */ `
      struct PerModelUniforms {
        matrix: mat4x4f,
      };

      struct GlobalUniforms {
        viewProjection: mat4x4f,
      };

      struct Vertex {
        @location(0) position: vec4f,
        @location(1) color: vec4f,
      };

      struct VSOutput {
        @builtin(position) position: vec4f,
        @location(0) color: vec4f,
      };

      @group(0) @binding(0) var<uniform> model: PerModelUniforms;
      @group(0) @binding(1) var<uniform> global: GlobalUniforms;

      @vertex fn vs(vert: Vertex) -> VSOutput {
        var vsOut: VSOutput;
        vsOut.position = global.viewProjection * model.matrix * vert.position;
        vsOut.color = vert.color;
        return vsOut;
      }

      @fragment fn fs(vsOut: VSOutput) -> @location(0) vec4f {
        return vsOut.color;
      }
    `,
  });

  const pipeline = device.createRenderPipeline({
    label: '2 attributes',
    layout: 'auto',
    vertex: {
      module,
      buffers: [
        {
          arrayStride: (4) * 4, // (3) floats 4 bytes each + one 4 byte color
          attributes: [
            {shaderLocation: 0, offset: 0, format: 'float32x3'},  // position
            {shaderLocation: 1, offset: 12, format: 'unorm8x4'},  // color
          ],
        },
      ],
    },
    fragment: {
      module,
      targets: [{ format: presentationFormat }],
    },
    primitive: {
      cullMode: 'back',
    },
    depthStencil: {
      depthWriteEnabled: true,
      depthCompare: 'less',
      format: 'depth24plus',
    },
  });

  const { vertexData, numVertices } = createFVertices();
  const vertexBuffer = device.createBuffer({
    label: 'vertex buffer vertices',
    size: vertexData.byteLength,
    usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
  });
  device.queue.writeBuffer(vertexBuffer, 0, vertexData);

  // One viewProjection matrix shared by every object.
  const globalUniformBufferSize = (16) * 4;
  const globalUniformBuffer = device.createBuffer({
    label: 'global uniforms',  // was mislabeled 'model uniforms' (copy-paste)
    size: globalUniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });

  const globalUniformValues = new Float32Array(globalUniformBufferSize / 4);

  // offsets to the various uniform values in float32 indices
  const kViewProjectionOffset = 0;
  const viewProjectionValue = globalUniformValues.subarray(
      kViewProjectionOffset, kViewProjectionOffset + 16);

  // Create a 5x5 grid of objects on the xz plane, 500 units apart,
  // each with its own (static) model matrix and bind group.
  const objectInfos = [];
  for (let z = -1000; z <= 1000; z += 500) {
    for (let x = -1000; x <= 1000; x += 500) {
      // model matrix
      const uniformBufferSize = (16) * 4;
      const uniformBuffer = device.createBuffer({
        label: 'model uniforms',
        size: uniformBufferSize,
        usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
      });

      const uniformValues = new Float32Array(uniformBufferSize / 4);

      // offsets to the various uniform values in float32 indices
      const kMatrixOffset = 0;

      const matrixValue = uniformValues.subarray(kMatrixOffset, kMatrixOffset + 16);
      mat4.translation([x, 0, z], matrixValue);
      // Model matrices never change, so upload once here rather than per frame.
      device.queue.writeBuffer(uniformBuffer, 0, uniformValues);

      const bindGroup = device.createBindGroup({
        label: 'bind group for object',
        layout: pipeline.getBindGroupLayout(0),
        entries: [
          { binding: 0, resource: uniformBuffer },
          { binding: 1, resource: globalUniformBuffer },
        ],
      });

      objectInfos.push({
        bindGroup,
        uniformBuffer,
        uniformValues,
        matrixValue,
      });
    }
  }

  const renderPassDescriptor = {
    label: 'our basic canvas renderPass',
    colorAttachments: [
      {
        // view: <- to be filled out when we render
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
    depthStencilAttachment: {
      // view: <- to be filled out when we render
      depthClearValue: 1.0,
      depthLoadOp: 'clear',
      depthStoreOp: 'store',
    },
  };

  let depthTexture;

  function render() {
    // Get the current texture from the canvas context and
    // set it as the texture to render to.
    const canvasTexture = context.getCurrentTexture();
    renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();

    // If we don't have a depth texture OR if its size is different
    // from the canvasTexture then we make a new depth texture
    if (!depthTexture ||
        depthTexture.width !== canvasTexture.width ||
        depthTexture.height !== canvasTexture.height) {
      if (depthTexture) {
        depthTexture.destroy();
      }
      depthTexture = device.createTexture({
        size: [canvasTexture.width, canvasTexture.height],
        format: 'depth24plus',
        usage: GPUTextureUsage.RENDER_ATTACHMENT,
      });
    }
    renderPassDescriptor.depthStencilAttachment.view = depthTexture.createView();
    renderPassDescriptor.colorAttachments[0].clearValue = [0.3, 0.3, 0.3, 1];

    const encoder = device.createCommandEncoder();
    const pass = encoder.beginRenderPass(renderPassDescriptor);
    pass.setPipeline(pipeline);
    pass.setVertexBuffer(0, vertexBuffer);

    const aspect = canvas.clientWidth / canvas.clientHeight;
    const projection = mat4.perspective(
        camera.fieldOfView,
        aspect,
        camera.near,
        camera.far
    );

    // Compute a view matrix (the inverse of the camera's world matrix).
    const viewMatrix = mat4.inverse(camera.matrix);

    mat4.multiply(projection, viewMatrix, viewProjectionValue);

    // upload the uniform values to the uniform buffer
    device.queue.writeBuffer(globalUniformBuffer, 0, globalUniformValues);

    for (const {bindGroup} of objectInfos) {
      pass.setBindGroup(0, bindGroup);
      pass.draw(numVertices);
    }
    pass.end();

    const commandBuffer = encoder.finish();
    device.queue.submit([commandBuffer]);
  }

  // Keep the canvas backing store in sync with its displayed size
  // (clamped to device limits), re-rendering whenever it changes.
  const observer = new ResizeObserver(entries => {
    for (const entry of entries) {
      const canvas = entry.target;
      const width = entry.contentBoxSize[0].inlineSize;
      const height = entry.contentBoxSize[0].blockSize;
      canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
      canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
    }
    render();
  });
  observer.observe(canvas);

  // --- pointer / gesture state ---------------------------------------------
  // A gesture is measured relative to a "start" snapshot: where the pointer(s)
  // went down (or where the interaction mode last changed) and what the camera
  // looked like at that moment.
  let startX;
  let startY;
  let startPinchDistance;
  let lastMode;
  let doubleTapMode;
  // -Infinity so the very first tap can never read as a double-tap
  // (the original left this undefined, making the first delta NaN).
  let lastSingleTapTime = -Infinity;
  let startXRotation;
  let startYRotation;
  const startCameraMatrix = mat4.create();
  const startTarget = vec3.create();
  let startCameraRadius;
  // pointerId -> last known {x, y}; size doubles as the active-pointer count.
  const pointerToLastPosition = new Map();

  // Snapshot the camera so gesture deltas can be applied against fixed values.
  const updateCameraStartInfo = () => {
    startXRotation = camera.xRotation;
    startYRotation = camera.yRotation;
    startCameraRadius = camera.radius;
    mat4.copy(camera.matrix, startCameraMatrix);
    vec3.copy(camera.target, startTarget);
  };
  updateCameraStartInfo();

  // Snapshot the pointer position (and pinch distance when 2 pointers are
  // down) along with the camera state.
  const updateStartPosition = (e) => {
    startX = e.offsetX;
    startY = e.offsetY;
    if (pointerToLastPosition.size === 2) {
      startPinchDistance = computePinchDistance();
    }
    updateCameraStartInfo();
  };

  // world matrix = translate(target) * rotateY * rotateX * translate(0,0,radius)
  const computeCameraMatrix = (mat) => {
    mat = mat4.identity(mat);
    mat4.translate(mat, camera.target, mat);
    mat4.rotateY(mat, camera.yRotation, mat);
    mat4.rotateX(mat, camera.xRotation, mat);
    mat4.translate(mat, [0, 0, camera.radius], mat);
    return mat;
  };

  const updateCamera = () => {
    computeCameraMatrix(camera.matrix);
  };
  updateCamera();

  // Distance in CSS pixels between the two active pointers.
  const computePinchDistance = () => {
    const pos = [...pointerToLastPosition.values()];
    const dx = pos[0].x - pos[1].x;
    const dy = pos[0].y - pos[1].y;
    return Math.hypot(dx, dy);
  };

  // Slide the target in the camera's own xy plane ("truck/pedestal").
  const cameraTrack = (deltaX, deltaY) => {
    // rotation-only copy of the start matrix: strip the translation so we
    // only transform the drag direction, not a position.
    const mat = mat4.copy(startCameraMatrix);
    mat4.setTranslation(mat, [0, 0, 0], mat);
    const direction = vec3.transformMat4([-deltaX, deltaY, 0], mat);
    vec3.addScaled(startTarget, direction, 1, camera.target);
    updateCamera();
  };

  // Rotate around camera.target
  const cameraPanAndTilt = (deltaX, deltaY) => {
    camera.xRotation = startXRotation - deltaY * 0.01;
    camera.yRotation = startYRotation - deltaX * 0.01;
    updateCamera();
  };

  // Dolly: pinch apart = larger radius, pinch together = smaller.
  const cameraPinchToZoom = () => {
    const pinchDistance = computePinchDistance();
    const delta = pinchDistance - startPinchDistance;
    camera.radius = startCameraRadius + delta;
    updateCamera();
  };

  // Dolly with one finger after a double-tap-and-hold.
  const cameraSingleFingerZoom = (deltaY) => {
    camera.radius = startCameraRadius + deltaY;
    updateCamera();
  };

  const onMove = (e) => {
    // Ignore hover moves and pointers we never captured on pointerdown.
    if (!pointerToLastPosition.has(e.pointerId) || !canvas.hasPointerCapture(e.pointerId)) {
      return;
    }
    pointerToLastPosition.set(e.pointerId, { x: e.clientX, y: e.clientY });

    // Pick the interaction mode from the current pointer/modifier state.
    // 3+ pointers map to the string 'undefined', which matches no case
    // below and so deliberately does nothing.
    const mode = pointerToLastPosition.size === 2
      ? 'pinch'
      : pointerToLastPosition.size > 2
        ? 'undefined'
        : doubleTapMode
          ? 'doubleTapZoom'
          : e.shiftKey || (e.buttons & 4) !== 0
            ? 'track'
            : 'panAndTilt';

    // When the mode changes mid-drag (e.g. shift pressed), re-baseline so
    // the camera doesn't jump.
    if (mode !== lastMode) {
      lastMode = mode;
      updateStartPosition(e);
    }

    const deltaX = e.clientX - startX;
    const deltaY = e.clientY - startY;

    switch (mode) {
      case 'pinch':
        cameraPinchToZoom();
        break;
      case 'track':
        cameraTrack(deltaX, deltaY);
        break;
      case 'panAndTilt':
        cameraPanAndTilt(deltaX, deltaY);
        break;
      case 'doubleTapZoom':
        cameraSingleFingerZoom(deltaY);
    }

    render();
  };

  const onUp = (e) => {
    pointerToLastPosition.delete(e.pointerId);
    canvas.releasePointerCapture(e.pointerId);
    if (pointerToLastPosition.size === 0) {
      doubleTapMode = false;
    }
  };

  const onDown = (e) => {
    canvas.setPointerCapture(e.pointerId);
    pointerToLastPosition.set(e.pointerId, { x: e.clientX, y: e.clientY });
    if (pointerToLastPosition.size === 1) {
      // Two single-pointer taps within 500ms arm single-finger zoom.
      if (!doubleTapMode) {
        const now = performance.now();
        const deltaTime = now - lastSingleTapTime;
        if (deltaTime < 500) {
          doubleTapMode = true;
        }
        lastSingleTapTime = now;
      }
    } else {
      doubleTapMode = false;
    }
    updateStartPosition(e);
  };

  const onWheel = (e) => {
    e.preventDefault();  // keep the page from scrolling while dollying
    camera.radius += e.deltaY;
    updateCamera();
    render();
  };

  canvas.addEventListener('pointerup', onUp);
  canvas.addEventListener('pointercancel', onUp);
  canvas.addEventListener('pointerdown', onDown);
  canvas.addEventListener('pointermove', onMove);
  canvas.addEventListener('wheel', onWheel);
}
|
|
|
|
// Report a fatal startup problem to the user (e.g. no WebGPU support).
function fail(msg) {
  alert(msg);
}
|
|
|
|
// Kick everything off.
main();
|
|
</script>
|
|
</html>
|