TypeGPU and raw WebGPU adapter patterns for HyperFrames. Use when creating GPU-rendered compositions with TypeGPU, raw WebGPU, WGSL fragment shaders, compute pipelines, liquid glass effects, particle systems, or any canvas layer driven by navigator.gpu that responds to HyperFrames hf-seek events.
Install:

```sh
npx skill4agent add heygen-com/hyperframes typegpu
```

The pattern: a dedicated GPU canvas layer, initialized with `await navigator.gpu.requestAdapter()`, that renders only in response to `hf-seek` events, never from `performance.now()`. To scrub it, dispatch `new CustomEvent("hf-seek", { detail: { time } })`; when capture needs the frame flushed, `await device.queue.onSubmittedWorkDone()`.
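The same layer can be initialized through TypeGPU instead of the raw adapter calls. A minimal sketch based on TypeGPU's documented `tgpu.init()` entry point; verify the names against the installed version:

```js
import tgpu from "typegpu";

// tgpu.init() requests the adapter and device internally and rejects
// when WebGPU is unavailable, so wrap it in the same feature guard.
const root = await tgpu.init();
const device = root.device; // underlying GPUDevice; works with the raw-WebGPU code below
```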
Raw WebGPU boilerplate:

```html
<canvas id="gpu-layer"></canvas>
<script>
(async () => {
  if (!navigator.gpu) return;
  const adapter = await navigator.gpu.requestAdapter();
  if (!adapter) return;
  const device = await adapter.requestDevice();

  const canvas = document.getElementById("gpu-layer");
  canvas.width = 1920;
  canvas.height = 1080;
  const ctx = canvas.getContext("webgpu");
  const fmt = navigator.gpu.getPreferredCanvasFormat();
  ctx.configure({ device, format: fmt, alphaMode: "opaque" });

  // Build your pipeline, buffers, bind groups here;
  // `pipeline` and `bindGroup` used below come from this step.
  const timeUniform = new Float32Array([0]);
  const timeBuf = device.createBuffer({
    size: 16, // one f32, padded to 16 bytes
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });

  function render(t) {
    timeUniform[0] = t;
    device.queue.writeBuffer(timeBuf, 0, timeUniform);
    const enc = device.createCommandEncoder();
    const pass = enc.beginRenderPass({
      colorAttachments: [
        {
          view: ctx.getCurrentTexture().createView(),
          loadOp: "clear",
          clearValue: { r: 0, g: 0, b: 0, a: 1 },
          storeOp: "store",
        },
      ],
    });
    pass.setPipeline(pipeline);
    pass.setBindGroup(0, bindGroup);
    pass.draw(3); // fullscreen triangle, no vertex buffer
    pass.end();
    device.queue.submit([enc.finish()]);
  }

  render(0); // paint the first frame immediately
  window.addEventListener("hf-seek", (e) => render(e.detail.time));
})();
</script>
```
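To scrub the layer outside the HyperFrames runner, dispatch the same event by hand; the times below are arbitrary test values:

```js
// Each dispatch drives render(t) through the hf-seek listener above.
for (const time of [0, 0.5, 1.0, 2.5]) {
  window.dispatchEvent(new CustomEvent("hf-seek", { detail: { time } }));
}
```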
GSAP timelines must be created and registered synchronously; do not `await` anything first, or the timeline may not exist when HyperFrames seeks it:

```js
const tl = gsap.timeline({ paused: true });
// Caption tweens: synchronous, added before WebGPU init
gsap.set(".cap", { opacity: 0 });
tl.to("#cap-1", { opacity: 1, duration: 0.3 }, 1.0);
tl.to("#cap-1", { opacity: 0, duration: 0.2 }, 3.5);
window.__timelines["my-comp"] = tl;
// GPU-dependent tweens can go inside the async IIFE
(async () => {
  // ... WebGPU init ...
  const proxy = { value: 0 };
  // Pass the timeline's current time into the GPU render
  // (GSAP calls onUpdate with no arguments).
  tl.to(proxy, { value: 1, duration: 2, onUpdate: () => render(tl.time()) }, 0.5);
})();
```
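The harness is assumed to seek registered timelines itself; if you want to drive the timeline from `hf-seek` during local testing, a sketch (passing `false` for GSAP's `suppressEvents` so callbacks such as `onUpdate` fire):

```js
// Local testing only: mirror hf-seek into the GSAP timeline.
window.addEventListener("hf-seek", (e) => tl.seek(e.detail.time, false));
```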
Using a `<video>` element as a texture source:

```js
const videoEl = document.getElementById("aroll");
// Wait for video metadata before creating the texture
await new Promise((r) => {
  if (videoEl.readyState >= 1) r();
  else videoEl.addEventListener("loadedmetadata", r, { once: true });
});
// Create texture at the video's NATIVE resolution
const vw = videoEl.videoWidth;
const vh = videoEl.videoHeight;
const bgTex = device.createTexture({
  size: [vw, vh],
  format: "rgba8unorm",
  usage:
    GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.RENDER_ATTACHMENT,
});
function render(t) {
  try {
    device.queue.copyExternalImageToTexture({ source: videoEl }, { texture: bgTex }, [vw, vh]);
  } catch (_) {
    /* frame not decoded yet */
  }
  // ... draw ...
}
```

`copyExternalImageToTexture` runs inside `render()` so each seek captures whichever video frame is current at that moment; sample the result in WGSL with `textureSample` or `textureSampleBias`. Keep `alphaMode: 'opaque'` when the GPU canvas is the bottom layer; switch to `alphaMode: 'premultiplied'` when the canvas sits above a `<video>` element, so transparent pixels let the video show through.
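A sketch of the overlay configuration, assuming the canvas is positioned above the video in the DOM:

```js
// Composite the GPU layer over the <video>: premultiplied alpha plus a
// transparent clear, so only drawn pixels cover the video underneath.
ctx.configure({ device, format: fmt, alphaMode: "premultiplied" });
// ...and in the render pass descriptor:
//   clearValue: { r: 0, g: 0, b: 0, a: 0 }
```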
Fullscreen-triangle vertex shader, no vertex buffer needed:

```wgsl
struct Vo { @builtin(position) pos: vec4f, @location(0) uv: vec2f }

@vertex fn vs(@builtin(vertex_index) vi: u32) -> Vo {
  let ps = array<vec2f, 3>(vec2f(-1., -1.), vec2f(3., -1.), vec2f(-1., 3.));
  let ts = array<vec2f, 3>(vec2f(0., 1.), vec2f(2., 1.), vec2f(0., -1.));
  return Vo(vec4f(ps[vi], 0., 1.), ts[vi]);
}
```

Draw it with `pass.draw(3)`: the single oversized triangle covers the whole viewport. For liquid-glass panels, a rounded-box SDF:

```wgsl
fn sdf_box(p: vec2f, half_size: vec2f, corner_radius: f32) -> f32 {
  let d = abs(p) - half_size + vec2f(corner_radius);
  return length(max(d, vec2f(0.))) + min(max(d.x, d.y), 0.) - corner_radius;
}
```
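One way the SDF might feed a fragment stage; `sdfBoxWgsl` (a string holding the function above), the panel size, tint, and edge width are all illustrative, not part of the skill:

```js
// Hypothetical glass panel: dark tint inside the rounded box, soft SDF edge.
const fsModule = device.createShaderModule({
  code: sdfBoxWgsl + /* wgsl */ `
    @fragment fn fs(@location(0) uv: vec2f) -> @location(0) vec4f {
      let p = uv - vec2f(0.5);                   // center UV space
      let d = sdf_box(p, vec2f(0.3, 0.2), 0.05); // half-size, corner radius
      let a = 1.0 - smoothstep(0.0, 0.005, d);   // 1 inside, fades at the edge
      return vec4f(vec3f(0.1, 0.1, 0.12) * a, 0.8 * a);
    }
  `,
});
```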
Determinism rules:

- Never use `Math.random()` or `requestAnimationFrame`; every frame must be a pure function of the seek time.
- Render only in response to `hf-seek`, reading the time from `e.detail.time`, never from `performance.now()`.
- Record the last rendered time in `window.__hfTypegpuTime`.
- `await device.queue.onSubmittedWorkDone()` when the frame must be fully flushed on the GPU before capture.
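Putting the rules together, a sketch of a capture-safe seek handler; making the listener `async` and the exact meaning of `window.__hfTypegpuTime` are assumptions about the harness:

```js
window.addEventListener("hf-seek", async (e) => {
  render(e.detail.time);                    // frame is a pure function of t
  await device.queue.onSubmittedWorkDone(); // GPU has fully flushed the frame
  window.__hfTypegpuTime = e.detail.time;   // record the committed time (assumed convention)
});
```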