Web Audio API Implementation for Audio Apps
The Web Audio API is a low-level audio engine in the browser. Audio is processed through a node graph: source → processing (gain, filter, reverb, analyser) → destination (speakers). It runs in a separate thread, does not block the UI, and provides sample-accurate playback control. The <audio> tag is enough for a simple player; the Web Audio API is for sequencers, synthesizers, visualizations, and real-time effects.
AudioContext and Basic Graph
/**
 * Owns the AudioContext and the master output chain:
 *   input → compressor → master gain → analyser → speakers.
 * All sources should connect to `destination` (the compressor input).
 */
class AudioEngine {
  private ctx: AudioContext
  private masterGain: GainNode
  private analyser: AnalyserNode
  private compressor: DynamicsCompressorNode

  constructor() {
    this.ctx = new AudioContext({ sampleRate: 44100 })

    // Gentle bus compression to tame peaks before the master fader.
    this.compressor = this.ctx.createDynamicsCompressor()
    this.compressor.threshold.value = -24
    this.compressor.knee.value = 30
    this.compressor.ratio.value = 4
    this.compressor.attack.value = 0.003
    this.compressor.release.value = 0.25

    this.masterGain = this.ctx.createGain()
    this.masterGain.gain.value = 0.8

    // Analyser sits after the fader so visualizations reflect actual output.
    this.analyser = this.ctx.createAnalyser()
    this.analyser.fftSize = 2048

    // Master chain: input → compressor → gain → analyser → speakers
    // (connect() returns its destination node, so the chain reads top-down).
    this.compressor
      .connect(this.masterGain)
      .connect(this.analyser)
      .connect(this.ctx.destination)
  }

  /** Entry point of the master chain — connect all sources here. */
  get destination(): AudioNode {
    return this.compressor
  }

  /**
   * Smoothly ramps master volume over 50 ms to avoid clicks.
   * Fix: linearRampToValueAtTime ramps from the PREVIOUS scheduled event,
   * which may lie far in the past (causing an instant jump rather than a
   * glide) — anchor the ramp at the current value and time first, and clear
   * any stale automation.
   */
  setMasterVolume(value: number) {
    const now = this.ctx.currentTime
    const gain = this.masterGain.gain
    gain.cancelScheduledValues(now)
    gain.setValueAtTime(gain.value, now)
    gain.linearRampToValueAtTime(value, now + 0.05)
  }

  /** Browsers keep an AudioContext suspended until a user gesture; call this from a click handler. */
  resume() {
    return this.ctx.resume()
  }
}
Loading and Playing Sounds
/**
 * Fetches, decodes and plays audio samples via the Web Audio API.
 * Decoded results are cached per URL so repeated plays don't re-fetch.
 */
class SoundLoader {
  private ctx: AudioContext
  // Cache the decode *promise* (not the buffer) so concurrent load() calls
  // for the same URL share a single fetch/decode instead of racing and
  // fetching twice.
  private cache: Map<string, Promise<AudioBuffer>> = new Map()

  constructor(ctx: AudioContext) {
    this.ctx = ctx
  }

  /**
   * Fetches and decodes the sample at `url`, caching the result.
   * @throws Error when the HTTP response is not OK (previously a 404 body
   *         fell straight into decodeAudioData and failed cryptically).
   */
  async load(url: string): Promise<AudioBuffer> {
    const cached = this.cache.get(url)
    if (cached) return cached

    const pending = (async () => {
      const response = await fetch(url)
      if (!response.ok) {
        throw new Error(`Failed to fetch ${url}: HTTP ${response.status}`)
      }
      const arrayBuffer = await response.arrayBuffer()
      return this.ctx.decodeAudioData(arrayBuffer)
    })()

    // Evict failed loads so a transient network error can be retried.
    pending.catch(() => this.cache.delete(url))
    this.cache.set(url, pending)
    return pending
  }

  /**
   * Plays `buffer` through `destination`.
   * @param options.when         absolute start time (defaults to "now")
   * @param options.offset       start position within the buffer, seconds
   * @param options.loop         loop the buffer until stop() is called
   * @param options.playbackRate speed/pitch multiplier (1 = normal)
   * @param options.onEnded      callback fired when playback finishes
   * @returns the one-shot source node so the caller can stop() it
   */
  play(
    buffer: AudioBuffer,
    destination: AudioNode,
    options: {
      when?: number
      offset?: number
      loop?: boolean
      playbackRate?: number
      onEnded?: () => void
    } = {}
  ): AudioBufferSourceNode {
    const source = this.ctx.createBufferSource()
    source.buffer = buffer
    source.loop = options.loop ?? false
    source.playbackRate.value = options.playbackRate ?? 1
    // Attach the handler before start() so it can never be missed.
    if (options.onEnded) source.onended = options.onEnded
    source.connect(destination)
    source.start(options.when ?? this.ctx.currentTime, options.offset ?? 0)
    return source
  }
}
Visualization: Oscilloscope and Spectrum
/**
 * Real-time spectrum display driven by an AnalyserNode.
 * Reads frequency data each animation frame and paints vertical bars
 * whose hue shifts from blue toward purple with amplitude.
 */
function AudioVisualizer({ analyser }: { analyser: AnalyserNode }) {
  const canvasRef = useRef<HTMLCanvasElement>(null)

  useEffect(() => {
    const canvas = canvasRef.current!
    const ctx2d = canvas.getContext('2d')!
    const binCount = analyser.frequencyBinCount
    const samples = new Uint8Array(binCount)
    let frameId = 0

    const render = () => {
      frameId = requestAnimationFrame(render)
      // Swap in getByteTimeDomainData(samples) here for an oscilloscope view.
      analyser.getByteFrequencyData(samples)

      ctx2d.fillStyle = '#0f172a'
      ctx2d.fillRect(0, 0, canvas.width, canvas.height)

      const barWidth = canvas.width / binCount * 2.5
      let x = 0
      for (let i = 0; i < binCount; i++) {
        const magnitude = samples[i] / 255 // 0–1
        const barHeight = magnitude * canvas.height
        // Hue 220 (blue) → 280 (purple) as the bin gets louder.
        const hue = 220 + magnitude * 60
        ctx2d.fillStyle = `hsl(${hue}, 80%, 60%)`
        ctx2d.fillRect(x, canvas.height - barHeight, barWidth, barHeight)
        x += barWidth + 1
      }
    }

    render()
    return () => cancelAnimationFrame(frameId)
  }, [analyser])

  return (
    <canvas
      ref={canvasRef}
      width={800}
      height={200}
      className="w-full rounded-lg"
    />
  )
}
Step Sequencer
// One cell of the sequencer grid (one track × one 16th-note step).
interface SequencerStep {
active: boolean // true = this step triggers its track's sample
velocity: number // 0–1, used directly as the per-note gain
}
/**
 * 16-step drum sequencer using the standard look-ahead pattern: a coarse
 * setTimeout loop (every 25 ms) schedules notes ~100 ms ahead on the
 * sample-accurate AudioContext clock.
 */
class StepSequencer {
  private ctx: AudioContext
  private steps: SequencerStep[][] // [track][step]
  private currentStep = 0
  private bpm: number
  private nextNoteTime = 0
  private timerID: number | null = null
  private sounds: AudioBuffer[]
  private destination: AudioNode

  constructor(
    ctx: AudioContext,
    sounds: AudioBuffer[],
    bpm = 120,
    // Route output through a master chain if provided; defaults to the
    // speakers, preserving the previous behavior.
    destination: AudioNode = ctx.destination
  ) {
    this.ctx = ctx
    this.sounds = sounds
    this.bpm = bpm
    this.destination = destination
    // Fix: Array(16).fill({...}) shares ONE object across all 16 slots, so
    // mutating any step would mutate the whole track. Array.from creates a
    // distinct object per step.
    this.steps = sounds.map(() =>
      Array.from({ length: 16 }, (): SequencerStep => ({ active: false, velocity: 0.8 }))
    )
  }

  /** Schedules one drum hit at `time`, scaled by the step's velocity. */
  private scheduleNote(trackIndex: number, time: number, velocity: number) {
    const source = this.ctx.createBufferSource()
    source.buffer = this.sounds[trackIndex]
    const gainNode = this.ctx.createGain()
    gainNode.gain.value = velocity
    source.connect(gainNode)
    gainNode.connect(this.destination)
    source.start(time)
  }

  /** Look-ahead loop: schedules every note falling inside the next 100 ms. */
  private scheduler() {
    const secondsPerBeat = 60.0 / this.bpm
    const secondsPerStep = secondsPerBeat / 4 // 16th notes
    while (this.nextNoteTime < this.ctx.currentTime + 0.1) {
      this.steps.forEach((track, trackIndex) => {
        const step = track[this.currentStep]
        if (step.active) {
          this.scheduleNote(trackIndex, this.nextNoteTime, step.velocity)
        }
      })
      this.currentStep = (this.currentStep + 1) % 16
      this.nextNoteTime += secondsPerStep
    }
    this.timerID = window.setTimeout(() => this.scheduler(), 25)
  }

  start() {
    // Fix: guard against double-start — a second call would spawn a second
    // scheduler loop and double-trigger every note.
    if (this.timerID !== null) return
    this.nextNoteTime = this.ctx.currentTime
    this.scheduler()
  }

  stop() {
    if (this.timerID !== null) {
      clearTimeout(this.timerID)
      this.timerID = null // fix: allow a clean restart after stop()
    }
  }

  /** Toggles a grid cell; replaces the whole step object (no shared state). */
  setStep(track: number, step: number, active: boolean, velocity = 0.8) {
    this.steps[track][step] = { active, velocity }
  }
}
Synthesis: Oscillator + ADSR Envelope
/**
 * Plays one synthesized note: oscillator → low-pass filter → ADSR gain
 * envelope → destination.
 *
 * Generalized: the previously hard-coded waveform, filter settings and
 * envelope constants are now optional parameters whose defaults reproduce
 * the original sound exactly (sawtooth, 2 kHz low-pass, 10 ms attack to
 * 0.7, decay to 0.4 by 100 ms, release to 0 by 500 ms).
 *
 * @param ctx         audio context providing the clock and node factory
 * @param frequency   note pitch in Hz
 * @param destination node to connect the voice to (e.g. master chain input)
 * @param options     optional waveform / filter / ADSR overrides
 * @returns the oscillator, so the caller can stop the note early
 */
function playNote(
  ctx: AudioContext,
  frequency: number,
  destination: AudioNode,
  options: {
    type?: OscillatorType      // waveform, default 'sawtooth'
    peak?: number              // attack peak gain, default 0.7
    sustain?: number           // post-decay level, default 0.4
    attack?: number            // seconds, default 0.01
    decay?: number             // seconds, default 0.09
    release?: number           // seconds, default 0.4
    filterFrequency?: number   // low-pass cutoff in Hz, default 2000
    filterQ?: number           // filter resonance, default 2
  } = {}
): OscillatorNode {
  const {
    type = 'sawtooth',
    peak = 0.7,
    sustain = 0.4,
    attack = 0.01,
    decay = 0.09,
    release = 0.4,
    filterFrequency = 2000,
    filterQ = 2,
  } = options

  const osc = ctx.createOscillator()
  const gainNode = ctx.createGain()
  osc.type = type
  osc.frequency.value = frequency

  // Low-pass filter for warm sound
  const filter = ctx.createBiquadFilter()
  filter.type = 'lowpass'
  filter.frequency.value = filterFrequency
  filter.Q.value = filterQ

  osc.connect(filter)
  filter.connect(gainNode)
  gainNode.connect(destination)

  const now = ctx.currentTime
  const attackEnd = now + attack
  const decayEnd = attackEnd + decay
  const releaseEnd = decayEnd + release

  // ADSR envelope (no sustain hold: decay flows straight into release)
  gainNode.gain.setValueAtTime(0, now)
  gainNode.gain.linearRampToValueAtTime(peak, attackEnd)
  gainNode.gain.linearRampToValueAtTime(sustain, decayEnd)
  gainNode.gain.linearRampToValueAtTime(0, releaseEnd)

  osc.start(now)
  // Stop slightly after the release ends so the tail isn't clipped.
  osc.stop(releaseEnd + 0.05)
  return osc
}
What We Do
Analyze the task: a player with visualization, a drum sequencer, a synthesizer, or microphone input analysis. Design the audio node graph, implement UI controls (play/stop/tempo/volume), and add visualization via an AnalyserNode. Handle the browser autoplay policy — an AudioContext requires a user gesture to start.
Timeline: audio player with visualizer — 2–3 days. Sequencer or synthesizer — 6–10 days.







