0b969f3f42
There are two parts to this change:

1. The computation of peaks is moved from `MessageAudio` to `GlobalAudioContext`, so we can limit the concurrency of the computations (`p-queue`!) and de-duplicate them as well.
2. While the peaks are being computed, the component has to display a spinning animation instead of an empty waveform and an unclickable UI.
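To illustrate part 1, here is a hypothetical caller (the URL and bar count are made up): concurrent requests for the same URL and bar count share a single in-flight promise, and at most `MAX_PARALLEL_COMPUTE` computations run at once.

import { computePeaks } from './GlobalAudioContext';

// Hypothetical usage: both calls produce the same `url:barCount` key, so only
// a single fetch/decode/peak pass actually runs and its result is handed to
// both callers.
async function demo(): Promise<void> {
  const url = 'attachment://voice-note.aac'; // made-up URL
  const [a, b] = await Promise.all([
    computePeaks(url, 100),
    computePeaks(url, 100),
  ]);
  // `a` and `b` resolve to the same ComputePeaksResult object.
  window.log.info('duration', a.duration, 'bars', b.peaks.length);
}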
159 lines · 4.3 KiB · TypeScript
// Copyright 2021 Signal Messenger, LLC
// SPDX-License-Identifier: AGPL-3.0-only

import * as React from 'react';
import PQueue from 'p-queue';
import LRU from 'lru-cache';

import { WaveformCache } from '../types/Audio';

const MAX_WAVEFORM_COUNT = 1000;
const MAX_PARALLEL_COMPUTE = 8;
export type ComputePeaksResult = {
  duration: number;
  peaks: ReadonlyArray<number>;
};

export type Contents = {
  audio: HTMLAudioElement;
  computePeaks(url: string, barCount: number): Promise<ComputePeaksResult>;
};

// This context's value is effectively global. This is not ideal but is necessary because
// the app has multiple React roots. In the future, we should use a single React root
// and instantiate these inside of `GlobalAudioProvider`. (We may wish to keep
// `audioContext` global, however, as the browser limits the number that can be
// created.)
const audioContext = new AudioContext();
const waveformCache: WaveformCache = new LRU({
  max: MAX_WAVEFORM_COUNT,
});

// De-duplicate in-flight computations per `url:barCount` key and cap how many
// of them run in parallel.
const inProgressMap = new Map<string, Promise<ComputePeaksResult>>();
const computeQueue = new PQueue({
  concurrency: MAX_PARALLEL_COMPUTE,
});
/**
 * Load audio from `url`, decode the PCM data, and compute RMS peaks for
 * displaying the waveform.
 *
 * The results are cached in the `waveformCache`, which is shared across
 * messages in the conversation and provided by GlobalAudioContext.
 *
 * The computation happens off the renderer thread via AudioContext, but it is
 * still quite expensive, so we cache it in the `waveformCache` LRU cache.
 */
async function doComputePeaks(
  url: string,
  barCount: number
): Promise<ComputePeaksResult> {
  const existing = waveformCache.get(url);
  if (existing) {
    window.log.info('GlobalAudioContext: waveform cache hit', url);
    return existing;
  }

  window.log.info('GlobalAudioContext: waveform cache miss', url);

  // Load and decode `url` into raw PCM data
  const response = await fetch(url);
  const raw = await response.arrayBuffer();

  const data = await audioContext.decodeAudioData(raw);

  // Compute RMS peaks
  const peaks = new Array(barCount).fill(0);
  const norms = new Array(barCount).fill(0);

  const samplesPerPeak = data.length / peaks.length;
  for (
    let channelNum = 0;
    channelNum < data.numberOfChannels;
    channelNum += 1
  ) {
    const channel = data.getChannelData(channelNum);

    for (let sample = 0; sample < channel.length; sample += 1) {
      const i = Math.floor(sample / samplesPerPeak);
      peaks[i] += channel[sample] ** 2;
      norms[i] += 1;
    }
  }

  // Average the accumulated squares into RMS values, tracking the maximum
  let max = 1e-23;
  for (let i = 0; i < peaks.length; i += 1) {
    peaks[i] = Math.sqrt(peaks[i] / Math.max(1, norms[i]));
    max = Math.max(max, peaks[i]);
  }

  // Normalize so that the tallest bar is 1
  for (let i = 0; i < peaks.length; i += 1) {
    peaks[i] /= max;
  }

  const result = { peaks, duration: data.duration };
  waveformCache.set(url, result);
  return result;
}
export async function computePeaks(
  url: string,
  barCount: number
): Promise<ComputePeaksResult> {
  const computeKey = `${url}:${barCount}`;

  const pending = inProgressMap.get(computeKey);
  if (pending) {
    window.log.info(
      'GlobalAudioContext: already computing peaks for',
      computeKey
    );
    return pending;
  }

  window.log.info('GlobalAudioContext: queue computing peaks for', computeKey);
  const promise = computeQueue.add(() => doComputePeaks(url, barCount));

  inProgressMap.set(computeKey, promise);
  try {
    return await promise;
  } finally {
    // Clean up even when the computation fails so that future calls can retry
    // instead of re-surfacing a stale rejection.
    inProgressMap.delete(computeKey);
  }
}
const globalContents: Contents = {
  audio: new Audio(),
  computePeaks,
};

export const GlobalAudioContext = React.createContext<Contents>(globalContents);

export type GlobalAudioProps = {
  conversationId: string;
  children?: React.ReactNode | React.ReactChildren;
};

/**
 * A global context that holds the Audio, AudioContext, and LRU instances used
 * within a conversation by ts/components/conversation/MessageAudio.tsx
 */
export const GlobalAudioProvider: React.FC<GlobalAudioProps> = ({
  conversationId,
  children,
}) => {
  // When moving between conversations, stop the audio
  React.useEffect(() => {
    return () => {
      globalContents.audio.pause();
    };
  }, [conversationId]);

  return (
    <GlobalAudioContext.Provider value={globalContents}>
      {children}
    </GlobalAudioContext.Provider>
  );
};
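The spinner from part 2 lives in the consuming component (ts/components/conversation/MessageAudio.tsx) rather than in this file. Below is a rough sketch of that side, assuming a hypothetical `VoiceNoteWaveform` component, an illustrative `BAR_COUNT`, and placeholder class names; the real `MessageAudio` is considerably more involved.

import * as React from 'react';

import { GlobalAudioContext, ComputePeaksResult } from './GlobalAudioContext';

// Illustrative constant; the real component derives the bar count elsewhere.
const BAR_COUNT = 100;

// Hypothetical consumer, not the real MessageAudio implementation.
function VoiceNoteWaveform({ url }: { url: string }): JSX.Element {
  const { computePeaks } = React.useContext(GlobalAudioContext);
  const [result, setResult] = React.useState<ComputePeaksResult | undefined>();

  React.useEffect(() => {
    let canceled = false;

    void (async () => {
      const computed = await computePeaks(url, BAR_COUNT);
      if (!canceled) {
        setResult(computed);
      }
    })();

    return () => {
      canceled = true;
    };
  }, [computePeaks, url]);

  // While the peaks are being computed, show a spinner instead of an empty,
  // unclickable waveform.
  if (!result) {
    return <div className="spinner" />;
  }

  return (
    <div className="waveform">
      {result.peaks.map((peak, i) => (
        // eslint-disable-next-line react/no-array-index-key
        <div key={i} style={{ height: `${peak * 100}%` }} />
      ))}
    </div>
  );
}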