<!DOCTYPE html>
<html>
<head>
<title>
Test AnalyserNode Downmixing
</title>
<script src="../../imported/w3c/web-platform-tests/resources/testharness.js"></script>
<script src="../../resources/testharnessreport.js"></script>
<script src="../resources/audit-util.js"></script>
<script src="../resources/audit.js"></script>
<script src="../resources/fft.js"></script>
<script src="../resources/realtimeanalyser-testing.js"></script>
</head>
<body>
<script id="layout-test-code">
let sampleRate = 44100;
let renderFrames = 2048;
let audit = Audit.createTaskRunner();
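// Each config gives the number of source channels and the allowed relative
// error when comparing the analyser's frequency data against the reference.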
let testConfigs = [
  {channelCount: 1, message: 'mono', floatRelError: 1.2657e-7},
  {channelCount: 2, message: 'stereo', floatRelError: 1.1681e-7},
  {channelCount: 4, message: 'quad', floatRelError: 4.9793e-7},
  {channelCount: 6, message: '5.1', floatRelError: 2.0215e-7},
  {channelCount: 3, message: '3-channel', floatRelError: 1.2657e-7}
];
// Create tasks for each entry in testConfigs
for (let k in testConfigs) {
  audit.define(testConfigs[k].message, (function(config) {
    return function(task, should) {
      runTest(config, should).then(() => task.done());
    };
  })(testConfigs[k]));
}
audit.run();
// Test downmixing of the AnalyserNode time data. We use the downmixing
// that automatically happens in the destination node to generate the
// reference data, which is then compared with the data that the
// AnalyserNode has captured.
function runTest(options, should) {
  // The context MUST have exactly one channel so that the source is
  // downmixed to mono to generate the reference.
  let context = new OfflineAudioContext(1, renderFrames, sampleRate);
  let channels = options.channelCount || 1;
  let source = context.createBufferSource();
  // The signal value for each channel. It doesn't matter much what the
  // values are, but it's best if they aren't linearly increasing so that
  // the average of the values isn't itself one of the values (in case the
  // implementation does something silly). Only up to 6 channels need to
  // be supported.
  let bufferValues = [1, 2, 3, 4, 5, 6].map(function(x) {
    return x * x;
  });
  source.buffer = createConstantBuffer(
      context, renderFrames, bufferValues.slice(0, channels));
  let analyser = context.createAnalyser();
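  // Disable smoothing so the frequency data depends only on the most
  // recent fftSize samples, and use a small FFT so the analysis window
  // covers just the first fftSize rendered frames.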
  analyser.smoothingTimeConstant = 0;
  analyser.fftSize = 256;
  // Run the analyser as an automatic pull node. Do NOT connect it to the
  // destination; we don't want the output of the analyser to mix in with
  // the source that is also directly connected to the destination.
  source.connect(analyser);
  source.connect(context.destination);
  let timeData = new Float32Array(analyser.fftSize);
  let freqData = new Float32Array(analyser.frequencyBinCount);
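  // Suspend rendering after exactly fftSize frames have been processed and
  // capture the analyser's data; at that point its internal buffer holds
  // the first fftSize frames of the downmixed signal.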
  let suspendFrame = analyser.fftSize;
  context.suspend(suspendFrame / context.sampleRate)
      .then(function() {
        analyser.getFloatTimeDomainData(timeData);
        analyser.getFloatFrequencyData(freqData);
      })
      .then(context.resume.bind(context));
  source.start();
  return context.startRendering().then(function(renderedBuffer) {
    // Verify the time-domain data is correct.
    let prefix = 'Analyser downmix ' + options.message + ' to mono';
    should(timeData, prefix + ' time data')
        .beEqualToArray(
            renderedBuffer.getChannelData(0).subarray(0, analyser.fftSize));
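    // Verify the frequency data. The expected values are the dB magnitudes
    // of an FFT of the same reference time-domain samples.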
    let expectedTimeData =
        renderedBuffer.getChannelData(0).subarray(0, analyser.fftSize);
    let fftOrder = Math.floor(Math.log2(analyser.fftSize));
    let expectedFreqData =
        computeFFTMagnitude(expectedTimeData, fftOrder).map(linearToDb);
    compareFloatFreq(
        prefix + ' freq data', freqData, expectedFreqData, should, {
          precision: 6,
          floatRelError: options.floatRelError,
        });
  });
}
</script>
</body>
</html>