// Copyright 2013 the V8 project authors. All rights reserved.
// Copyright (C) 2015 Apple Inc. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Performance.now is used in latency benchmarks; the fallback is Date.now.
var performance = performance || {};
performance.now = (function() {
  return performance.now ||
         performance.mozNow ||
         performance.msNow ||
         performance.oNow ||
         performance.webkitNow ||
         Date.now;
})();
// Simple framework for running the benchmark suites and
// computing a score based on the timing measurements.
// A benchmark has a name (string) and a function that will be run to
// do the performance measurement. The optional setup and tearDown
// arguments are functions that will be invoked before and after
// running the benchmark, but the running time of these functions will
// not be accounted for in the benchmark score. doWarmup controls
// whether an untimed warmup run happens first, doDeterministic makes
// the measurement run exactly minIterations times instead of for one
// second, and latencyResult is an optional function that returns the
// latency samples collected during the run.
function Benchmark(name, doWarmup, doDeterministic, run, setup, tearDown,
                   latencyResult, minIterations) {
  this.name = name;
  this.doWarmup = doWarmup;
  this.doDeterministic = doDeterministic;
  this.run = run;
  this.Setup = setup ? setup : function() { };
  this.TearDown = tearDown ? tearDown : function() { };
  this.latencyResult = latencyResult ? latencyResult : null;
  this.minIterations = minIterations ? minIterations : 32;
}
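// Illustrative sketch (not part of the framework): constructing a benchmark.
// The workload and its name below are hypothetical; only the argument order
// matters.
//
//   var example = new Benchmark('HypotheticalWorkload',
//                               true,                        // doWarmup
//                               false,                       // doDeterministic
//                               function() { /* code under test */ },
//                               function() { /* setup */ },
//                               function() { /* tearDown */ });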
// Benchmark results hold the benchmark and the measured time used to
// run the benchmark. The benchmark score is computed later once a
// full benchmark suite has run to completion. If latency is set to 0
// then there is no latency score for this benchmark.
function BenchmarkResult(benchmark, time, latency) {
  this.benchmark = benchmark;
  this.time = time;
  this.latency = latency;
}
// Automatically convert results to numbers. Used by the geometric
// mean computation.
BenchmarkResult.prototype.valueOf = function() {
  return this.time;
}
// Suites of benchmarks consist of a name and the set of benchmarks in
// addition to the reference timing that the final score will be based
// on. This way, all scores are relative to a reference run and higher
// scores imply better performance.
function BenchmarkSuite(name, reference, benchmarks) {
  this.name = name;
  this.reference = reference;
  this.benchmarks = benchmarks;
  BenchmarkSuite.suites.push(this);
}
// Keep track of all declared benchmark suites.
BenchmarkSuite.suites = [];
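// Illustrative sketch (hypothetical suite): the reference array holds the
// reference throughput time, optionally followed by a reference latency,
// which NotifyResult below uses to compute relative scores.
//
//   new BenchmarkSuite('HypotheticalSuite', [1000], [
//     new Benchmark('HypotheticalWorkload', true, false,
//                   function() { /* code under test */ })
//   ]);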
// Scores are not comparable across versions. Bump the version if
// you're making changes that will affect the scores, e.g. if you add
// a new benchmark or change an existing one.
BenchmarkSuite.version = '9';
// Override the alert function to throw an exception instead.
alert = function(s) {
  throw "Alert called with argument: " + s;
};
// To make the benchmark results predictable, we replace Math.random
// with a 100% deterministic alternative.
BenchmarkSuite.ResetRNG = function() {
  Math.random = (function() {
    var seed = 49734321;
    return function() {
      // Robert Jenkins' 32 bit integer hash function.
      seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
      seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
      seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
      seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff;
      seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff;
      seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
      return (seed & 0xfffffff) / 0x10000000;
    };
  })();
}
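// Illustrative sketch (not part of the framework): the seeded generator is
// fully deterministic, so resetting it twice yields the same sequence.
//
//   BenchmarkSuite.ResetRNG();
//   var a = Math.random();
//   BenchmarkSuite.ResetRNG();
//   var b = Math.random();
//   // a === b: the generator restarts from the same seed.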
// Runs all registered benchmark suites and optionally yields between
// each individual benchmark to avoid running for too long in the
// context of browsers. Once done, the final score is reported to the
// runner.
BenchmarkSuite.RunSuites = function(runner) {
  var continuation = null;
  var suites = BenchmarkSuite.suites;
  var length = suites.length;
  BenchmarkSuite.scores = [];
  var index = 0;
  function RunStep() {
    while (continuation || index < length) {
      if (continuation) {
        continuation = continuation();
      } else {
        var suite = suites[index++];
        if (runner.NotifyStart) runner.NotifyStart(suite.name);
        continuation = suite.RunStep(runner);
      }
      if (continuation && typeof window != 'undefined' && window.setTimeout) {
        window.setTimeout(RunStep, 25);
        return;
      }
    }
    // Show the final result.
    if (runner.NotifyScore) {
      var score = BenchmarkSuite.GeometricMean(BenchmarkSuite.scores);
      var formatted = BenchmarkSuite.FormatScore(100 * score);
      runner.NotifyScore(formatted);
    }
  }
  RunStep();
}
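// Illustrative sketch of a runner object (all callbacks are optional; the
// hook names below are the ones this framework actually calls, while
// console.log is merely assumed to exist in the host environment):
//
//   BenchmarkSuite.RunSuites({
//     NotifyStart: function(name) { console.log('Running ' + name); },
//     NotifyStep: function(name) { console.log('  done: ' + name); },
//     NotifyResult: function(name, score) { console.log(name + ': ' + score); },
//     NotifyError: function(name, error) { console.log(name + ' failed: ' + error); },
//     NotifyScore: function(score) { console.log('Score: ' + score); }
//   });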
// Counts the total number of registered benchmarks. Useful for
// showing progress as a percentage.
BenchmarkSuite.CountBenchmarks = function() {
  var result = 0;
  var suites = BenchmarkSuite.suites;
  for (var i = 0; i < suites.length; i++) {
    result += suites[i].benchmarks.length;
  }
  return result;
}
// Computes the geometric mean of a set of numbers.
BenchmarkSuite.GeometricMean = function(numbers) {
  var log = 0;
  for (var i = 0; i < numbers.length; i++) {
    log += Math.log(numbers[i]);
  }
  return Math.pow(Math.E, log / numbers.length);
}
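// Worked example (illustrative): the geometric mean of [2, 8] is
// exp((ln 2 + ln 8) / 2) = sqrt(2 * 8) = 4.
//
//   BenchmarkSuite.GeometricMean([2, 8]);  // 4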
// Computes the geometric mean of a set of throughput time measurements.
BenchmarkSuite.GeometricMeanTime = function(measurements) {
  var log = 0;
  for (var i = 0; i < measurements.length; i++) {
    log += Math.log(measurements[i].time);
  }
  return Math.pow(Math.E, log / measurements.length);
}
// Computes the average of the worst samples. For example, if percentile
// is 99, this will report the average of the worst 1% of the samples.
BenchmarkSuite.AverageAbovePercentile = function(numbers, percentile) {
  // Don't change the original array.
  numbers = numbers.slice();
  // Sort in ascending order.
  numbers.sort(function(a, b) { return a - b; });
  // Now the elements we want are at the end. Keep removing them until the
  // array size shrinks too much. Examples assuming percentile = 99:
  //
  // - numbers.length starts at 100: we will remove just the worst entry and
  //   then not remove anymore, since then numbers.length / originalLength =
  //   0.99.
  //
  // - numbers.length starts at 1000: we will remove the ten worst.
  //
  // - numbers.length starts at 10: we will remove just the worst.
  var numbersWeWant = [];
  var originalLength = numbers.length;
  while (numbers.length / originalLength > percentile / 100)
    numbersWeWant.push(numbers.pop());
  var sum = 0;
  for (var i = 0; i < numbersWeWant.length; ++i)
    sum += numbersWeWant[i];
  var result = sum / numbersWeWant.length;
  // Do a sanity check.
  if (numbers.length && result < numbers[numbers.length - 1]) {
    throw "Sanity check fail: the worst case result is " + result +
          " but we didn't take into account " + numbers;
  }
  return result;
}
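// Worked example (illustrative): with ten samples and percentile = 90 the
// loop above pops exactly one element (9/10 is not > 0.9), so the result is
// the single worst sample.
//
//   BenchmarkSuite.AverageAbovePercentile([3, 1, 2, 10, 4, 5, 6, 7, 8, 9], 90);
//   // 10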
// Computes the geometric mean of a set of latency measurements.
BenchmarkSuite.GeometricMeanLatency = function(measurements) {
  var log = 0;
  var hasLatencyResult = false;
  for (var i = 0; i < measurements.length; i++) {
    if (measurements[i].latency != 0) {
      log += Math.log(measurements[i].latency);
      hasLatencyResult = true;
    }
  }
  if (hasLatencyResult) {
    return Math.pow(Math.E, log / measurements.length);
  } else {
    return 0;
  }
}
// Converts a score value to a string with at least three significant
// digits.
BenchmarkSuite.FormatScore = function(value) {
  if (value > 100) {
    return value.toFixed(0);
  } else {
    return value.toPrecision(3);
  }
}
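// Worked examples (illustrative):
//
//   BenchmarkSuite.FormatScore(123.456);  // "123"
//   BenchmarkSuite.FormatScore(9.8765);   // "9.88"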
// Notifies the runner that we're done running a single benchmark in
// the benchmark suite. This can be useful to report progress.
BenchmarkSuite.prototype.NotifyStep = function(result) {
  this.results.push(result);
  if (this.runner.NotifyStep) this.runner.NotifyStep(result.benchmark.name);
}
// Notifies the runner that we're done with running a suite and that
// we have a result which can be reported to the user if needed.
BenchmarkSuite.prototype.NotifyResult = function() {
  var mean = BenchmarkSuite.GeometricMeanTime(this.results);
  var score = this.reference[0] / mean;
  BenchmarkSuite.scores.push(score);
  if (this.runner.NotifyResult) {
    var formatted = BenchmarkSuite.FormatScore(100 * score);
    this.runner.NotifyResult(this.name, formatted);
  }
  if (this.reference.length == 2) {
    var meanLatency = BenchmarkSuite.GeometricMeanLatency(this.results);
    if (meanLatency != 0) {
      var scoreLatency = this.reference[1] / meanLatency;
      BenchmarkSuite.scores.push(scoreLatency);
      if (this.runner.NotifyResult) {
        var formattedLatency = BenchmarkSuite.FormatScore(100 * scoreLatency);
        this.runner.NotifyResult(this.name + "Latency", formattedLatency);
      }
    }
  }
}
// Notifies the runner that running a benchmark resulted in an error.
BenchmarkSuite.prototype.NotifyError = function(error) {
  if (this.runner.NotifyError) {
    this.runner.NotifyError(this.name, error);
  }
  if (this.runner.NotifyStep) {
    this.runner.NotifyStep(this.name);
  }
}
// Runs a single benchmark for at least a second and computes the
// average time it takes to run a single iteration.
BenchmarkSuite.prototype.RunSingleBenchmark = function(benchmark, data) {
  function Measure(data) {
    var elapsed = 0;
    var start = new Date();
    // Run either for 1 second or for the number of iterations specified
    // by minIterations, depending on the config flag doDeterministic.
    for (var i = 0; (benchmark.doDeterministic ?
                     i < benchmark.minIterations : elapsed < 1000); i++) {
      benchmark.run();
      elapsed = new Date() - start;
    }
    if (data != null) {
      data.runs += i;
      data.elapsed += elapsed;
    }
  }
  // Set up the data object so that the warmup phase is skipped when the
  // benchmark does not ask for one.
  if (!benchmark.doWarmup && data == null) {
    data = { runs: 0, elapsed: 0 };
  }
  if (data == null) {
    Measure(null);
    return { runs: 0, elapsed: 0 };
  } else {
    Measure(data);
    // If we've run too few iterations, we continue for another second.
    if (data.runs < benchmark.minIterations) return data;
    var usec = (data.elapsed * 1000) / data.runs;
    var latencySamples =
        (benchmark.latencyResult != null) ? benchmark.latencyResult() : [0];
    var percentile = 99.5;
    var latency = BenchmarkSuite.AverageAbovePercentile(latencySamples, percentile) * 1000;
    this.NotifyStep(new BenchmarkResult(benchmark, usec, latency));
    return null;
  }
}
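// Illustrative call pattern (sketch; "suite" and "benchmark" are placeholders):
// RunStep below threads the data object through repeated calls. With doWarmup
// set, the first call measures a throw-away warmup run and returns
// { runs: 0, elapsed: 0 }; later calls accumulate runs/elapsed until
// minIterations is reached and then return null to signal completion.
//
//   var data;                                          // undefined => warmup
//   do {
//     data = suite.RunSingleBenchmark(benchmark, data);
//   } while (data != null);                            // null => benchmark done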
// This function starts running a suite, but stops between each
// individual benchmark in the suite and returns a continuation
// function which can be invoked to run the next benchmark. Once the
// last benchmark has been executed, null is returned.
BenchmarkSuite.prototype.RunStep = function(runner) {
  BenchmarkSuite.ResetRNG();
  this.results = [];
  this.runner = runner;
  var length = this.benchmarks.length;
  var index = 0;
  var suite = this;
  var data;
  // Run the setup, the actual benchmark, and the tear down in three
  // separate steps to allow the framework to yield between any of the
  // steps.
  function RunNextSetup() {
    if (index < length) {
      try {
        suite.benchmarks[index].Setup();
      } catch (e) {
        suite.NotifyError(e);
        return null;
      }
      return RunNextBenchmark;
    }
    suite.NotifyResult();
    return null;
  }
  function RunNextBenchmark() {
    try {
      data = suite.RunSingleBenchmark(suite.benchmarks[index], data);
    } catch (e) {
      suite.NotifyError(e);
      return null;
    }
    // If data is null, we're done with this benchmark.
    return (data == null) ? RunNextTearDown : RunNextBenchmark();
  }
  function RunNextTearDown() {
    try {
      suite.benchmarks[index++].TearDown();
    } catch (e) {
      suite.NotifyError(e);
      return null;
    }
    return RunNextSetup;
  }
  // Start out running the setup.
  return RunNextSetup();
}
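// Illustrative sketch (not part of the framework): driving a single suite to
// completion without the RunSuites scheduler. "suite" and "runner" are
// placeholders for a registered BenchmarkSuite and a runner object.
//
//   var continuation = suite.RunStep(runner);
//   while (continuation) continuation = continuation();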