blob: 2ccfe923d30e50b7b209a2480432e672f6cb3ed0 [file] [log] [blame]
#!/usr/bin/env ruby
# Copyright (C) 2011-2018 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
require 'rubygems'
require 'getoptlong'
require 'pathname'
require 'shellwords'
require 'socket'
begin
require 'json'
rescue LoadError => e
$stderr.puts "It does not appear that you have the 'json' package installed. Try running 'sudo gem install json'."
exit 1
end
# Resolve this script's real on-disk location and sanity-check that it is
# installed at Tools/Scripts inside a WebKit-style checkout.
SCRIPT_PATH = Pathname.new(__FILE__).realpath
raise unless SCRIPT_PATH.dirname.basename.to_s == "Scripts"
raise unless SCRIPT_PATH.dirname.dirname.basename.to_s == "Tools"
# Root of the checkout, plus the well-known benchmark suite locations under it.
OPENSOURCE_PATH = SCRIPT_PATH.dirname.dirname.dirname
PERFORMANCETESTS_PATH = OPENSOURCE_PATH + "PerformanceTests"
SUNSPIDER_PATH = PERFORMANCETESTS_PATH + "SunSpider" + "tests" + "sunspider-1.0"
LONGSPIDER_PATH = PERFORMANCETESTS_PATH + "LongSpider"
V8_PATH = PERFORMANCETESTS_PATH + "SunSpider" + "tests" + "v8-v6"
TAILBENCH_PATH = PERFORMANCETESTS_PATH + "TailBench9000"
BIGINTBENCH_PATH = PERFORMANCETESTS_PATH + "BigIntBench"
MICROBENCHMARKS_PATH = OPENSOURCE_PATH + "JSTests" + "microbenchmarks"
OPENSOURCE_OCTANE_PATH = PERFORMANCETESTS_PATH + "Octane"
OCTANE_WRAPPER_PATH = OPENSOURCE_OCTANE_PATH + "wrappers"
JSBENCH_PATH = PERFORMANCETESTS_PATH + "JSBench"
SIXSPEED_PATH = PERFORMANCETESTS_PATH + "SixSpeed" + "tests"
SIXSPEED_WRAPPER_PATH = PERFORMANCETESTS_PATH + "SixSpeed" + "wrappers"
# Scratch directory for generated runscripts and copied benchmark data.
# Created on demand; if something non-directory squats on the name, bail.
TEMP_PATH = OPENSOURCE_PATH + "BenchmarkTemp"
if TEMP_PATH.exist?
  raise unless TEMP_PATH.directory?
else
  Dir.mkdir(TEMP_PATH)
end
BENCH_DATA_PATH = TEMP_PATH + "benchdata"
# Precomputed table of inverse regularized incomplete beta function values,
# consumed via inverseBetaRegularized(n) (1-indexed) by Stats#confInt to
# build 95% Student's t confidence intervals without a stats library.
# Presumably generated offline by a CAS — TODO confirm before editing; the
# table must cover every sample size confInt allows (it raises above 1000).
IBR_LOOKUP=[0.00615583, 0.0975, 0.22852, 0.341628, 0.430741, 0.500526, 0.555933,
0.600706, 0.637513, 0.668244, 0.694254, 0.716537, 0.735827, 0.752684,
0.767535, 0.780716, 0.792492, 0.803074, 0.812634, 0.821313, 0.829227,
0.836472, 0.843129, 0.849267, 0.854943, 0.860209, 0.865107, 0.869674,
0.873942, 0.877941, 0.881693, 0.885223, 0.888548, 0.891686, 0.894652,
0.897461, 0.900124, 0.902652, 0.905056, 0.907343, 0.909524, 0.911604,
0.91359, 0.91549, 0.917308, 0.919049, 0.920718, 0.92232, 0.923859, 0.925338,
0.926761, 0.92813, 0.929449, 0.930721, 0.931948, 0.933132, 0.934275, 0.93538,
0.936449, 0.937483, 0.938483, 0.939452, 0.940392, 0.941302, 0.942185,
0.943042, 0.943874, 0.944682, 0.945467, 0.94623, 0.946972, 0.947694,
0.948396, 0.94908, 0.949746, 0.950395, 0.951027, 0.951643, 0.952244,
0.952831, 0.953403, 0.953961, 0.954506, 0.955039, 0.955559, 0.956067,
0.956563, 0.957049, 0.957524, 0.957988, 0.958443, 0.958887, 0.959323,
0.959749, 0.960166, 0.960575, 0.960975, 0.961368, 0.961752, 0.962129,
0.962499, 0.962861, 0.963217, 0.963566, 0.963908, 0.964244, 0.964574,
0.964897, 0.965215, 0.965527, 0.965834, 0.966135, 0.966431, 0.966722,
0.967007, 0.967288, 0.967564, 0.967836, 0.968103, 0.968366, 0.968624,
0.968878, 0.969128, 0.969374, 0.969617, 0.969855, 0.97009, 0.970321,
0.970548, 0.970772, 0.970993, 0.97121, 0.971425, 0.971636, 0.971843,
0.972048, 0.97225, 0.972449, 0.972645, 0.972839, 0.973029, 0.973217,
0.973403, 0.973586, 0.973766, 0.973944, 0.97412, 0.974293, 0.974464,
0.974632, 0.974799, 0.974963, 0.975125, 0.975285, 0.975443, 0.975599,
0.975753, 0.975905, 0.976055, 0.976204, 0.97635, 0.976495, 0.976638,
0.976779, 0.976918, 0.977056, 0.977193, 0.977327, 0.97746, 0.977592,
0.977722, 0.97785, 0.977977, 0.978103, 0.978227, 0.978349, 0.978471,
0.978591, 0.978709, 0.978827, 0.978943, 0.979058, 0.979171, 0.979283,
0.979395, 0.979504, 0.979613, 0.979721, 0.979827, 0.979933, 0.980037,
0.98014, 0.980242, 0.980343, 0.980443, 0.980543, 0.980641, 0.980738,
0.980834, 0.980929, 0.981023, 0.981116, 0.981209, 0.9813, 0.981391, 0.981481,
0.981569, 0.981657, 0.981745, 0.981831, 0.981916, 0.982001, 0.982085,
0.982168, 0.982251, 0.982332, 0.982413, 0.982493, 0.982573, 0.982651,
0.982729, 0.982807, 0.982883, 0.982959, 0.983034, 0.983109, 0.983183,
0.983256, 0.983329, 0.983401, 0.983472, 0.983543, 0.983613, 0.983683,
0.983752, 0.98382, 0.983888, 0.983956, 0.984022, 0.984089, 0.984154,
0.984219, 0.984284, 0.984348, 0.984411, 0.984474, 0.984537, 0.984599,
0.98466, 0.984721, 0.984782, 0.984842, 0.984902, 0.984961, 0.985019,
0.985077, 0.985135, 0.985193, 0.985249, 0.985306, 0.985362, 0.985417,
0.985472, 0.985527, 0.985582, 0.985635, 0.985689, 0.985742, 0.985795,
0.985847, 0.985899, 0.985951, 0.986002, 0.986053, 0.986103, 0.986153,
0.986203, 0.986252, 0.986301, 0.98635, 0.986398, 0.986446, 0.986494,
0.986541, 0.986588, 0.986635, 0.986681, 0.986727, 0.986773, 0.986818,
0.986863, 0.986908, 0.986953, 0.986997, 0.987041, 0.987084, 0.987128,
0.987171, 0.987213, 0.987256, 0.987298, 0.98734, 0.987381, 0.987423,
0.987464, 0.987504, 0.987545, 0.987585, 0.987625, 0.987665, 0.987704,
0.987744, 0.987783, 0.987821, 0.98786, 0.987898, 0.987936, 0.987974,
0.988011, 0.988049, 0.988086, 0.988123, 0.988159, 0.988196, 0.988232,
0.988268, 0.988303, 0.988339, 0.988374, 0.988409, 0.988444, 0.988479,
0.988513, 0.988547, 0.988582, 0.988615, 0.988649, 0.988682, 0.988716,
0.988749, 0.988782, 0.988814, 0.988847, 0.988879, 0.988911, 0.988943,
0.988975, 0.989006, 0.989038, 0.989069, 0.9891, 0.989131, 0.989161, 0.989192,
0.989222, 0.989252, 0.989282, 0.989312, 0.989342, 0.989371, 0.989401,
0.98943, 0.989459, 0.989488, 0.989516, 0.989545, 0.989573, 0.989602, 0.98963,
0.989658, 0.989685, 0.989713, 0.98974, 0.989768, 0.989795, 0.989822,
0.989849, 0.989876, 0.989902, 0.989929, 0.989955, 0.989981, 0.990007,
0.990033, 0.990059, 0.990085, 0.99011, 0.990136, 0.990161, 0.990186,
0.990211, 0.990236, 0.990261, 0.990285, 0.99031, 0.990334, 0.990358,
0.990383, 0.990407, 0.99043, 0.990454, 0.990478, 0.990501, 0.990525,
0.990548, 0.990571, 0.990594, 0.990617, 0.99064, 0.990663, 0.990686,
0.990708, 0.990731, 0.990753, 0.990775, 0.990797, 0.990819, 0.990841,
0.990863, 0.990885, 0.990906, 0.990928, 0.990949, 0.99097, 0.990991,
0.991013, 0.991034, 0.991054, 0.991075, 0.991096, 0.991116, 0.991137,
0.991157, 0.991178, 0.991198, 0.991218, 0.991238, 0.991258, 0.991278,
0.991298, 0.991317, 0.991337, 0.991356, 0.991376, 0.991395, 0.991414,
0.991433, 0.991452, 0.991471, 0.99149, 0.991509, 0.991528, 0.991547,
0.991565, 0.991584, 0.991602, 0.99162, 0.991639, 0.991657, 0.991675,
0.991693, 0.991711, 0.991729, 0.991746, 0.991764, 0.991782, 0.991799,
0.991817, 0.991834, 0.991851, 0.991869, 0.991886, 0.991903, 0.99192,
0.991937, 0.991954, 0.991971, 0.991987, 0.992004, 0.992021, 0.992037,
0.992054, 0.99207, 0.992086, 0.992103, 0.992119, 0.992135, 0.992151,
0.992167, 0.992183, 0.992199, 0.992215, 0.99223, 0.992246, 0.992262,
0.992277, 0.992293, 0.992308, 0.992324, 0.992339, 0.992354, 0.992369,
0.992384, 0.9924, 0.992415, 0.992429, 0.992444, 0.992459, 0.992474, 0.992489,
0.992503, 0.992518, 0.992533, 0.992547, 0.992561, 0.992576, 0.99259,
0.992604, 0.992619, 0.992633, 0.992647, 0.992661, 0.992675, 0.992689,
0.992703, 0.992717, 0.99273, 0.992744, 0.992758, 0.992771, 0.992785,
0.992798, 0.992812, 0.992825, 0.992839, 0.992852, 0.992865, 0.992879,
0.992892, 0.992905, 0.992918, 0.992931, 0.992944, 0.992957, 0.99297,
0.992983, 0.992995, 0.993008, 0.993021, 0.993034, 0.993046, 0.993059,
0.993071, 0.993084, 0.993096, 0.993109, 0.993121, 0.993133, 0.993145,
0.993158, 0.99317, 0.993182, 0.993194, 0.993206, 0.993218, 0.99323, 0.993242,
0.993254, 0.993266, 0.993277, 0.993289, 0.993301, 0.993312, 0.993324,
0.993336, 0.993347, 0.993359, 0.99337, 0.993382, 0.993393, 0.993404,
0.993416, 0.993427, 0.993438, 0.993449, 0.99346, 0.993472, 0.993483,
0.993494, 0.993505, 0.993516, 0.993527, 0.993538, 0.993548, 0.993559,
0.99357, 0.993581, 0.993591, 0.993602, 0.993613, 0.993623, 0.993634,
0.993644, 0.993655, 0.993665, 0.993676, 0.993686, 0.993697, 0.993707,
0.993717, 0.993727, 0.993738, 0.993748, 0.993758, 0.993768, 0.993778,
0.993788, 0.993798, 0.993808, 0.993818, 0.993828, 0.993838, 0.993848,
0.993858, 0.993868, 0.993877, 0.993887, 0.993897, 0.993907, 0.993916,
0.993926, 0.993935, 0.993945, 0.993954, 0.993964, 0.993973, 0.993983,
0.993992, 0.994002, 0.994011, 0.99402, 0.99403, 0.994039, 0.994048, 0.994057,
0.994067, 0.994076, 0.994085, 0.994094, 0.994103, 0.994112, 0.994121,
0.99413, 0.994139, 0.994148, 0.994157, 0.994166, 0.994175, 0.994183,
0.994192, 0.994201, 0.99421, 0.994218, 0.994227, 0.994236, 0.994244,
0.994253, 0.994262, 0.99427, 0.994279, 0.994287, 0.994296, 0.994304,
0.994313, 0.994321, 0.994329, 0.994338, 0.994346, 0.994354, 0.994363,
0.994371, 0.994379, 0.994387, 0.994395, 0.994404, 0.994412, 0.99442,
0.994428, 0.994436, 0.994444, 0.994452, 0.99446, 0.994468, 0.994476,
0.994484, 0.994492, 0.9945, 0.994508, 0.994516, 0.994523, 0.994531, 0.994539,
0.994547, 0.994554, 0.994562, 0.99457, 0.994577, 0.994585, 0.994593, 0.9946,
0.994608, 0.994615, 0.994623, 0.994631, 0.994638, 0.994645, 0.994653,
0.99466, 0.994668, 0.994675, 0.994683, 0.99469, 0.994697, 0.994705, 0.994712,
0.994719, 0.994726, 0.994734, 0.994741, 0.994748, 0.994755, 0.994762,
0.994769, 0.994777, 0.994784, 0.994791, 0.994798, 0.994805, 0.994812,
0.994819, 0.994826, 0.994833, 0.99484, 0.994847, 0.994854, 0.99486, 0.994867,
0.994874, 0.994881, 0.994888, 0.994895, 0.994901, 0.994908, 0.994915,
0.994922, 0.994928, 0.994935, 0.994942, 0.994948, 0.994955, 0.994962,
0.994968, 0.994975, 0.994981, 0.994988, 0.994994, 0.995001, 0.995007,
0.995014, 0.99502, 0.995027, 0.995033, 0.99504, 0.995046, 0.995052, 0.995059,
0.995065, 0.995071, 0.995078, 0.995084, 0.99509, 0.995097, 0.995103,
0.995109, 0.995115, 0.995121, 0.995128, 0.995134, 0.99514, 0.995146,
0.995152, 0.995158, 0.995164, 0.995171, 0.995177, 0.995183, 0.995189,
0.995195, 0.995201, 0.995207, 0.995213, 0.995219, 0.995225, 0.995231,
0.995236, 0.995242, 0.995248, 0.995254, 0.99526, 0.995266, 0.995272,
0.995277, 0.995283, 0.995289, 0.995295, 0.995301, 0.995306, 0.995312,
0.995318, 0.995323, 0.995329, 0.995335, 0.99534, 0.995346, 0.995352,
0.995357, 0.995363, 0.995369, 0.995374, 0.99538, 0.995385, 0.995391,
0.995396, 0.995402, 0.995407, 0.995413, 0.995418, 0.995424, 0.995429,
0.995435, 0.99544, 0.995445, 0.995451, 0.995456, 0.995462, 0.995467,
0.995472, 0.995478, 0.995483, 0.995488, 0.995493, 0.995499, 0.995504,
0.995509, 0.995515, 0.99552, 0.995525, 0.99553, 0.995535, 0.995541, 0.995546,
0.995551, 0.995556, 0.995561, 0.995566, 0.995571, 0.995577, 0.995582,
0.995587, 0.995592, 0.995597, 0.995602, 0.995607, 0.995612, 0.995617,
0.995622, 0.995627, 0.995632, 0.995637, 0.995642, 0.995647, 0.995652,
0.995657, 0.995661, 0.995666, 0.995671, 0.995676, 0.995681, 0.995686,
0.995691, 0.995695, 0.9957, 0.995705, 0.99571, 0.995715, 0.995719, 0.995724,
0.995729, 0.995734, 0.995738, 0.995743, 0.995748, 0.995753, 0.995757,
0.995762, 0.995767, 0.995771, 0.995776, 0.995781, 0.995785, 0.99579,
0.995794, 0.995799, 0.995804, 0.995808, 0.995813, 0.995817, 0.995822,
0.995826, 0.995831, 0.995835, 0.99584, 0.995844, 0.995849, 0.995853,
0.995858, 0.995862, 0.995867, 0.995871, 0.995876, 0.99588, 0.995885,
0.995889, 0.995893, 0.995898, 0.995902, 0.995906, 0.995911, 0.995915,
0.99592, 0.995924, 0.995928, 0.995932, 0.995937, 0.995941, 0.995945, 0.99595,
0.995954, 0.995958, 0.995962, 0.995967, 0.995971, 0.995975, 0.995979,
0.995984, 0.995988, 0.995992, 0.995996, 0.996, 0.996004, 0.996009, 0.996013,
0.996017, 0.996021, 0.996025, 0.996029, 0.996033, 0.996037, 0.996041,
0.996046, 0.99605, 0.996054, 0.996058, 0.996062, 0.996066, 0.99607, 0.996074,
0.996078, 0.996082, 0.996086, 0.99609, 0.996094, 0.996098, 0.996102,
0.996106, 0.99611, 0.996114, 0.996117, 0.996121, 0.996125, 0.996129,
0.996133, 0.996137, 0.996141, 0.996145, 0.996149, 0.996152, 0.996156,
0.99616, 0.996164]
# Run-time configuration parameters (can be set with command-line options)
$rerun=1                 # repetitions of the benchmark payload inside one timed iteration
$inner=1                 # timed iterations per VM invocation
$warmup=1                # warm-up runs per VM invocation
$outer=4                 # VM invocations per benchmark
$quantum=1000            # duration (ms) of one throughput-benchmark iteration
# Which suites run by default; the suite-selection command-line flags
# (--sunspider, --octane, ...) narrow this set.
$includeSunSpider=true
$includeSunSpiderCompileTime=true
$includeLongSpider=false
$includeV8=true
$includeV8CompileTime=true
$includeKraken=true
$includeJSBench=true
$includeMicrobenchmarks=true
$includeAsmBench=true
$includeDSPJS=true
$includeBrowsermarkJS=false
$includeBrowsermarkDOM=false
$includeOctane=true
$includeCompressionBench = false
$includeSixSpeed = false
$includeTailBench = true
$includeBigIntBench = false
$measureGC=false         # when true, generated code skips manual gc() so GC time is measured
$benchmarkPattern=nil    # regexp filter over benchmark names (--benchmarks)
$verbosity=0             # 0 = quiet; >=2 echoes shell commands, >=3 echoes generated files
$timeMode=:preciseTime   # :preciseTime (jsc preciseTime()) or :date (Date.now())
$forceVMKind=nil         # override VM auto-detection: 'jsc', 'DumpRenderTree', 'WebKitTestRunner'
$brief=false             # print only the final result per VM
$silent=false            # suppress progress output
$remoteHosts=[]          # SSH hosts for remote measurement (--remote)
$alsoLocal=false         # also run locally when doing --remote
$sshOptions=[]           # extra options passed through to ssh
$vms = []                # VM configurations to benchmark
$environment = {}        # per-VM/benchmark environment variables (--environment)
$dependencies = []       # additional dependent library paths (--dependencies)
$needToCopyVMs = false   # copy VM builds into the working directory first
$dontCopyVMs = false     # assume remote hosts already have the VMs
$allDRT = true
$outputName = nil        # base name for <base>_report.txt / <base>.json output
$sunSpiderWarmup = true  # run SunSpider-based warm-up before measuring
$configPath = Pathname.new(ENV["HOME"]) + ".run-jsc-benchmarks"
$prepare = true          # generate the runscript
$run = true              # execute the runscript
$analyze = []            # previously captured runscript outputs to re-analyze
# Helpful functions and classes
# Points the user at --help, then aborts the script with a failure status.
def smallUsage
  $stdout.puts("Use the --help option to get basic usage information.")
  exit(1)
end
# Prints the full command-line help text, interpolating the current default
# values for the tunable parameters, then exits with status 1.
# Fix: corrected the "purposeof" typo in the help text.
def usage
  puts "run-jsc-benchmarks [options] <vm1> [<vm2> ...]"
  puts
  puts "Runs one or more JavaScript runtimes against SunSpider, V8, and/or Kraken"
  puts "benchmarks, and reports detailed statistics. What makes run-jsc-benchmarks"
  puts "special is that each benchmark/VM configuration is run in a single VM invocation,"
  puts "and the invocations are run in random order. This minimizes systematics due to"
  puts "one benchmark polluting the running time of another. The fine-grained"
  puts "interleaving of VM invocations further minimizes systematics due to changes in"
  puts "the performance or behavior of your machine."
  puts
  puts "Run-jsc-benchmarks is highly configurable. You can compare as many VMs as you"
  puts "like. You can change the amount of warm-up iterations, number of iterations"
  puts "executed per VM invocation, and the number of VM invocations per benchmark."
  puts
  puts "The <vm> should be either a path to a JavaScript runtime executable (such as"
  puts "jsc), or a string of the form <name>:<path>, where the <path> is the path to"
  puts "the executable and <name> is the name that you would like to give the"
  puts "configuration for the purpose of reporting. If no name is given, a generic name"
  puts "of the form Conf#<n> will be ascribed to the configuration automatically."
  puts
  puts "It's also possible to specify per-VM environment variables. For example, you"
  puts "might specify a VM like Foo:JSC_useJIT=false:/path/to/jsc, in which case the"
  puts "harness will set the JSC_useJIT environment variable to false just before running"
  puts "the given VM. Note that the harness will not unset the environment variable, so"
  puts "you must ensure that your other VMs will use the opposite setting"
  puts "(JSC_useJIT=true in this case)."
  puts
  puts "Options:"
  puts "--rerun <n> Set the number of iterations of the benchmark that"
  puts " contribute to the measured run time. Default is #{$rerun}."
  puts "--inner <n> Set the number of inner (per-runtime-invocation)"
  puts " iterations. Default is #{$inner}."
  puts "--outer <n> Set the number of runtime invocations for each benchmark."
  puts " Default is #{$outer}."
  puts "--warmup <n> Set the number of warm-up runs per invocation. Default"
  puts " is #{$warmup}. This has a different effect on different kinds"
  puts " benchmarks. Some benchmarks have no notion of warm-up."
  puts "--no-ss-warmup Disable SunSpider-based warm-up runs."
  puts "--quantum <n> Set the duration in milliseconds for which an iteration of"
  puts " a throughput benchmark should be run. Default is #{$quantum}."
  puts "--timing-mode Set the way that time is measured. Possible values"
  puts " are 'preciseTime' and 'date'. Default is 'preciseTime'."
  puts "--force-vm-kind Turn off auto-detection of VM kind, and assume that it is"
  puts " the one specified. Valid arguments are 'jsc', "
  puts " 'DumpRenderTree', or 'WebKitTestRunner'."
  puts "--force-vm-copy Force VM builds to be copied to the working directory."
  puts " This may reduce pathologies resulting from path names."
  puts "--dont-copy-vms Don't copy VMs even when doing a remote benchmarking run;"
  puts " instead assume that they are already there."
  puts "--sunspider Only run SunSpider."
  puts "--sunspider-compile-time"
  puts " Only run the SunSpider compile time benchmark."
  puts "--v8-spider Only run SunSpider-style V8."
  puts "--v8-spider-compile-time"
  puts " Only run the SunSpider-style V8 compile time benchmark."
  puts "--kraken Only run Kraken."
  puts "--js-bench Only run JSBench."
  puts "--microbenchmarks Only run microbenchmarks."
  puts "--dsp Only run DSP."
  puts "--asm-bench Only run AsmBench."
  puts "--browsermark-js Only run browsermark-js."
  puts "--browsermark-dom Only run browsermark-dom."
  puts "--octane Only run Octane."
  puts "--tail-bench Only run TailBench"
  puts "--compression-bench Only run compression bench"
  puts " The default is to run all benchmarks. The above options can"
  puts " be combined to run any subset (so --sunspider --dsp will run"
  puts " both SunSpider and DSP)."
  puts "--six-speed Only run SixSpeed."
  puts "--benchmarks Only run benchmarks matching the given regular expression."
  puts "--measure-gc Turn off manual calls to gc(), so that GC time is measured."
  puts " Works best with large values of --inner. You can also say"
  puts " --measure-gc <conf>, which turns this on for one"
  puts " configuration only."
  puts "--verbose or -v Print more stuff."
  puts "--brief Print only the final result for each VM."
  puts "--silent Don't print progress. This might slightly reduce some"
  puts " performance perturbation."
  puts "--remote <sshhosts> Perform performance measurements remotely, on the given"
  puts " SSH host(s). Easiest way to use this is to specify the SSH"
  puts " user@host string. However, you can also supply a comma-"
  puts " separated list of SSH hosts. Alternatively, you can use this"
  puts " option multiple times to specify multiple hosts. This"
  puts " automatically copies the WebKit release builds of the VMs"
  puts " you specified to all of the hosts."
  puts "--ssh-options Pass additional options to SSH."
  puts "--local Also do a local benchmark run even when doing --remote."
  puts "--vms Use a JSON file to specify which VMs to run, as opposed to"
  puts " specifying them on the command line."
  puts "--prepare-only Only prepare the runscript (a shell script that"
  puts " invokes the VMs to run benchmarks) but don't run it."
  puts "--analyze Only read the output of the runscript but don't do anything"
  puts " else. This requires passing the same arguments that you"
  puts " passed when running --prepare-only."
  puts "--output-name Base of the filenames to put results into. Will write a file"
  puts " called <base>_report.txt and <base>.json. By default this"
  puts " name is automatically synthesized from the machine name,"
  puts " date, set of benchmarks run, and set of configurations."
  puts "--environment JSON file that specifies the environment variables that should"
  puts " be used for particular VMs and benchmarks."
  puts "--config <path> Specify the path of the configuration file. Defaults to"
  puts " ~/.run-jsc-benchmarks"
  puts "--dependencies Additional dependent library paths."
  puts "--help or -h Display this message."
  puts
  puts "Example:"
  puts "run-jsc-benchmarks TipOfTree:/Volumes/Data/pizlo/OpenSource/WebKitBuild/Release/jsc MyChanges:/Volumes/Data/pizlo/secondary/OpenSource/WebKitBuild/Release/jsc"
  exit 1
end
# Reports a failure (with a stack trace when +reason+ carries one) and
# aborts via smallUsage. NOTE: this deliberately shadows Kernel#fail.
def fail(reason)
  puts "FAILED: #{reason.inspect}"
  if reason.respond_to? :backtrace
    puts "Stack trace:"
    puts reason.backtrace.join("\n")
  end
  smallUsage
end
# Writes a one-line error (+r1+) to stderr, then routes +r2+ through fail,
# which terminates the script.
def quickFail(r1, r2)
  $stderr.puts("#{$0}: #{r1}")
  puts
  fail(r2)
end
# Parses +arg+ as an integer value for the option named +argName+, enforcing
# optional inclusive bounds. Exits with a diagnostic on bad input.
def intArg(argName, arg, min, max)
  result = arg.to_i
  # to_i silently accepts garbage ("12abc" -> 12), so round-trip to reject it.
  if result.to_s != arg
    quickFail("Expected an integer value for #{argName}, but got #{arg}.",
              "Invalid argument for command-line option")
  end
  if min && result < min
    quickFail("Argument for #{argName} cannot be smaller than #{min}.",
              "Invalid argument for command-line option")
  end
  if max && result > max
    quickFail("Argument for #{argName} cannot be greater than #{max}.",
              "Invalid argument for command-line option")
  end
  result
end
# Arithmetic mean of the values. An empty array yields NaN (0.0/0).
def computeMean(array)
  array.inject(0.0) { |total, value| total + value } / array.length
end
# Geometric mean, computed in log space to avoid overflow on large products.
def computeGeometricMean(array)
  logSum = array.inject(0.0) { |total, value| total + Math.log(value) }
  Math.exp(logSum * (1.0 / array.length))
end
# Harmonic mean: reciprocal of the arithmetic mean of the reciprocals.
def computeHarmonicMean(array)
  reciprocals = array.collect { |value| 1.0 / value }
  1.0 / computeMean(reciprocals)
end
# Sample (n-1) standard deviation. Empty input yields NaN, a single sample
# yields 0.0, and any arithmetic failure falls back to NaN.
def computeStdDev(array)
  return 0.0/0.0 if array.empty?
  return 0.0 if array.length == 1
  begin
    m = computeMean(array)
    squaredError = array.inject(0.0) { |total, value| total + (value - m)**2 }
    Math.sqrt(squaredError / (array.length - 1))
  rescue
    0.0/0.0
  end
end
class Array
  # In-place shuffle: repeatedly extracts a random element from the shrinking
  # prefix and appends it. Replaces the core Array#shuffle! with the same
  # contract (returns self).
  def shuffle!
    n = size
    while n >= 1
      push(delete_at(rand(n)))
      n -= 1
    end
    self
  end
end
# Looks up the precomputed inverse regularized incomplete beta function value
# for +n+ (1-indexed into IBR_LOOKUP above). Used by Stats#confInt to form
# 95% Student's t confidence intervals. Out-of-range n returns nil.
def inverseBetaRegularized(n)
  IBR_LOOKUP[n-1]
end
# Formats +num+ with (4 + decimalShift) digits after the decimal point.
def numToStr(num, decimalShift)
  format("%.#{4 + decimalShift}f", num.to_f)
end
# Comparison verdict used when two result sets cannot be compared at all
# (e.g. one of them is in an error state). Renders as blank everywhere.
class CantSay
  def initialize
  end

  # One-character marker for compact tabular output.
  def shortForm
    " "
  end

  def longForm
    ""
  end

  def to_s
    ""
  end
end
# Comparison verdict: no statistically significant difference, though the
# point estimate leans faster. Only renders text when the lean exceeds 1%.
class NoChange
  attr_reader :amountFaster

  def initialize(amountFaster)
    @amountFaster = amountFaster
  end

  def shortForm
    " "
  end

  def longForm
    " might be #{numToStr(@amountFaster, 0)}x faster"
  end

  def to_s
    @amountFaster < 1.01 ? "" : longForm
  end
end
# Comparison verdict: statistically significantly faster (confidence
# intervals do not overlap).
class Faster
  attr_reader :amountFaster

  def initialize(amountFaster)
    @amountFaster = amountFaster
  end

  def shortForm
    "^"
  end

  def longForm
    "^ definitely #{numToStr(@amountFaster, 0)}x faster"
  end

  def to_s
    longForm
  end
end
# Comparison verdict: statistically significantly slower (confidence
# intervals do not overlap).
class Slower
  attr_reader :amountSlower

  def initialize(amountSlower)
    @amountSlower = amountSlower
  end

  def shortForm
    "!"
  end

  def longForm
    "! definitely #{numToStr(@amountSlower, 0)}x slower"
  end

  def to_s
    longForm
  end
end
# Comparison verdict: point estimate is slower but the difference is not
# significant. Renders just "?" when the slowdown is under 1%.
class MayBeSlower
  attr_reader :amountSlower

  def initialize(amountSlower)
    @amountSlower = amountSlower
  end

  def shortForm
    "?"
  end

  def longForm
    "? might be #{numToStr(@amountSlower, 0)}x slower"
  end

  def to_s
    @amountSlower < 1.01 ? "?" : longForm
  end
end
# Converts a value into a form the JSON serializer accepts: integers, finite
# floats, nil, and (recursively) arrays pass through; NaN/Infinity become
# strings since JSON has no representation for them. Anything else is a bug.
# Fix: the original tested `Fixnum`, which was deprecated in Ruby 2.4 and
# removed in 3.2 (NameError at runtime), and which also excluded Bignums;
# `Integer` covers both.
def jsonSanitize(value)
  if value.is_a? Integer
    value
  elsif value.is_a? Float
    if value.nan? or value.infinite?
      value.to_s
    else
      value
    end
  elsif value.is_a? Array
    value.map{|v| jsonSanitize(v)}
  elsif value.nil?
    value
  else
    raise "Unrecognized value #{value.inspect}"
  end
end
# Accumulates numeric samples and derives summary statistics over them.
# The accumulator is poisoned (error state) as soon as it sees a nil/false,
# NaN, or infinite sample; once poisoned it stays poisoned.
class Stats
  def initialize
    @array = []
  end

  # Folds a sample into the set. Accepts plain numbers, Floats, other Stats
  # objects (their samples are merged in), or any enumerable of these.
  def add(value)
    if !value || !@array
      @array = nil
    elsif value.is_a?(Float)
      # Non-finite floats poison the accumulator rather than skewing it.
      if value.nan? || value.infinite?
        @array = nil
      else
        @array << value
      end
    elsif value.is_a?(Stats)
      add(value.array)
    elsif value.respond_to?(:each)
      value.each { |v| add(v) }
    else
      @array << value.to_f
    end
  end

  def status
    @array ? :ok : :error
  end

  def error?
    # TODO: We're probably still not handling this case correctly.
    !@array || @array.empty?
  end

  def ok?
    !!@array
  end

  attr_reader :array

  def sum
    @array.inject(0) { |total, value| total + value }
  end

  def min
    @array.min
  end

  def max
    @array.max
  end

  def size
    @array.length
  end

  def mean
    computeMean(array)
  end

  def arithmeticMean
    mean
  end

  def stdDev
    computeStdDev(array)
  end

  def stdErr
    stdDev / Math.sqrt(size)
  end

  # Computes a 95% Student's t distribution confidence interval
  def confInt
    return 0.0/0.0 if size < 2
    raise if size > 1000 # the IBR_LOOKUP table only goes this far
    Math.sqrt(size - 1.0) * stdErr * Math.sqrt(-1.0 + 1.0 / inverseBetaRegularized(size - 1))
  end

  def lower
    mean - confInt
  end

  def upper
    mean + confInt
  end

  def geometricMean
    computeGeometricMean(array)
  end

  def harmonicMean
    computeHarmonicMean(array)
  end

  # Compares this sample set against another: disjoint confidence intervals
  # give a definite verdict, overlapping ones a hedged verdict.
  def compareTo(other)
    return CantSay.new unless ok? && other.ok?
    if upper < other.lower
      Faster.new(other.mean/mean)
    elsif lower > other.upper
      Slower.new(mean/other.mean)
    elsif mean > other.mean
      MayBeSlower.new(mean/other.mean)
    else
      NoChange.new(other.mean/mean)
    end
  end

  def to_s
    "size = #{size}, mean = #{mean}, stdDev = #{stdDev}, stdErr = #{stdErr}, confInt = #{confInt}"
  end

  def jsonMap
    if ok?
      {"data"=>jsonSanitize(@array), "mean"=>jsonSanitize(mean), "confInt"=>jsonSanitize(confInt)}
    else
      "ERROR"
    end
  end
end
# Writes +msg+ to +out2+; at verbosity >= 3 also echoes it (prefixed with
# out2's path) to +out1+ so generated files can be traced on the console.
def doublePuts(out1, out2, msg)
  if $verbosity >= 3
    out1.puts("#{out2.path}: #{msg}")
  end
  out2.puts(msg)
end
# A uniquely named scratch file under BENCH_DATA_PATH. A class-wide counter
# is woven into every name so repeated generations never collide.
class Benchfile < File
  @@counter = 0
  attr_reader :filename, :basename
  def initialize(name)
    @basename, @filename = Benchfile.uniqueFilename(name)
    super(@filename, "w")
  end

  # Produces a fresh [basename, full path] pair. +name+ may be a
  # [prefix, suffix] pair (the counter goes between them, e.g. to keep a
  # file extension) or a plain string (the counter is appended).
  def self.uniqueFilename(name)
    if name.is_a? Array
      basename = name[0] + @@counter.to_s + name[1]
    else
      basename = name + @@counter.to_s
    end
    filename = BENCH_DATA_PATH + basename
    @@counter += 1
    # Fix: interpolate the offending path into the error message (the
    # original string contained a garbled "#(unknown)" placeholder).
    raise "Benchfile #{filename} already exists" if FileTest.exist?(filename)
    [basename, filename]
  end

  # Creates the file, yields it for writing, closes it, and returns the
  # basename for later reference from generated scripts.
  def self.create(name)
    file = Benchfile.new(name)
    yield file
    file.close
    file.basename
  end
end
# Cache of data files already copied into the bench data directory, keyed by
# caller-chosen name.
$dataFiles={}

# Copies +filename+'s contents into a fresh Benchfile the first time +key+
# is seen, and returns the generated basename (cached on later calls).
def ensureFile(key, filename)
  $dataFiles[key] ||= Benchfile.create(key) do |outp|
    doublePuts($stderr, outp, IO::read(filename))
  end
  $dataFiles[key]
end
# Helper for files that cannot be renamed.
# Tracks which source files (and which destination subdirectories) have
# already been copied/created under BENCH_DATA_PATH.
$absoluteFiles={}

# Copies +filename+ into BENCH_DATA_PATH keeping its own basename. When
# +basedir+ is given and the file lives below it, the directory structure
# between basedir and the file is recreated under BENCH_DATA_PATH first.
# Shells out to mkdir/cp; raises if either command fails.
# NOTE(review): the memo check below uses the argument as passed, while the
# final memo insertion stores a Pathname — the cache only hits when callers
# pass Pathnames (as ensureBenchmarkFiles does); confirm for other callers.
def ensureAbsoluteFile(filename, basedir=nil)
  return if $absoluteFiles[filename]
  filename = Pathname.new(filename)
  directory = Pathname.new('')
  if basedir and filename.dirname != basedir
    # Walk upward from the file's directory to basedir, accumulating the
    # relative path, e.g. basedir=/a, file=/a/b/c/f.js => directory=b/c.
    remainingPath = filename.dirname
    while remainingPath != basedir
      directory = remainingPath.basename + directory
      remainingPath = remainingPath.dirname
    end
    if not $absoluteFiles[directory]
      cmd = "mkdir -p #{Shellwords.shellescape((BENCH_DATA_PATH + directory).to_s)}"
      $stderr.puts ">> #{cmd}" if $verbosity >= 2
      raise unless system(cmd)
      # Record every intermediate directory so later calls skip the mkdir.
      intermediateDirectory = Pathname.new(directory)
      while intermediateDirectory.basename.to_s != "."
        $absoluteFiles[intermediateDirectory] = true
        intermediateDirectory = intermediateDirectory.dirname
      end
    end
  end
  cmd = "cp #{Shellwords.shellescape(filename.to_s)} #{Shellwords.shellescape((BENCH_DATA_PATH + directory + filename.basename).to_s)}"
  $stderr.puts ">> #{cmd}" if $verbosity >= 2
  raise unless system(cmd)
  $absoluteFiles[filename] = true
end
# Helper for large benchmarks with lots of files and directories.
# Recursively copies every regular file under +rootdir+ into the bench data
# directory, preserving structure relative to rootdir. Entries starting with
# a dot (hidden files and directories) are skipped.
def ensureBenchmarkFiles(rootdir)
  pending = [rootdir]
  until pending.empty?
    dir = pending.pop
    Dir.foreach(dir.to_s) do |entry|
      next if entry.match(/^\./)
      path = dir + entry
      pending.push(path) if File.directory?(path.to_s)
      ensureAbsoluteFile(path, rootdir) if File.file?(path.to_s)
    end
  end
end
# Pairs the two renderings of a benchmark command: +js+ for the jsc shell
# and +html+ for browser-based harnesses.
class JSCommand
  attr_reader :js, :html

  def initialize(js, html)
    @js = js
    @html = html
  end
end
# Stages +filename+ into the bench data directory (via ensureFile) and
# returns the shell/html command pair that loads it.
def loadCommandForFile(key, filename)
  staged = ensureFile(key, filename)
  js = "load(#{staged.inspect});"
  html = "<script src=#{staged.inspect}></script>"
  JSCommand.new(js, html)
end
# Wraps a literal JavaScript statement as a command pair: verbatim for the
# shell, inside an inline <script> tag for browsers.
def simpleCommand(command)
  html = "<script type=\"text/javascript\">#{command}</script>"
  JSCommand.new(command, html)
end
# Benchmark that consists of a single file and must be loaded in its own
# global object each time (i.e. run()).
class SingleFileTimedBenchmarkParameters
  attr_reader :benchPath

  def initialize(benchPath)
    @benchPath = benchPath
  end

  # Discriminator consumed by the code generator's dispatch.
  def kind
    :singleFileTimedBenchmark
  end
end
# Benchmark that consists of a single file and must be loaded in its own
# global object each time (i.e. run()), but returns its result in a custom way.
class SingleFileCustomTimedBenchmarkParameters
  attr_reader :benchPath

  def initialize(benchPath)
    @benchPath = benchPath
  end

  # Discriminator consumed by the code generator's dispatch.
  def kind
    :singleFileCustomTimedBenchmark
  end
end
# Benchmark that consists of a single file and must be loaded in its own
# global object each time (i.e. run()), and returns the time spent compiling.
class SingleFileCompileTimeBenchmarkParameters
  attr_reader :benchPath

  def initialize(benchPath)
    @benchPath = benchPath
  end

  # Discriminator consumed by the code generator's dispatch.
  def kind
    :singleFileCompileTimeBenchmark
  end
end
# Benchmark that consists of one or more data files that should be loaded
# globally, followed by a command to run the benchmark.
class MultiFileTimedBenchmarkParameters
  attr_reader :dataPaths, :command

  def initialize(dataPaths, command)
    @dataPaths, @command = dataPaths, command
  end

  # Discriminator consumed by the code generator's dispatch.
  def kind
    :multiFileTimedBenchmark
  end
end
# Benchmark that consists of one or more data files that should be loaded
# globally, followed by a command that runs one short tick of the benchmark.
# The tick is repeated for as many iterations as fit in one quantum
# (1000ms by default).
class ThroughputBenchmarkParameters
  attr_reader :dataPaths, :setUpCommand, :command, :tearDownCommand, :doWarmup, :deterministic, :minimumIterations

  def initialize(dataPaths, setUpCommand, command, tearDownCommand, doWarmup, deterministic, minimumIterations)
    @dataPaths, @setUpCommand, @command, @tearDownCommand =
      dataPaths, setUpCommand, command, tearDownCommand
    @doWarmup, @deterministic, @minimumIterations =
      doWarmup, deterministic, minimumIterations
  end

  # Discriminator consumed by the code generator's dispatch.
  def kind
    :throughputBenchmark
  end
end
# Benchmark that can only run in DumpRenderTree or WebKitTestRunner, with its
# own callback (declared by +callbackDecl+) for reporting results. Otherwise
# behaves like SingleFileTimedBenchmarkParameters.
class SingleFileTimedCallbackBenchmarkParameters
  attr_reader :callbackDecl, :benchPath

  def initialize(callbackDecl, benchPath)
    @callbackDecl, @benchPath = callbackDecl, benchPath
  end

  # Discriminator consumed by the code generator's dispatch.
  def kind
    :singleFileTimedCallbackBenchmark
  end
end
# Emits the JS definition of __bencher_curTimeMS into +file+, honoring the
# global $timeMode: :preciseTime uses jsc's preciseTime(), :date uses
# Date.now(). Raises on an unknown mode before emitting anything.
def emitTimerFunctionCode(file)
  body =
    case $timeMode
    when :preciseTime
      " return preciseTime()*1000"
    when :date
      " return Date.now()"
    else
      raise
    end
  doublePuts($stderr, file, "function __bencher_curTimeMS() {")
  doublePuts($stderr, file, body)
  doublePuts($stderr, file, "}")
end
# Writes the generated driver file(s) for one benchmark run and returns the
# path of the file the VM should execute. The emitted driver prints one line
# per measured inner iteration in the form
#   "<fullname>: <vm>: <outerIteration>: <innerIndex>: Time: <ms>"
# which ParsedResult.parse later consumes. The driver shape depends both on
# the VM type (jsc shell vs. DumpRenderTree/WebKitTestRunner) and on
# benchParams.kind.
def emitBenchRunCodeFile(name, plan, benchParams)
case plan.vm.vmType
when :jsc
# jsc shell: a single JS file that loads data, times the workload, and
# prints results via print().
Benchfile.create("bencher") {
| file |
emitTimerFunctionCode(file)
if benchParams.kind == :multiFileTimedBenchmark
# Load all data files globally, then time the command $warmup+$inner
# times; only iterations past the warmup are reported.
benchParams.dataPaths.each {
| path |
doublePuts($stderr,file,"load(#{path.inspect});")
}
doublePuts($stderr,file,"gc();")
doublePuts($stderr,file,"for (var __bencher_index = 0; __bencher_index < #{$warmup+$inner}; ++__bencher_index) {")
doublePuts($stderr,file," var __before = __bencher_curTimeMS();")
$rerun.times {
doublePuts($stderr,file," #{benchParams.command.js}")
}
doublePuts($stderr,file," var __after = __bencher_curTimeMS();")
doublePuts($stderr,file," if (__bencher_index >= #{$warmup}) print(\"#{name}: #{plan.vm}: #{plan.iteration}: \" + (__bencher_index - #{$warmup}) + \": Time: \"+(__after-__before));");
# Forcing gc() between iterations keeps GC cost out of the measurement
# unless this VM is explicitly measuring GC.
doublePuts($stderr,file," gc();") unless plan.vm.shouldMeasureGC
doublePuts($stderr,file,"}")
elsif benchParams.kind == :throughputBenchmark
# Throughput: run as many ticks as fit in a quantum ($quantum ms) and
# report mean time per tick. In deterministic mode each iteration runs
# exactly minimumIterations ticks instead.
# NOTE(review): emitTimerFunctionCode was already called above; this
# second call harmlessly redefines __bencher_curTimeMS in the output.
emitTimerFunctionCode(file)
benchParams.dataPaths.each {
| path |
doublePuts($stderr,file,"load(#{path.inspect});")
}
doublePuts($stderr,file,"#{benchParams.setUpCommand.js}")
if benchParams.doWarmup
warmup = $warmup
else
warmup = 0
end
doublePuts($stderr,file,"for (var __bencher_index = 0; __bencher_index < #{warmup + $inner}; __bencher_index++) {")
doublePuts($stderr,file," var __before = __bencher_curTimeMS();")
doublePuts($stderr,file," var __after = __before;")
doublePuts($stderr,file," var __runs = 0;")
doublePuts($stderr,file," var __expected = #{$quantum};")
doublePuts($stderr,file," while (true) {")
$rerun.times {
doublePuts($stderr,file," #{benchParams.command.js}")
}
doublePuts($stderr,file," __runs++;")
doublePuts($stderr,file," __after = __bencher_curTimeMS();")
if benchParams.deterministic
doublePuts($stderr,file," if (true) {")
else
doublePuts($stderr,file," if (__after - __before >= __expected) {")
end
# Warmup iterations break out immediately; measured iterations keep
# extending the deadline until minimumIterations ticks have run.
doublePuts($stderr,file," if (__runs >= #{benchParams.minimumIterations} || __bencher_index < #{warmup})")
doublePuts($stderr,file," break;")
doublePuts($stderr,file," __expected += #{$quantum}")
doublePuts($stderr,file," }")
doublePuts($stderr,file," }")
doublePuts($stderr,file," if (__bencher_index >= #{warmup}) print(\"#{name}: #{plan.vm}: #{plan.iteration}: \" + (__bencher_index - #{warmup}) + \": Time: \"+((__after-__before)/__runs));")
doublePuts($stderr,file,"}")
doublePuts($stderr,file,"#{benchParams.tearDownCommand.js}")
elsif benchParams.kind == :singleFileCustomTimedBenchmark
# Custom-timed: the benchmark's own run() fills in result.value, which
# is reported directly as the time.
doublePuts($stderr,file,"function __bencher_run(__bencher_what) {")
doublePuts($stderr,file," var __bencher_result = {value: null};")
$rerun.times {
doublePuts($stderr,file," run(__bencher_what, __bencher_result);")
}
doublePuts($stderr,file," if (__bencher_result.value == null)")
doublePuts($stderr,file," throw new Error(\"Null result\");")
doublePuts($stderr,file," return __bencher_result.value;")
doublePuts($stderr,file,"}")
$warmup.times {
doublePuts($stderr,file,"__bencher_run(#{benchParams.benchPath.inspect})")
doublePuts($stderr,file,"gc();") unless plan.vm.shouldMeasureGC
}
$inner.times {
| innerIndex |
doublePuts($stderr,file,"print(\"#{name}: #{plan.vm}: #{plan.iteration}: #{innerIndex}: Time: \"+__bencher_run(#{benchParams.benchPath.inspect}));")
doublePuts($stderr,file,"gc();") unless plan.vm.shouldMeasureGC
}
elsif benchParams.kind == :singleFileCompileTimeBenchmark
# Compile time: report the delta of the shell's totalCompileTime()
# across one run of the file.
doublePuts($stderr,file,"function __bencher_run(__bencher_what) {")
doublePuts($stderr,file," var __compileTimeBefore = totalCompileTime();")
$rerun.times {
doublePuts($stderr,file," run(__bencher_what);")
}
doublePuts($stderr,file," return totalCompileTime() - __compileTimeBefore;")
doublePuts($stderr,file,"}")
$warmup.times {
doublePuts($stderr,file,"__bencher_run(#{benchParams.benchPath.inspect})")
doublePuts($stderr,file,"gc();") unless plan.vm.shouldMeasureGC
}
$inner.times {
| innerIndex |
doublePuts($stderr,file,"print(\"#{name}: #{plan.vm}: #{plan.iteration}: #{innerIndex}: Time: \"+__bencher_run(#{benchParams.benchPath.inspect}));")
doublePuts($stderr,file,"gc();") unless plan.vm.shouldMeasureGC
}
else
# Default: a single timed file, wall-clock around run().
raise unless benchParams.kind == :singleFileTimedBenchmark
doublePuts($stderr,file,"function __bencher_run(__bencher_what) {")
doublePuts($stderr,file," var __bencher_before = __bencher_curTimeMS();")
$rerun.times {
doublePuts($stderr,file," run(__bencher_what);")
}
doublePuts($stderr,file," var __bencher_after = __bencher_curTimeMS();")
doublePuts($stderr,file," return __bencher_after - __bencher_before;")
doublePuts($stderr,file,"}")
$warmup.times {
doublePuts($stderr,file,"__bencher_run(#{benchParams.benchPath.inspect})")
doublePuts($stderr,file,"gc();") unless plan.vm.shouldMeasureGC
}
$inner.times {
| innerIndex |
doublePuts($stderr,file,"print(\"#{name}: #{plan.vm}: #{plan.iteration}: #{innerIndex}: Time: \"+__bencher_run(#{benchParams.benchPath.inspect}));")
doublePuts($stderr,file,"gc();") unless plan.vm.shouldMeasureGC
}
end
}
when :dumpRenderTree, :webkitTestRunner
# Browser harness: produce an HTML page plus JS/CSS support files. The
# benchmark runs inside a freshly rebuilt iframe per iteration and reports
# back through window.parent.reportResult.
case $timeMode
when :preciseTime
curTime = "(testRunner.preciseTime()*1000)"
when :date
curTime = "(Date.now())"
else
raise
end
# Main driver: counts iterations and re-runs until $warmup+$inner done.
mainCode = Benchfile.create("bencher") {
| file |
doublePuts($stderr,file,"__bencher_count = 0;")
doublePuts($stderr,file,"function __bencher_doNext(result) {")
doublePuts($stderr,file," if (__bencher_count >= #{$warmup})")
doublePuts($stderr,file," debug(\"#{name}: #{plan.vm}: #{plan.iteration}: \" + (__bencher_count - #{$warmup}) + \": Time: \" + result);")
doublePuts($stderr,file," __bencher_count++;")
doublePuts($stderr,file," if (__bencher_count < #{$inner+$warmup})")
doublePuts($stderr,file," __bencher_runImpl(__bencher_doNext);")
doublePuts($stderr,file," else")
doublePuts($stderr,file," quit();")
doublePuts($stderr,file,"}")
doublePuts($stderr,file,"__bencher_runImpl(__bencher_doNext);")
}
cssCode = Benchfile.create("bencher-css") {
| file |
doublePuts($stderr,file,".pass {\n font-weight: bold;\n color: green;\n}\n.fail {\n font-weight: bold;\n color: red;\n}\n\#console {\n white-space: pre-wrap;\n font-family: monospace;\n}")
}
# Support code: debug/quit/reportResult plumbing and the per-iteration
# iframe rebuild in __bencher_runImpl.
preCode = Benchfile.create("bencher-pre") {
| file |
doublePuts($stderr,file,"if (window.testRunner) {")
doublePuts($stderr,file," testRunner.dumpAsText(window.enablePixelTesting);")
doublePuts($stderr,file," testRunner.waitUntilDone();")
doublePuts($stderr,file,"}")
doublePuts($stderr,file,"")
doublePuts($stderr,file,"function debug(msg)")
doublePuts($stderr,file,"{")
doublePuts($stderr,file," var span = document.createElement(\"span\");")
doublePuts($stderr,file," document.getElementById(\"console\").appendChild(span); // insert it first so XHTML knows the namespace")
doublePuts($stderr,file," span.innerHTML = msg + '<br />';")
doublePuts($stderr,file,"}")
doublePuts($stderr,file,"")
doublePuts($stderr,file,"function quit() {")
doublePuts($stderr,file," testRunner.notifyDone();")
doublePuts($stderr,file,"}")
doublePuts($stderr,file,"")
doublePuts($stderr,file,"__bencher_continuation=null;")
doublePuts($stderr,file,"")
doublePuts($stderr,file,"function reportResult(result) {")
doublePuts($stderr,file," __bencher_continuation(result);")
doublePuts($stderr,file,"}")
doublePuts($stderr,file,"")
doublePuts($stderr,file,"function currentTimeInMS(msg)")
doublePuts($stderr,file,"{")
doublePuts($stderr,file," return #{curTime};")
doublePuts($stderr,file,"}")
if benchParams.kind == :singleFileTimedCallbackBenchmark
doublePuts($stderr,file,"")
doublePuts($stderr,file,benchParams.callbackDecl)
end
doublePuts($stderr,file,"")
doublePuts($stderr,file,"function __bencher_runImpl(continuation) {")
doublePuts($stderr,file," function doit() {")
doublePuts($stderr,file," document.getElementById(\"frameparent\").innerHTML = \"\";")
doublePuts($stderr,file," document.getElementById(\"frameparent\").innerHTML = \"<iframe id='testframe'>\";")
doublePuts($stderr,file," var testFrame = document.getElementById(\"testframe\");")
doublePuts($stderr,file," testFrame.contentDocument.open();")
doublePuts($stderr,file," testFrame.contentDocument.write(\"<!DOCTYPE html>\\n<head></head><body><div id=\\\"console\\\"></div>\");")
if benchParams.kind == :throughputBenchmark or benchParams.kind == :multiFileTimedBenchmark
benchParams.dataPaths.each {
| path |
# .inspect.inspect[1..-2] double-escapes the path so it survives being
# embedded in a JS string that is itself written via document.write.
doublePuts($stderr,file," testFrame.contentDocument.write(\"<script src=#{path.inspect.inspect[1..-2]}></script>\");")
}
end
if benchParams.kind == :throughputBenchmark
if benchParams.doWarmup
warmup = $warmup
else
warmup = 0
end
doublePuts($stderr,file," testFrame.contentDocument.write(\"<script type=\\\"text/javascript\\\">\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"#{benchParams.setUpCommand.js}\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"var __bencher_before = #{curTime};\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"var __bencher_after = __bencher_before;\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"var __bencher_expected = #{$quantum};\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"var __bencher_runs = 0;\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"while (true) {\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\" #{benchParams.command.js}\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\" __bencher_runs++;\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\" __bencher_after = #{curTime};\");")
if benchParams.deterministic
doublePuts($stderr,file," testFrame.contentDocument.write(\" if (true) {\");")
else
doublePuts($stderr,file," testFrame.contentDocument.write(\" if (__bencher_after - __bencher_before >= __bencher_expected) {\");")
end
doublePuts($stderr,file," testFrame.contentDocument.write(\" if (__bencher_runs >= #{benchParams.minimumIterations} || window.parent.__bencher_count < #{warmup})\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\" break;\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\" __bencher_expected += #{$quantum}\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\" }\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"}\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"#{benchParams.tearDownCommand.js}\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"window.parent.reportResult((__bencher_after - __bencher_before) / __bencher_runs);\");")
doublePuts($stderr,file," testFrame.contentDocument.write(\"</script>\");")
else
doublePuts($stderr,file," testFrame.contentDocument.write(\"<script type=\\\"text/javascript\\\">var __bencher_before = #{curTime};</script>\");")
if benchParams.kind == :multiFileTimedBenchmark
doublePuts($stderr,file," testFrame.contentDocument.write(#{benchParams.command.html.inspect});")
else
doublePuts($stderr,file," testFrame.contentDocument.write(\"<script src=#{benchParams.benchPath.inspect.inspect[1..-2]}></script>\");")
end
# Callback benchmarks report via their own callback instead of the
# generic end-of-body timer.
unless benchParams.kind == :singleFileTimedCallbackBenchmark
doublePuts($stderr,file," testFrame.contentDocument.write(\"<script type=\\\"text/javascript\\\">window.parent.reportResult(#{curTime} - __bencher_before);</script>\");")
end
end
doublePuts($stderr,file," testFrame.contentDocument.write(\"</body></html>\");")
doublePuts($stderr,file," testFrame.contentDocument.close();")
doublePuts($stderr,file," }")
doublePuts($stderr,file," __bencher_continuation = continuation;")
doublePuts($stderr,file," window.setTimeout(doit, 10);")
doublePuts($stderr,file,"}")
}
# Top-level HTML page tying together the CSS, support code, and driver.
Benchfile.create(["bencher-htmldoc",".html"]) {
| file |
doublePuts($stderr,file,"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML//EN\">\n<html><head><link rel=\"stylesheet\" href=\"#{cssCode}\"><script src=\"#{preCode}\"></script></head><body><div id=\"console\"></div><div id=\"frameparent\"></div><script src=\"#{mainCode}\"></script></body></html>")
}
else
raise
end
end
# Generates the driver file for one benchmark run and tells the plan's VM
# to emit the shell commands that execute it.
def emitBenchRunCode(name, plan, benchParams)
    driverFile = emitBenchRunCodeFile(name, plan, benchParams)
    plan.vm.emitRunCode(driverFile, plan)
end
# Emits shell commands into $script that create a file via echo redirection.
# The first puts uses ">" (truncate/create); subsequent ones use ">>".
# close ensures a file that never received any content still exists (and is
# empty) by emitting "rm -f" followed by "touch".
class FileCreator
    def initialize(filename)
        @filename = filename
        @state = :empty
    end
    # Appends one line of text to the file being created.
    def puts(text)
        $script.print "echo #{Shellwords.shellescape(text)}"
        if @state == :empty
            $script.print " > "
            @state = :nonEmpty
        else
            $script.print " >> "
        end
        $script.puts "#{Shellwords.shellescape(@filename)}"
    end
    def close
        if @state == :empty
            # BUG FIX: the original referenced an undefined local `text` here,
            # raising NameError whenever an empty file was closed. The target
            # of rm/touch is the file being created, i.e. @filename.
            $script.puts "rm -f #{Shellwords.shellescape(@filename)}"
            $script.puts "touch #{Shellwords.shellescape(@filename)}"
        end
    end
    # Block form: yields the creator and always closes it afterwards.
    def self.open(filename)
        outp = FileCreator.new(filename)
        yield outp
        outp.close
    end
end
# Emits the run code for a self-contained (HTML-harness) benchmark. First
# writes configFile — a JS file the harness loads — carrying the result-line
# prefix, warmup/inner iteration counts, the benchmark payload (as JSON), and
# a current-time function matching $timeMode; then emits the shell commands
# that run targetFile on the plan's VM.
def emitSelfContainedBenchRunCode(name, plan, targetFile, configFile, benchmark)
FileCreator.open(configFile) {
| outp |
outp.puts "__bencher_message = \"#{name}: #{plan.vm}: #{plan.iteration}: \";"
outp.puts "__bencher_warmup = #{$warmup};"
outp.puts "__bencher_inner = #{$inner};"
outp.puts "__bencher_benchmark = #{benchmark.to_json};"
case $timeMode
when :preciseTime
outp.puts "__bencher_curTime = (function(){ return testRunner.preciseTime() * 1000; });"
when :date
outp.puts "__bencher_curTime = (function(){ return Date.now(); });"
else
raise
end
}
plan.vm.emitRunCode(targetFile, plan)
end
# Finds the unique plan matching a "suite/benchmark" full name, VM name, and
# iteration. Returns nil for warmup lines (suite "WARMUP"); raises if the
# name has no slash or if anything other than exactly one plan matches.
# `string` is the raw output line, used only for error messages.
def planForDescription(string, plans, benchFullname, vmName, iteration)
    slashIndex = benchFullname.index("/")
    raise "Unexpected benchmark full name: #{benchFullname.inspect}, string: #{string.inspect}" unless slashIndex
    suiteName = benchFullname[0...slashIndex]
    return nil if suiteName == "WARMUP"
    benchName = benchFullname[(slashIndex + 1)..-1]
    matching = plans.select {
        | plan |
        plan.suite.name == suiteName and plan.benchmark.name == benchName and plan.vm.name == vmName and plan.iteration == iteration
    }
    raise "Unexpected result dimensions: #{matching.inspect}, string: #{string.inspect}" unless matching.size == 1
    matching[0]
end
# One parsed line of benchmark output: a (plan, innerIndex) measurement that
# either succeeded with a time or crashed.
class ParsedResult
attr_reader :plan, :innerIndex, :time, :result
# time is either a Numeric (milliseconds) or :crashed, in which case
# @result is :error and @time stays nil.
def initialize(plan, innerIndex, time)
@plan = plan
@innerIndex = innerIndex
if time == :crashed
@result = :error
else
@time = time
@result = :success
end
raise unless @plan.is_a? BenchPlan
raise unless @innerIndex.is_a? Integer
raise unless @time.is_a? Numeric or @result == :error
end
def benchmark
plan.benchmark
end
def suite
plan.suite
end
def vm
plan.vm
end
def outerIndex
plan.iteration
end
# Like new, but tolerates a nil plan (e.g. warmup lines) by returning nil.
def self.create(plan, innerIndex, time)
if plan
ParsedResult.new(plan, innerIndex, time)
else
nil
end
end
# Parses one output line of the form
#   "<fullname>: <vm>: <outer>: <inner>: Time: <ms>"   or
#   "<fullname>: <vm>: <outer>: <inner>: CRASHED"
# Returns a ParsedResult, or nil if the line matches neither form or belongs
# to a warmup plan.
def self.parse(plans, string)
if string =~ /([a-zA-Z0-9\/_.-]+): ([a-zA-Z0-9_#. ]+): ([0-9]+): ([0-9]+): Time: /
benchFullname = $1
vmName = $2
outerIndex = $3.to_i
innerIndex = $4.to_i
# Everything after "Time: " is the measurement in milliseconds.
time = $~.post_match.to_f
ParsedResult.create(planForDescription(string, plans, benchFullname, vmName, outerIndex), innerIndex, time)
elsif string =~ /([a-zA-Z0-9\/_.-]+): ([a-zA-Z0-9_#. ]+): ([0-9]+): ([0-9]+): CRASHED/
benchFullname = $1
vmName = $2
outerIndex = $3.to_i
innerIndex = $4.to_i
# Parsed but unused for crashes; the result is recorded as :crashed.
time = $~.post_match.to_f
ParsedResult.create(planForDescription(string, plans, benchFullname, vmName, outerIndex), innerIndex, :crashed)
else
nil
end
end
end
# Wraps one VM binary under test (jsc shell, DumpRenderTree, or
# WebKitTestRunner). Detects the VM type from the binary path, tries to
# discover the checkout's svn revision and library paths, and emits the shell
# commands that set up the environment and run a benchmark driver file.
class VM
# Union of every extra env var ever set on any VM, so emitRunCode can unset
# stale ones before each run.
@@extraEnvSet = {}
def initialize(origPath, name, nameKind, svnRevision)
@origPath = origPath.to_s
@path = origPath.to_s
@name = name
@nameKind = nameKind
@extraEnv = {}
# $forceVMKind overrides detection; otherwise infer from the binary name.
if $forceVMKind
@vmType = $forceVMKind
else
if @origPath =~ /DumpRenderTree$/
@vmType = :dumpRenderTree
elsif @origPath =~ /WebKitTestRunner$/
@vmType = :webkitTestRunner
else
@vmType = :jsc
end
end
@svnRevision = svnRevision
# Try to detect information about the VM.
if path =~ /\/WebKitBuild\/(Release|Debug)+\/([a-zA-Z]+)$/
@checkoutPath = $~.pre_match
# FIXME: Use some variant of this:
# <bdash> def retrieve_revision
# <bdash> `perl -I#{@path}/Tools/Scripts -MVCSUtils -e 'print svnRevisionForDirectory("#{@path}");'`.to_i
# <bdash> end
# Fall back to scraping `svn info` output when no revision was supplied.
unless @svnRevision
begin
Dir.chdir(@checkoutPath) {
$stderr.puts ">> cd #{@checkoutPath} && svn info" if $verbosity>=2
IO.popen("svn info", "r") {
| inp |
inp.each_line {
| line |
if line =~ /Revision: ([0-9]+)/
@svnRevision = $1
end
}
}
}
unless @svnRevision
$stderr.puts "Warning: running svn info for #{name} silently failed."
end
rescue => e
# Failed to detect svn revision.
$stderr.puts "Warning: could not get svn revision information for #{name}: #{e}"
end
end
else
$stderr.puts "Warning: could not identify checkout location for #{name}"
end
# Recognize known build layouts to find the library directory and a
# relative binary path (needed for copying the VM into the bench dir).
if @path =~ /\/Release\/([a-zA-Z]+)$/
@libPath, @relativeBinPath = [$~.pre_match+"/Release"], "./#{$1}"
elsif @path =~ /\/Debug\/([a-zA-Z]+)$/
@libPath, @relativeBinPath = [$~.pre_match+"/Debug"], "./#{$1}"
elsif @path =~ /\/Release\/bin(64|32|)\/([\.a-zA-Z]+)$/
@libPath, @relativeBinPath = [$~.pre_match+"/Release/lib#{$1}"], "./#{$2}"
elsif @path =~ /\/Debug\/bin(64|32|)\/([\.a-zA-Z]+)$/
@libPath, @relativeBinPath = [$~.pre_match+"/Debug/lib#{$1}"], "./#{$2}"
elsif @path =~ /\/Contents\/Resources\/([a-zA-Z]+)$/
@libPath = [$~.pre_match + "/Contents/Resources", $~.pre_match + "/Contents/Frameworks"]
elsif @path =~ /\/JavaScriptCore.framework\/Resources\/([a-zA-Z]+)$/
@libPath, @relativeBinPath = [$~.pre_match], $&[1..-1]
elsif @path =~ /(DumpRenderTree|webkitTestRunner|jsc)$/
@libPath, @relativeBinPath = [$~.pre_match+"/"], "./#{$1}"
end
@libPath += $dependencies
end
# True when the layout was recognized well enough to copy the VM aside.
def canCopyIntoBenchPath
if @libPath and @relativeBinPath
true
else
false
end
end
def addExtraEnv(key, val)
@extraEnv[key] = val
@@extraEnvSet[key] = true
end
# Copies the VM's libraries/binary into a fresh bench directory and
# repoints @path/@libPath at the copy, isolating the run from rebuilds.
def copyIntoBenchPath
raise unless canCopyIntoBenchPath
basename, filename = Benchfile.uniqueFilename("vm")
raise unless Dir.mkdir(filename)
@libPath.each {
| libPathPart |
cmd = "cp -a #{Shellwords.shellescape(libPathPart)}/* #{Shellwords.shellescape(filename.to_s)}"
$stderr.puts ">> #{cmd}" if $verbosity>=2
raise unless system(cmd)
}
@path = "#{basename}/#{@relativeBinPath}"
@libPath = [basename]
end
def to_s
@name
end
def name
@name
end
# GC is measured either for all VMs ($measureGC == true) or for one named VM.
def shouldMeasureGC
$measureGC == true or ($measureGC == name)
end
def origPath
@origPath
end
def path
@path
end
def nameKind
@nameKind
end
def vmType
@vmType
end
def checkoutPath
@checkoutPath
end
def svnRevision
@svnRevision
end
def extraEnv
@extraEnv
end
# Name of the JS output function available in this VM type.
def printFunction
case @vmType
when :jsc
"print"
when :dumpRenderTree, :webkitTestRunner
"debug"
else
raise @vmType
end
end
# Emits shell commands into $script that set up env vars, run fileToRun on
# this VM, and print CRASHED result lines for every inner iteration if the
# VM exits nonzero.
def emitRunCode(fileToRun, plan)
myLibPath = @libPath
myLibPath = [] unless myLibPath
# Unset anything a previously-run VM may have exported.
@@extraEnvSet.keys.each {
| key |
$script.puts "unset #{Shellwords.shellescape(key)}"
}
$script.puts "export JSC_useDollarVM=true" if @vmType == :jsc
$script.puts "export DYLD_LIBRARY_PATH=#{Shellwords.shellescape(myLibPath.join(':').to_s)}"
$script.puts "export DYLD_FRAMEWORK_PATH=#{Shellwords.shellescape(myLibPath.join(':').to_s)}"
$script.puts "export LD_LIBRARY_PATH=#{Shellwords.shellescape(myLibPath.join(':').to_s)}"
unless myLibPath.empty?
primaryLibPath = myLibPath.first
$script.puts "export TEST_RUNNER_TEST_PLUGIN_PATH=#{Shellwords.shellescape(Pathname.new(primaryLibPath).join('plugins').to_s)}"
$script.puts "export TEST_RUNNER_INJECTED_BUNDLE_FILENAME=#{Shellwords.shellescape(Pathname.new(primaryLibPath).join('libTestRunnerInjectedBundle.so').to_s)}"
end
@extraEnv.each_pair {
| key, val |
$script.puts "export #{Shellwords.shellescape(key)}=#{Shellwords.shellescape(val)}"
}
plan.environment.each_pair {
| key, val |
$script.puts "export #{Shellwords.shellescape(key)}=#{Shellwords.shellescape(val)}"
}
$script.puts "#{path} #{fileToRun} 2>&1 || {"
$script.puts " echo " + Shellwords.shellescape("#{name} failed to run!") + " 1>&2"
$inner.times {
| iteration |
$script.puts " echo " + Shellwords.shellescape("#{plan.prefix}: #{iteration}: CRASHED")
}
$script.puts "}"
plan.environment.keys.each {
| key |
$script.puts "unset #{Shellwords.shellescape(key)}"
}
end
end
# Accumulates one Stats object per (outer, inner) iteration pair and can
# combine them into a single Stats of per-iteration means.
class StatsAccumulator
    def initialize
        @stats = Array.new($outer * $inner) { Stats.new }
    end
    # Stats bucket for one (outerIteration, innerIteration) pair.
    def statsForIteration(outerIteration, innerIteration)
        @stats[outerIteration * $inner + innerIteration]
    end
    # Yields each per-iteration Stats and folds the block's value (or nil for
    # iterations with no valid data) into a combined Stats.
    def stats
        combined = Stats.new
        @stats.each do |bucket|
            combined.add(bucket.ok? ? yield(bucket) : nil)
        end
        combined
    end
    def geometricMeanStats
        stats(&:geometricMean)
    end
    def arithmeticMeanStats
        stats(&:arithmeticMean)
    end
end
# Mixin providing behavior common to every benchmark class: naming relative
# to the owning suite, a default weight of 1, and a default (empty) set of
# extra environment variables.
module Benchmark
    attr_accessor :benchmarkSuite
    attr_reader :name
    # Suite-qualified name, e.g. "SunSpider/3d-cube".
    def fullname
        benchmarkSuite.name + "/" + name
    end
    def to_s
        fullname
    end
    # How many times each sample is counted in aggregate stats; subclasses
    # override for weighted benchmarks.
    def weight
        1
    end
    # Human-readable weight marker, e.g. "x5 "; empty for the default weight.
    def weightString
        # BUG FIX: Fixnum was deprecated in Ruby 2.4 and removed in 3.2;
        # Integer accepts the same values on all supported rubies.
        raise unless weight.is_a? Integer
        raise unless weight >= 1
        if weight == 1
            ""
        else
            "x#{weight} "
        end
    end
    # Extra environment variables to export while running this benchmark.
    def environment
        {}
    end
end
# A single timed JS file from the SunSpider 1.0 suite.
class SunSpiderBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        benchFile = ensureFile("SunSpider-#{@name}", "#{SUNSPIDER_PATH}/#{@name}.js")
        emitBenchRunCode(fullname, plan, SingleFileTimedBenchmarkParameters.new(benchFile))
    end
end
# A single timed JS file from the LongSpider suite.
class LongSpiderBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        benchFile = ensureFile("LongSpider-#{@name}", "#{LONGSPIDER_PATH}/#{@name}.js")
        emitBenchRunCode(fullname, plan, SingleFileTimedBenchmarkParameters.new(benchFile))
    end
end
# A single timed JS file from the classic V8 suite (v8-<name>.js).
class V8Benchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        benchFile = ensureFile("V8-#{@name}", "#{V8_PATH}/v8-#{@name}.js")
        emitBenchRunCode(fullname, plan, SingleFileTimedBenchmarkParameters.new(benchFile))
    end
end
# A throughput benchmark from the "real" V8 suite: loads the shared base, the
# suite file, and a jsc wrapper, then runs jscSetUp/jscRun/jscTearDown ticks.
class V8RealBenchmark
    include Benchmark
    attr_reader :v8SuiteName, :weight
    def initialize(v8SuiteName, name, weight, minimumIterations)
        @v8SuiteName = v8SuiteName
        @name = name
        @weight = weight
        @minimumIterations = minimumIterations
    end
    def emitRunCode(plan)
        scripts = ["base", @v8SuiteName, "jsc-#{@name}"].collect {
            | v |
            ensureFile("V8Real-#{v}", "#{V8_REAL_PATH}/#{v}.js")
        }
        emitBenchRunCode(fullname, plan, ThroughputBenchmarkParameters.new(scripts, simpleCommand("jscSetUp();"), simpleCommand("jscRun();"), simpleCommand("jscTearDown();"), true, false, @minimumIterations))
    end
end
# An Octane throughput benchmark: shared base plus the benchmark's own files
# from OCTANE_PATH, wrapped by a jsc driver from OCTANE_WRAPPER_PATH.
class OctaneBenchmark
    include Benchmark
    attr_reader :weight
    def initialize(files, name, weight, doWarmup, deterministic, minimumIterations)
        @files = files
        @name = name
        @weight = weight
        @doWarmup = doWarmup
        @deterministic = deterministic
        @minimumIterations = minimumIterations
    end
    def emitRunCode(plan)
        scripts = (["base"] + @files).collect {
            | v |
            ensureFile("Octane-#{v}", "#{OCTANE_PATH}/#{v}.js")
        }
        scripts << ensureFile("Octane-jsc-#{@name}", "#{OCTANE_WRAPPER_PATH}/jsc-#{@name}.js")
        emitBenchRunCode(fullname, plan, ThroughputBenchmarkParameters.new(scripts, simpleCommand("jscSetUp();"), simpleCommand("jscRun();"), simpleCommand("jscTearDown();"), @doWarmup, @deterministic, @minimumIterations))
    end
end
# A benchmark that measures itself: its run() reports the time via a result
# object rather than being wall-clock timed by the harness.
class CustomTimedBenchmark
    include Benchmark
    def initialize(name, fullPath)
        @name = name
        @fullPath = fullPath
    end
    def emitRunCode(plan)
        benchFile = ensureFile("CustomTimed-#{@name}", @fullPath)
        emitBenchRunCode(fullname, plan, SingleFileCustomTimedBenchmarkParameters.new(benchFile))
    end
end
# A benchmark whose metric is JSC's aggregate compile time for one file.
class CompileTimeBenchmark
    include Benchmark
    def initialize(name, fullPath)
        @name = name
        @fullPath = fullPath
    end
    def emitRunCode(plan)
        benchFile = ensureFile("CompileTime-#{@name}", @fullPath)
        emitBenchRunCode(fullname, plan, SingleFileCompileTimeBenchmarkParameters.new(benchFile))
    end
    # Compile-time measurement needs compilation on the main thread and the
    # VM's total-compile-time reporting enabled.
    def environment
        {"JSC_useConcurrentJIT" => "false", "JSC_reportTotalCompileTimes" => "true"}
    end
end
# A Kraken benchmark: its data file is loaded globally before timing, then
# the test file itself is timed.
class KrakenBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        dataFile = ensureFile("KrakenData-#{@name}", "#{KRAKEN_PATH}/#{@name}-data.js")
        timedCommand = loadCommandForFile("Kraken-#{@name}", "#{KRAKEN_PATH}/#{@name}.js")
        emitBenchRunCode(fullname, plan, MultiFileTimedBenchmarkParameters.new([dataFile], timedCommand))
    end
end
# A JSBench benchmark; runs in the browser harness and reports through its
# own JSBNG_handleResult callback instead of the generic timer.
class JSBenchBenchmark
    include Benchmark
    attr_reader :jsBenchMode
    def initialize(name, jsBenchMode)
        @name = name
        @jsBenchMode = jsBenchMode
    end
    def emitRunCode(plan)
        # Callback that forwards the benchmark's own timing to the harness,
        # bailing out if the run reported an error.
        callbackDecl = ["function JSBNG_handleResult(result) {\n",
                        " if (result.error) {\n",
                        " console.log(\"Did not run benchmark correctly!\");\n",
                        " quit();\n",
                        " }\n",
                        " reportResult(result.time);\n",
                        "}\n"].join
        benchFile = ensureFile("JSBench-#{@name}", "#{JSBENCH_PATH}/#{@name}/#{@jsBenchMode}.js")
        emitBenchRunCode(fullname, plan, SingleFileTimedCallbackBenchmarkParameters.new(callbackDecl, benchFile))
    end
end
# A single timed JS file from the TailBench suite.
class TailBenchBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        benchFile = ensureFile("TailBench-#{@name}", "#{TAILBENCH_PATH}/#{@name}.js")
        emitBenchRunCode(fullname, plan, SingleFileTimedBenchmarkParameters.new(benchFile))
    end
end
# A single timed JS file from the BigIntBench suite; runs with BigInt
# support explicitly enabled.
class BigIntBenchBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        benchFile = ensureFile("BigIntBench-#{@name}", "#{BIGINTBENCH_PATH}/#{@name}.js")
        emitBenchRunCode(fullname, plan, SingleFileTimedBenchmarkParameters.new(benchFile))
    end
    def environment
        {"JSC_useBigInt" => "true"}
    end
end
# A single timed JS file from the JSC microbenchmarks directory.
class MicrobenchmarksBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        benchFile = ensureFile("Microbenchmarks-#{@name}", "#{MICROBENCHMARKS_PATH}/#{@name}.js")
        emitBenchRunCode(fullname, plan, SingleFileTimedBenchmarkParameters.new(benchFile))
    end
end
# A single timed JS file from the AsmBench suite.
class AsmBenchBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        benchFile = ensureFile("AsmBench-#{@name}", "#{ASMBENCH_PATH}/#{@name}.js")
        emitBenchRunCode(fullname, plan, SingleFileTimedBenchmarkParameters.new(benchFile))
    end
end
# A SixSpeed benchmark: loads the shared wrapper plus the test file, then
# times jscRun for a fixed iteration count.
class SixSpeedBenchmark
    include Benchmark
    def initialize(name, path, iterations)
        @name = name
        @path = path
        @iterations = iterations
    end
    def emitRunCode(plan)
        wrapperFile = ensureFile("SixSpeed-#{@name}-wrapper", "#{SIXSPEED_WRAPPER_PATH}/wrapper.js")
        testFile = ensureFile("SixSpeed-#{@name}", "#{SIXSPEED_PATH}/#{@path}")
        emitBenchRunCode(fullname, plan, MultiFileTimedBenchmarkParameters.new([wrapperFile, testFile], simpleCommand("jscRun(#{@iterations});")))
    end
end
# A compression throughput benchmark. The reported name combines the script
# name and model (lowercased, spaces replaced with dashes); the model string
# is passed to jscSetUp.
class CompressionBenchBenchmark
    include Benchmark
    attr_reader :weight
    def initialize(files, name, model)
        @files = files
        @scriptName = name
        @model = model
        displayName = model.empty? ? name : "#{name}-#{model}"
        @name = displayName.gsub(" ", "-").downcase
        @weight = 1
        @doWarmup = true
        @deterministic = true
        @minimumIterations = 1
    end
    def emitRunCode(plan)
        scripts = (["base"] + @files + ["jsc-#{@scriptName}"]).collect {
            | v |
            ensureFile("Compression-#{v}", "#{COMPRESSIONBENCH_PATH}/#{v}.js")
        }
        emitBenchRunCode(fullname, plan, ThroughputBenchmarkParameters.new(scripts, simpleCommand("jscSetUp('#{@model}');"), simpleCommand("jscRun();"), simpleCommand("jscTearDown();"), @doWarmup, @deterministic, @minimumIterations))
    end
end
# A filtrr image-filter benchmark; self-contained HTML harness run with the
# filter selected by filterKey via the generated config.
class DSPJSFiltrrBenchmark
    include Benchmark
    def initialize(name, filterKey)
        @name = name
        @filterKey = filterKey
    end
    def emitRunCode(plan)
        ["filtrr.js", "filtrr_back.jpg", "filtrr-jquery.min.js", "filtrr-bencher.html"].each {
            | assetName |
            ensureAbsoluteFile(DSPJS_FILTRR_PATH + assetName)
        }
        emitSelfContainedBenchRunCode(fullname, plan, "filtrr-bencher.html", "bencher-config.js", @filterKey)
    end
end
# route9 VP8 decoder benchmark; self-contained HTML harness, weighted 5x.
class DSPJSVP8Benchmark
    include Benchmark
    attr_reader :weight
    def initialize
        @name = "route9-vp8"
        @weight = 5
    end
    def emitRunCode(plan)
        ensureBenchmarkFiles(DSPJS_ROUTE9_PATH)
        emitSelfContainedBenchRunCode(fullname, plan, "route9-bencher.html", "bencher-config.js", "")
    end
end
# Starfield animation benchmark; self-contained HTML harness, weighted 5x.
class DSPStarfieldBenchmark
    include Benchmark
    attr_reader :weight
    def initialize
        @name = "starfield"
        @weight = 5
    end
    def emitRunCode(plan)
        ensureBenchmarkFiles(DSPJS_STARFIELD_PATH)
        emitSelfContainedBenchRunCode(fullname, plan, "starfield-bencher.html", "bencher-config.js", "")
    end
end
# Bellard's JSLinux emulator benchmark; self-contained harness, weighted 5x.
class DSPJSJSLinuxBenchmark
    include Benchmark
    attr_reader :weight
    def initialize
        @name = "bellard-jslinux"
        @weight = 5
    end
    def emitRunCode(plan)
        ensureBenchmarkFiles(DSPJS_JSLINUX_PATH)
        emitSelfContainedBenchRunCode(fullname, plan, "jslinux-bencher.html", "bencher-config.js", "")
    end
end
# zynaps Quake 3 renderer benchmark; self-contained harness, weighted 5x.
class DSPJSQuake3Benchmark
    include Benchmark
    attr_reader :weight
    def initialize
        @name = "zynaps-quake3"
        @weight = 5
    end
    def emitRunCode(plan)
        ensureBenchmarkFiles(DSPJS_QUAKE3_PATH)
        emitSelfContainedBenchRunCode(fullname, plan, "quake-bencher.html", "bencher-config.js", "")
    end
end
# zynaps Mandelbrot renderer benchmark; self-contained harness, weighted 5x.
class DSPJSMandelbrotBenchmark
    include Benchmark
    attr_reader :weight
    def initialize
        @name = "zynaps-mandelbrot"
        @weight = 5
    end
    def emitRunCode(plan)
        ensureBenchmarkFiles(DSPJS_MANDELBROT_PATH)
        emitSelfContainedBenchRunCode(fullname, plan, "mandelbrot-bencher.html", "bencher-config.js", "")
    end
end
# ammo.js physics benchmark (asm.js build); self-contained harness, 5x weight.
class DSPJSAmmoJSASMBenchmark
    include Benchmark
    attr_reader :weight
    def initialize
        @name = "ammojs-asm-js"
        @weight = 5
    end
    def emitRunCode(plan)
        ensureBenchmarkFiles(DSPJS_AMMOJS_ASMJS_PATH)
        emitSelfContainedBenchRunCode(fullname, plan, "ammo-asmjs-bencher.html", "bencher-config.js", "")
    end
end
# ammo.js physics benchmark (regular JS build); self-contained harness, 5x.
class DSPJSAmmoJSRegularBenchmark
    include Benchmark
    attr_reader :weight
    def initialize
        @name = "ammojs-regular-js"
        @weight = 5
    end
    def emitRunCode(plan)
        ensureBenchmarkFiles(DSPJS_AMMOJS_REGULAR_PATH)
        emitSelfContainedBenchRunCode(fullname, plan, "ammo-regular-bencher.html", "bencher-config.js", "")
    end
end
# Browsermark JavaScript throughput benchmark: loads the test script plus a
# shared bencher harness and runs jscSetUp/jscRun/jscTearDown ticks.
class BrowsermarkJSBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        # BUG FIX: ThroughputBenchmarkParameters.new takes 7 arguments
        # (dataPaths, setUpCommand, command, tearDownCommand, doWarmup,
        # deterministic, minimumIterations); the original call passed only 6,
        # raising ArgumentError at run time. Pass deterministic=false to match
        # the other non-deterministic throughput benchmarks (e.g. V8Real).
        emitBenchRunCode(fullname, plan, ThroughputBenchmarkParameters.new([ensureFile(name, "#{BROWSERMARK_JS_PATH}/#{name}/test.js"), ensureFile("browsermark-bencher", "#{BROWSERMARK_JS_PATH}/browsermark-bencher.js")], simpleCommand("jscSetUp();"), simpleCommand("jscRun();"), simpleCommand("jscTearDown();"), true, false, 32))
    end
end
# A Browsermark DOM benchmark; self-contained HTML harness under the
# Browsermark tree, configured with the benchmark's name.
class BrowsermarkDOMBenchmark
    include Benchmark
    def initialize(name)
        @name = name
    end
    def emitRunCode(plan)
        ensureBenchmarkFiles(BROWSERMARK_PATH)
        emitSelfContainedBenchRunCode(fullname, plan, "tests/benchmarks/dom/#{name}/index.html", "bencher-config.js", name)
    end
end
# A named collection of benchmarks. Tracks optional sub-suites, honors the
# global $benchmarkPattern filter on add, and knows how to turn a Stats
# object into this suite's preferred (decimal-shifted) mean.
class BenchmarkSuite
    attr_reader :name, :decimalShift, :benchmarks, :subSuites, :preferredMean
    def initialize(name, preferredMean, decimalShift)
        @name = name
        @preferredMean = preferredMean
        @benchmarks = []
        @subSuites = []
        @decimalShift = decimalShift
    end
    def to_s
        @name
    end
    # Adds unconditionally, wiring up the benchmark's back-pointer.
    def addIgnoringPattern(benchmark)
        benchmark.benchmarkSuite = self
        @benchmarks << benchmark
    end
    # Adds only if the benchmark's full name passes $benchmarkPattern.
    def add(benchmark)
        return if $benchmarkPattern and "#{@name}/#{benchmark.name}" !~ $benchmarkPattern
        addIgnoringPattern(benchmark)
    end
    def addSubSuite(subSuite)
        @subSuites << subSuite
    end
    # Looks up a benchmark by name; raises unless exactly one matches.
    def benchmarkForName(name)
        matching = @benchmarks.select { |b| b.name == name }
        raise unless matching.length == 1
        matching.first
    end
    def hasBenchmark(benchmark)
        occurrences = @benchmarks.count { |b| b == benchmark }
        raise unless occurrences <= 1
        occurrences == 1
    end
    # This suite followed by its sub-suites.
    def suites
        [self] + @subSuites
    end
    # This suite plus every sub-suite containing the given benchmark.
    def suitesWithBenchmark(benchmark)
        [self] + @subSuites.select { |subSuite| subSuite.hasBenchmark(benchmark) }
    end
    def empty?
        @benchmarks.empty?
    end
    # Keeps only the benchmarks for which the block is truthy.
    def retain_if
        @benchmarks.delete_if { |benchmark| not yield benchmark }
    end
    # Applies the preferred mean and decimal shift; nil for bad stats.
    def computeMean(stat)
        return nil unless stat.ok?
        (stat.send @preferredMean) * (10 ** decimalShift)
    end
end
# One concrete run: a benchmark on a VM at a given outer iteration. Captures
# any per-(vm, suite, benchmark) environment overrides from the global
# $environment table at construction time.
class BenchRunPlan
    attr_reader :benchmark, :vm, :iteration
    def initialize(benchmark, vm, iteration)
        @benchmark = benchmark
        @vm = vm
        @iteration = iteration
        @environment = {}
        if $environment.has_key?(vm.name)
            vmEnv = $environment[vm.name]
            suiteName = benchmark.benchmarkSuite.name
            if vmEnv.has_key?(suiteName)
                suiteEnv = vmEnv[suiteName]
                if suiteEnv.has_key?(benchmark.name)
                    @environment = suiteEnv[benchmark.name]
                end
            end
        end
    end
    def suite
        @benchmark.benchmarkSuite
    end
    # Plan-level overrides merged with (and overridden by) the benchmark's
    # own required environment variables.
    def environment
        merged = @environment.clone
        benchmark.environment.each_pair {
            | key, value |
            merged[key] = value
        }
        merged
    end
    # Prefix of every result line this run prints.
    def prefix
        "#{@benchmark.fullname}: #{vm.name}: #{iteration}"
    end
    def emitRunCode
        @benchmark.emitRunCode(self)
    end
    def to_s
        benchmark.to_s + "/" + vm.to_s
    end
end
# Aggregates all results for one benchmark on one VM, feeding samples into a
# Stats object as output lines are parsed.
class BenchmarkOnVM
    attr_reader :benchmark, :suiteOnVM, :subSuitesOnVM, :stats
    def initialize(benchmark, suiteOnVM, subSuitesOnVM)
        @benchmark = benchmark
        @suiteOnVM = suiteOnVM
        @subSuitesOnVM = subSuitesOnVM
        @stats = Stats.new
    end
    def to_s
        "#{@benchmark} on #{@suiteOnVM.vm}"
    end
    def vm
        @suiteOnVM.vm
    end
    def vmStats
        @suiteOnVM.vmStats
    end
    def suite
        @benchmark.benchmarkSuite
    end
    # Records one parsed sample after sanity-checking it belongs here.
    def parseResult(result)
        raise "VM mismatch; I've got #{vm} and they've got #{result.vm}" unless result.vm == vm
        raise unless result.benchmark == @benchmark
        @stats.add(result.time)
    end
end
# A StatsAccumulator that reports under a fixed label rather than a VM name.
class NamedStatsAccumulator < StatsAccumulator
    def initialize(name)
        super()
        @name = name
    end
    # Label used when printing this accumulator's results.
    def reportingName
        @name
    end
end
# Per-iteration stats accumulator for one whole suite run on one VM; also
# links back to the VM-wide accumulator.
class SuiteOnVM < StatsAccumulator
  attr_reader :suite, :vm

  def initialize(vm, vmStats, suite)
    super()
    @vm = vm
    @vmStats = vmStats
    @suite = suite
    # Fail fast on miswired construction.
    raise unless @vm.is_a? VM
    raise unless @vmStats.is_a? StatsAccumulator
    raise unless @suite.is_a? BenchmarkSuite
  end

  def to_s
    "#{@suite} on #{@vm}"
  end

  # Reports under the VM's name (one column per VM in the report).
  def reportingName
    @vm.name
  end

  def vmStats
    raise unless @vmStats
    @vmStats
  end
end
# Like SuiteOnVM but for a sub-suite; has no link to the VM-wide accumulator.
class SubSuiteOnVM < StatsAccumulator
  attr_reader :suite, :vm

  def initialize(vm, suite)
    super()
    @vm = vm
    @suite = suite
    # Fail fast on miswired construction.
    raise unless @vm.is_a? VM
    raise unless @suite.is_a? BenchmarkSuite
  end

  def to_s
    "#{@suite} on #{@vm}"
  end

  # Reports under the VM's name (one column per VM in the report).
  def reportingName
    @vm.name
  end
end
# A BenchmarkOnVM paired with one outer-iteration number; the unit that
# results are matched against when parsing the run log.
class BenchPlan
  attr_reader :benchmarkOnVM, :iteration

  def initialize(benchmarkOnVM, iteration)
    @benchmarkOnVM = benchmarkOnVM
    @iteration = iteration
  end

  def to_s
    "#{@benchmarkOnVM} \##{@iteration+1}"
  end

  def benchmark
    @benchmarkOnVM.benchmark
  end

  def suite
    @benchmarkOnVM.suite
  end

  def vm
    @benchmarkOnVM.vm
  end

  # Records a parsed result into the per-benchmark stats, then into the
  # per-VM, per-suite, and per-sub-suite iteration accumulators — once per
  # unit of the benchmark's weight.
  def parseResult(result)
    raise unless result.plan == self
    @benchmarkOnVM.parseResult(result)
    benchmark.weight.times do
      @benchmarkOnVM.vmStats.statsForIteration(@iteration, result.innerIndex).add(result.time)
      @benchmarkOnVM.suiteOnVM.statsForIteration(@iteration, result.innerIndex).add(result.time)
      @benchmarkOnVM.subSuitesOnVM.each do |sub|
        sub.statsForIteration(@iteration, result.innerIndex).add(result.time)
      end
    end
  end
end
# Left-pads +str+ with spaces to +chars+ columns; strings already at least
# that wide pass through unchanged. Equivalent to the old
# "%<width>s" % str formatting.
def lpad(str, chars)
  str.rjust(chars)
end
# Right-pads +str+ with spaces to +chars+ columns; strings already at least
# that wide pass through unchanged.
def rpad(str, chars)
  str.ljust(chars)
end
# Centers +str+ in +chars+ columns of spaces. Like the old alternating
# append-right/prepend-left loop, an odd leftover space goes on the right —
# which is exactly String#center's behavior.
def center(str, chars)
  str.center(chars)
end
# Formats a Stats object for one report column.
# - error: a centered "ERROR" placeholder.
# - single sample ($inner*$outer == 1): just the mean, aligned on its
#   decimal point (numToStr must produce a '.' — asserted below).
# - otherwise: "mean+-confidence-interval".
# decimalShift adjusts the column split so shifted suites still line up.
def statsToStr(stats, decimalShift)
if stats.error?
lpad(center("ERROR", 10+10+2), 12+10+2)
elsif $inner*$outer == 1
string = numToStr(stats.mean, decimalShift)
raise unless string =~ /\./
left = $~.pre_match
right = $~.post_match
lpad(left, 13 - decimalShift) + "." + rpad(right, 10 + decimalShift)
else
lpad(numToStr(stats.mean, decimalShift), 12) + "+-" + rpad(numToStr(stats.confInt, decimalShift), 10)
end
end
# English plural suffix: "" for exactly 1, "s" otherwise.
def plural(num)
  num == 1 ? "" : "s"
end
# Greedy word-wrap: splits +str+ on whitespace and re-joins the words into
# newline-terminated lines of at most +columns+ characters (a single word
# longer than +columns+ still gets a line of its own).
#
# Returns "" when the input contains no words. The previous implementation
# crashed there: Array#shift returned nil for an empty word list and the
# final "result + curLine" raised TypeError.
def wrap(str, columns)
  words = str.split
  return "" if words.empty?
  result = ""
  curLine = words.shift
  words.each do |word|
    if (curLine + " " + word).size > columns
      result += curLine + "\n"
      curLine = word
    else
      curLine += " " + word
    end
  end
  result + curLine + "\n"
end
# Runs the generated benchmark script locally inside BENCH_DATA_PATH and
# returns the captured run log as a single string. Raises if the script
# exits non-zero.
def runAndGetResults
results = nil
Dir.chdir(BENCH_DATA_PATH) {
$stderr.puts ">> sh ./runscript" if $verbosity >= 2
raise "Script did not complete correctly: #{$?}" unless system("sh ./runscript > runlog")
results = IO::read("runlog")
}
raise unless results
results
end
# Parses a raw run log and renders the full benchmark report (text + JSON).
# This first section wires up the accumulator graph: one NamedStatsAccumulator
# per VM, one SuiteOnVM per (VM, suite), one SubSuiteOnVM per (VM, sub-suite),
# and one BenchmarkOnVM per (VM, benchmark), plus lookup tables by suite,
# sub-suite, VM, and benchmark.
def parseAndDisplayResults(results)
vmStatses = []
$vms.each {
| vm |
vmStatses << NamedStatsAccumulator.new(vm.name)
}
suitesOnVMs = []
suitesOnVMsForSuite = {}
subSuitesOnVMsForSubSuite = {}
$suites.each {
| suite |
suitesOnVMsForSuite[suite] = []
suite.subSuites.each {
| subSuite |
subSuitesOnVMsForSubSuite[subSuite] = []
}
}
suitesOnVMsForVM = {}
$vms.each {
| vm |
suitesOnVMsForVM[vm] = []
}
benchmarksOnVMs = []
benchmarksOnVMsForBenchmark = {}
$benchmarks.each {
| benchmark |
benchmarksOnVMsForBenchmark[benchmark] = []
}
$vms.each_with_index {
| vm, vmIndex |
vmStats = vmStatses[vmIndex]
$suites.each {
| suite |
suiteOnVM = SuiteOnVM.new(vm, vmStats, suite)
subSuitesOnVM = suite.subSuites.map {
| subSuite |
result = SubSuiteOnVM.new(vm, subSuite)
subSuitesOnVMsForSubSuite[subSuite] << result
result
}
suitesOnVMs << suiteOnVM
suitesOnVMsForSuite[suite] << suiteOnVM
suitesOnVMsForVM[vm] << suiteOnVM
suite.benchmarks.each {
| benchmark |
subSuitesOnVMForThisBenchmark = []
subSuitesOnVM.each {
| subSuiteOnVM |
if subSuiteOnVM.suite.hasBenchmark(benchmark)
subSuitesOnVMForThisBenchmark << subSuiteOnVM
end
}
benchmarkOnVM = BenchmarkOnVM.new(benchmark, suiteOnVM, subSuitesOnVMForThisBenchmark)
benchmarksOnVMs << benchmarkOnVM
benchmarksOnVMsForBenchmark[benchmark] << benchmarkOnVM
}
}
}
# One BenchPlan per (benchmark-on-VM, outer iteration); results in the log
# are matched back to these plans.
plans = []
benchmarksOnVMs.each {
| benchmarkOnVM |
$outer.times {
| iteration |
plans << BenchPlan.new(benchmarkOnVM, iteration)
}
}
# Scan the log: pick out the HOSTNAME/HARDWARE banner lines, feed every
# other line to ParsedResult and route matches into the accumulators.
hostname = nil
hwmodel = nil
results.each_line {
| line |
line.chomp!
if line =~ /HOSTNAME:([^.]+)/
hostname = $1
elsif line =~ /HARDWARE:hw\.model: /
hwmodel = $~.post_match.chomp
else
result = ParsedResult.parse(plans, line)
if result
result.plan.parseResult(result)
end
end
}
# Compute the geomean of the preferred means of results on a SuiteOnVM
overallResults = []
$vms.each {
| vm |
result = Stats.new
$outer.times {
| outerIndex |
$inner.times {
| innerIndex |
curResult = Stats.new
suitesOnVMsForVM[vm].each {
| suiteOnVM |
# For a given iteration, suite, and VM, compute the suite's preferred mean
# over the data collected for all benchmarks in that suite. We'll have one
# sample per benchmark. For example on V8 this will be the geomean of 1
# sample for crypto, 1 sample for deltablue, and so on, and 1 sample for
# splay.
curResult.add(suiteOnVM.suite.computeMean(suiteOnVM.statsForIteration(outerIndex, innerIndex)))
}
# curResult now holds 1 sample for each of the means computed in the above
# loop. Compute the geomean over this, and store it.
if curResult.ok?
result.add(curResult.geometricMean)
else
result.add(nil)
end
}
}
# $overallResults will have a Stats for each VM. That Stats object will hold
# $inner*$outer geomeans, allowing us to compute the arithmetic mean and
# confidence interval of the geomeans of preferred means. Convoluted, but
# useful and probably sound.
overallResults << result
}
if $verbosity >= 2
benchmarksOnVMs.each {
| benchmarkOnVM |
$stderr.puts "#{benchmarkOnVM}: #{benchmarkOnVM.stats}"
}
$vms.each_with_index {
| vm, vmIndex |
vmStats = vmStatses[vmIndex]
$stderr.puts "#{vm} (arithmeticMean): #{vmStats.arithmeticMeanStats}"
$stderr.puts "#{vm} (geometricMean): #{vmStats.geometricMeanStats}"
}
end
# Derive the report file base name: --output-name wins; otherwise compose
# VM names (unless any were auto-named or the string gets too long), suite
# names, hostname, and a timestamp.
if $outputName
reportName = $outputName
else
reportName =
(if ($vms.collect {
| vm |
vm.nameKind
}.index :auto)
""
else
text = $vms.collect {
| vm |
vm.to_s
}.join("_") + "_"
if text.size >= 40
""
else
text
end
end) +
($suites.collect {
| suite |
suite.to_s
}.join("")) + "_" +
(if hostname
hostname + "_"
else
""
end)+
(begin
time = Time.now
"%04d%02d%02d_%02d%02d" %
[ time.year, time.month, time.day,
time.hour, time.min ]
end)
end
unless $brief
puts "Generating benchmark report at #{Dir.pwd}/#{reportName}_report.txt"
puts "And raw data at #{Dir.pwd}/#{reportName}.json"
end
# Fall back to stdout if the report file cannot be opened.
outp = $stdout
json = {}
begin
outp = File.open(reportName + "_report.txt","w")
rescue => e
$stderr.puts "Error: could not save report to #{reportName}_report.txt: #{e}"
$stderr.puts
end
# Builds the VM-names header row: indentation for multi-suite reports, a
# blank benchmark/weight gutter, one fixed-width centered column per VM
# (separated by a NoChange-sized spacer), and room for the last-vs-first
# comparison column when 2+ VMs are being compared.
def createVMsString
result = ""
result += " " if $allSuites.size > 1
result += rpad("", $benchpad + $weightpad)
result += " "
$vms.size.times {
| index |
if index != 0
result += " "+NoChange.new(0).shortForm
end
result += lpad(center($vms[index].name, 10+10+2), 12+10+2)
}
result += " "
if $vms.size >= 3
result += center("#{$vms[-1].name} v. #{$vms[0].name}",26)
elsif $vms.size >= 2
result += " "*26
end
result
end
# English-joins a list: "a", "a and b", "a, b, and c" (Oxford comma).
def andJoin(list)
  case list.size
  when 1
    list[0].to_s
  when 2
    "#{list[0]} and #{list[1]}"
  else
    "#{list[0..-2].join(', ')}, and #{list[-1]}"
  end
end
# Seed the JSON payload and emit the report preamble: title line, the VM
# list with paths/revisions/extra env, and a prose paragraph describing the
# sampling methodology, wrapped to the report width.
json["vms"] = $vms.collect{|v| v.name}
json["suites"] = {}
json["runlog"] = results
columns = [createVMsString.size, 78].max
outp.print "Benchmark report for "
outp.print andJoin($suites)
if hostname
outp.print " on #{hostname}"
end
if hwmodel
outp.print " (#{hwmodel})"
end
outp.puts "."
outp.puts
outp.puts "VMs tested:"
$vms.each {
| vm |
outp.print "\"#{vm.name}\" at #{vm.origPath}"
if vm.svnRevision
outp.print " (r#{vm.svnRevision})"
end
outp.puts
vm.extraEnv.each_pair {
| key, val |
outp.puts " export #{key}=#{val}"
}
}
outp.puts
outp.puts wrap("Collected #{$outer*$inner} sample#{plural($outer*$inner)} per benchmark/VM, "+
"with #{$outer} VM invocation#{plural($outer)} per benchmark."+
(if $rerun > 1 then (" Ran #{$rerun} benchmark iterations, and measured the "+
"total time of those iterations, for each sample.")
else "" end)+
(if $measureGC == true then (" No manual garbage collection invocations were "+
"emitted.")
elsif $measureGC then (" Emitted a call to gc() between sample measurements for "+
"all VMs except #{$measureGC}.")
else (" Emitted a call to gc() between sample measurements.") end)+
(if $warmup == 0 then (" Did not include any warm-up iterations; measurements "+
"began with the very first iteration.")
else (" Used #{$warmup*$rerun} benchmark iteration#{plural($warmup*$rerun)} per VM "+
"invocation for warm-up.") end)+
(case $timeMode
when :preciseTime then (" Used the jsc-specific preciseTime() function to get "+
"microsecond-level timing.")
when :date then (" Used the portable Date.now() method to get millisecond-"+
"level timing.")
else raise end)+
" Reporting benchmark execution times with 95% confidence "+
"intervals in milliseconds.",
columns)
outp.puts
# Prints the VM-names header row.
def printVMs(outp)
outp.puts createVMsString
end
# Emits one summary row (label +name+) across all +accumulators+ (one per
# VM): each column shows the accumulator's stats as selected by +proc+, with
# a short column-to-previous-column comparison between VMs and a long
# last-vs-first comparison at the end. Also records each column's stats into
# +json+ under the accumulator's reportingName.
def summaryStats(outp, json, accumulators, name, decimalShift, &proc)
resultingJson = {}
outp.print " " if $allSuites.size > 1
outp.print rpad(name, $benchpad + $weightpad)
outp.print " "
accumulators.size.times {
| index |
if index != 0
outp.print " "+accumulators[index].stats(&proc).compareTo(accumulators[index-1].stats(&proc)).shortForm
end
outp.print statsToStr(accumulators[index].stats(&proc), decimalShift)
resultingJson[accumulators[index].reportingName] = accumulators[index].stats(&proc).jsonMap
}
if accumulators.size>=2
outp.print(" "+accumulators[-1].stats(&proc).compareTo(accumulators[0].stats(&proc)).longForm)
end
outp.puts
json[name] = resultingJson
end
# Emits the suite summary row for its preferred mean, labelled like
# "<geometric>" (derived from e.g. :geometricMean).
def allSummaryStats(outp, json, accumulators, preferredMean, decimalShift)
  label = "<#{preferredMean.to_s.sub(/Mean$/, '')}>"
  summaryStats(outp, json, accumulators, label, decimalShift) { |stat| stat.send(preferredMean) }
end
# Per-suite report body: one row per benchmark (with per-VM columns and
# comparisons), then sub-suite and suite summary rows; everything is also
# mirrored into the JSON payload.
$suites.each {
| suite |
suiteJson = {}
subSuiteJsons = {}
suite.subSuites.each {
| subSuite |
subSuiteJsons[subSuite] = {}
}
printVMs(outp)
if $allSuites.size > 1
outp.puts(andJoin(suite.suites.map{|v| v.name}) + ":")
else
outp.puts
end
suite.benchmarks.each {
| benchmark |
benchmarkJson = {}
outp.print " " if $allSuites.size > 1
outp.print rpad(benchmark.name, $benchpad) + rpad(benchmark.weightString, $weightpad)
# Overlong benchmark names get their own line; the stats continue on a
# fresh, gutter-aligned line.
if benchmark.name.size > $benchNameClip
outp.puts
outp.print " " if $allSuites.size > 1
outp.print((" " * $benchpad) + (" " * $weightpad))
end
outp.print " "
myConfigs = benchmarksOnVMsForBenchmark[benchmark]
myConfigs.size.times {
| index |
if index != 0
outp.print " "+myConfigs[index].stats.compareTo(myConfigs[index-1].stats).shortForm
end
outp.print statsToStr(myConfigs[index].stats, suite.decimalShift)
benchmarkJson[myConfigs[index].vm.name] = myConfigs[index].stats.jsonMap
}
if $vms.size>=2
outp.print(" "+myConfigs[-1].stats.compareTo(myConfigs[0].stats).to_s)
end
outp.puts
suiteJson[benchmark.name] = benchmarkJson
suite.subSuites.each {
| subSuite |
if subSuite.hasBenchmark(benchmark)
subSuiteJsons[subSuite][benchmark.name] = benchmarkJson
end
}
}
outp.puts
unless suite.subSuites.empty?
suite.subSuites.each {
| subSuite |
outp.puts "#{subSuite.name}:"
allSummaryStats(outp, subSuiteJsons[subSuite], subSuitesOnVMsForSubSuite[subSuite], subSuite.preferredMean, subSuite.decimalShift)
outp.puts
}
outp.puts "#{suite.name} including #{andJoin(suite.subSuites.map{|v| v.name})}:"
end
allSummaryStats(outp, suiteJson, suitesOnVMsForSuite[suite], suite.preferredMean, suite.decimalShift)
outp.puts if $allSuites.size > 1
json["suites"][suite.name] = suiteJson
suite.subSuites.each {
| subSuite |
json["suites"][subSuite.name] = subSuiteJsons[subSuite]
}
}
# Cross-suite scaled result: geomean of each suite's preferred mean, per VM
# (the overallResults computed earlier).
if $suites.size > 1
scaledResultJson = {}
printVMs(outp)
outp.puts "Geomean of preferred means:"
outp.print " "
outp.print rpad("<scaled-result>", $benchpad + $weightpad)
outp.print " "
$vms.size.times {
| index |
if index != 0
outp.print " "+overallResults[index].compareTo(overallResults[index-1]).shortForm
end
outp.print statsToStr(overallResults[index], 0)
scaledResultJson[$vms[index].name] = overallResults[index].jsonMap
}
if overallResults.size>=2
outp.print(" "+overallResults[-1].compareTo(overallResults[0]).longForm)
end
outp.puts
json["<scaled-result>"] = scaledResultJson
end
outp.puts
if outp != $stdout
outp.close
end
# When writing to a file (and not in --brief mode), echo the finished
# report to stdout as well.
if outp != $stdout and not $brief
puts
File.open(reportName + "_report.txt") {
| inp |
puts inp.read
}
end
# --brief: just the per-VM means and confidence intervals, tab-separated.
if $brief
puts(overallResults.collect{|stats| stats.mean}.join("\t"))
puts(overallResults.collect{|stats| stats.confInt}.join("\t"))
end
File.open(reportName + ".json", "w") {
| outp |
outp.puts json.to_json
}
end
# Everything below runs inside one script-wide begin/rescue (closed at the
# end of the file) so that any error funnels through fail().
begin
$sawBenchOptions = false
# Called by every suite-selecting command-line option before it sets its
# flag: the first such option clears all default suite selections, so that
# only the suites explicitly named on the command line run. Subsequent calls
# are no-ops.
def resetBenchOptionsIfNecessary
  unless $sawBenchOptions
    $includeSunSpider = false
    $includeSunSpiderCompileTime = false
    $includeLongSpider = false
    $includeV8 = false
    $includeV8CompileTime = false
    $includeKraken = false
    $includeJSBench = false
    $includeMicrobenchmarks = false
    $includeAsmBench = false
    $includeDSPJS = false
    $includeBrowsermarkJS = false
    $includeBrowsermarkDOM = false
    $includeOctane = false
    $includeCompressionBench = false
    $includeSixSpeed = false
    $includeTailBench = false
    # Was missing from this list even though --big-int-bench exists, so a
    # default BigIntBench selection could leak through when other suites
    # were selected explicitly.
    $includeBigIntBench = false
    $sawBenchOptions = true
  end
end
# Command-line option parsing. Suite-selecting options call
# resetBenchOptionsIfNecessary first so the first explicit selection clears
# the defaults and later selections accumulate.
GetoptLong.new(['--rerun', GetoptLong::REQUIRED_ARGUMENT],
               ['--inner', GetoptLong::REQUIRED_ARGUMENT],
               ['--outer', GetoptLong::REQUIRED_ARGUMENT],
               ['--warmup', GetoptLong::REQUIRED_ARGUMENT],
               ['--no-ss-warmup', GetoptLong::NO_ARGUMENT],
               ['--quantum', GetoptLong::REQUIRED_ARGUMENT],
               ['--minimum', GetoptLong::REQUIRED_ARGUMENT],
               ['--timing-mode', GetoptLong::REQUIRED_ARGUMENT],
               ['--sunspider', GetoptLong::NO_ARGUMENT],
               ['--sunspider-compile-time', GetoptLong::NO_ARGUMENT],
               ['--longspider', GetoptLong::NO_ARGUMENT],
               ['--v8-spider', GetoptLong::NO_ARGUMENT],
               ['--v8-spider-compile-time', GetoptLong::NO_ARGUMENT],
               ['--kraken', GetoptLong::NO_ARGUMENT],
               ['--js-bench', GetoptLong::NO_ARGUMENT],
               ['--microbenchmarks', GetoptLong::NO_ARGUMENT],
               ['--asm-bench', GetoptLong::NO_ARGUMENT],
               ['--dsp', GetoptLong::NO_ARGUMENT],
               ['--browsermark-js', GetoptLong::NO_ARGUMENT],
               ['--browsermark-dom', GetoptLong::NO_ARGUMENT],
               ['--octane', GetoptLong::NO_ARGUMENT],
               ['--compression-bench', GetoptLong::NO_ARGUMENT],
               ['--six-speed', GetoptLong::NO_ARGUMENT],
               ['--tail-bench', GetoptLong::NO_ARGUMENT],
               ['--big-int-bench', GetoptLong::NO_ARGUMENT],
               ['--benchmarks', GetoptLong::REQUIRED_ARGUMENT],
               ['--measure-gc', GetoptLong::OPTIONAL_ARGUMENT],
               ['--force-vm-kind', GetoptLong::REQUIRED_ARGUMENT],
               ['--force-vm-copy', GetoptLong::NO_ARGUMENT],
               ['--dont-copy-vms', GetoptLong::NO_ARGUMENT],
               ['--verbose', '-v', GetoptLong::NO_ARGUMENT],
               ['--brief', GetoptLong::NO_ARGUMENT],
               ['--silent', GetoptLong::NO_ARGUMENT],
               ['--remote', GetoptLong::REQUIRED_ARGUMENT],
               ['--local', GetoptLong::NO_ARGUMENT],
               ['--ssh-options', GetoptLong::REQUIRED_ARGUMENT],
               ['--slave', GetoptLong::NO_ARGUMENT],
               ['--prepare-only', GetoptLong::NO_ARGUMENT],
               ['--analyze', GetoptLong::REQUIRED_ARGUMENT],
               ['--vms', GetoptLong::REQUIRED_ARGUMENT],
               ['--output-name', GetoptLong::REQUIRED_ARGUMENT],
               ['--environment', GetoptLong::REQUIRED_ARGUMENT],
               ['--dependencies', GetoptLong::REQUIRED_ARGUMENT],
               ['--config', GetoptLong::REQUIRED_ARGUMENT],
               ['--help', '-h', GetoptLong::NO_ARGUMENT]).each {
  | opt, arg |
  case opt
  when '--rerun'
    $rerun = intArg(opt,arg,1,nil)
  when '--inner'
    $inner = intArg(opt,arg,1,nil)
  when '--outer'
    $outer = intArg(opt,arg,1,nil)
  when '--warmup'
    $warmup = intArg(opt,arg,0,nil)
  when '--no-ss-warmup'
    $sunSpiderWarmup = false
  when '--quantum'
    $quantum = intArg(opt,arg,1,nil)
  when '--minimum'
    $minimum = intArg(opt,arg,1,nil)
  when '--timing-mode'
    if arg.upcase == "PRECISETIME"
      $timeMode = :preciseTime
    elsif arg.upcase == "DATE"
      $timeMode = :date
    elsif arg.upcase == "AUTO"
      $timeMode = :auto
    else
      # Message used to say "--time-mode", which is not the option's name.
      quickFail("Expected either 'preciseTime', 'date', or 'auto' for --timing-mode, but got '#{arg}'.",
                "Invalid argument for command-line option")
    end
  when '--force-vm-kind'
    if arg.upcase == "JSC"
      $forceVMKind = :jsc
    elsif arg.upcase == "DUMPRENDERTREE"
      $forceVMKind = :dumpRenderTree
    elsif arg.upcase == "WEBKITTESTRUNNER"
      $forceVMKind = :webkitTestRunner
    elsif arg.upcase == "AUTO"
      $forceVMKind = nil
    else
      # Message used to omit 'auto', which the branch above accepts.
      quickFail("Expected 'jsc', 'DumpRenderTree', 'WebKitTestRunner', or 'auto' for --force-vm-kind, but got '#{arg}'.",
                "Invalid argument for command-line option")
    end
  when '--force-vm-copy'
    $needToCopyVMs = true
  when '--dont-copy-vms'
    $dontCopyVMs = true
  when '--sunspider'
    resetBenchOptionsIfNecessary
    $includeSunSpider = true
  when '--sunspider-compile-time'
    resetBenchOptionsIfNecessary
    $includeSunSpiderCompileTime = true
  when '--longspider'
    resetBenchOptionsIfNecessary
    $includeLongSpider = true
  when '--v8-spider'
    resetBenchOptionsIfNecessary
    $includeV8 = true
  when '--v8-spider-compile-time'
    resetBenchOptionsIfNecessary
    $includeV8CompileTime = true
  when '--kraken'
    resetBenchOptionsIfNecessary
    $includeKraken = true
  when '--js-bench'
    resetBenchOptionsIfNecessary
    $includeJSBench = true
  when '--tail-bench'
    resetBenchOptionsIfNecessary
    $includeTailBench = true
  when '--microbenchmarks'
    resetBenchOptionsIfNecessary
    $includeMicrobenchmarks = true
  when '--asm-bench'
    resetBenchOptionsIfNecessary
    $includeAsmBench = true
  when '--dsp'
    resetBenchOptionsIfNecessary
    $includeDSPJS = true
  when '--browsermark-js'
    resetBenchOptionsIfNecessary
    $includeBrowsermarkJS = true
  when '--browsermark-dom'
    resetBenchOptionsIfNecessary
    $includeBrowsermarkDOM = true
  when '--octane'
    resetBenchOptionsIfNecessary
    $includeOctane = true
  when '--compression-bench'
    resetBenchOptionsIfNecessary
    $includeCompressionBench = true
  when '--six-speed'
    resetBenchOptionsIfNecessary
    $includeSixSpeed = true
  when '--big-int-bench'
    resetBenchOptionsIfNecessary
    $includeBigIntBench = true
  when '--benchmarks'
    $benchmarkPattern = Regexp.new(arg)
  when '--measure-gc'
    if arg == ''
      $measureGC = true
    else
      $measureGC = arg
    end
  when '--verbose'
    $verbosity += 1
  when '--brief'
    $brief = true
  when '--silent'
    $silent = true
  when '--remote'
    $remoteHosts += arg.split(',')
    $needToCopyVMs = true
  when '--ssh-options'
    $sshOptions << arg
  when '--local'
    $alsoLocal = true
  when '--prepare-only'
    $run = false
  when '--analyze'
    $prepare = false
    $run = false
    $analyze << arg
  when '--output-name'
    $outputName = arg
  when '--vms'
    # JSON file describing VM configurations: [{"path":..., "name":..., "env":{...}}, ...]
    JSON::parse(IO::read(arg)).each {
      | vmDescription |
      path = Pathname.new(vmDescription["path"]).realpath
      if vmDescription["name"]
        name = vmDescription["name"]
        nameKind = :given
      else
        name = "Conf\##{$vms.length+1}"
        nameKind = :auto
      end
      vm = VM.new(path, name, nameKind, nil)
      if vmDescription["env"]
        vmDescription["env"].each_pair {
          | key, val |
          vm.addExtraEnv(key, val)
        }
      end
      $vms << vm
    }
  when '--environment'
    $environment = JSON::parse(IO::read(arg))
  when '--dependencies'
    $dependencies.push(Pathname.new(arg).realpath)
  when '--config'
    $configPath = Pathname.new(arg)
  when '--help'
    usage
  else
    raise "bad option: #{opt}"
  end
}
# Figure out the configuration
# Missing config file is fine: every config-derived path just ends up nil.
if $configPath.file?
config = JSON::parse(IO::read($configPath.to_s))
else
config = {}
end
# Wraps a config value in a Pathname; nil/false pass through unchanged.
def pathname_if_exist config
  config ? (Pathname.new config) : config
end
# Optional benchmark locations from the config file; nil when the key is
# absent, which later disables the corresponding suite with a warning.
OCTANE_PATH = pathname_if_exist(config["OctanePath"])
BROWSERMARK_PATH = pathname_if_exist(config["BrowserMarkPath"])
BROWSERMARK_JS_PATH = pathname_if_exist(config["BrowserMarkJSPath"])
BROWSERMARK_DOM_PATH = pathname_if_exist(config["BrowserMarkDOMPath"])
ASMBENCH_PATH = pathname_if_exist(config["AsmBenchPath"])
COMPRESSIONBENCH_PATH = pathname_if_exist(config["CompressionBenchPath"])
DSPJS_FILTRR_PATH = pathname_if_exist(config["DSPJSFiltrrPath"])
DSPJS_ROUTE9_PATH = pathname_if_exist(config["DSPJSRoute9Path"])
DSPJS_STARFIELD_PATH = pathname_if_exist(config["DSPJSStarfieldPath"])
DSPJS_QUAKE3_PATH = pathname_if_exist(config["DSPJSQuake3Path"])
DSPJS_MANDELBROT_PATH = pathname_if_exist(config["DSPJSMandelbrotPath"])
DSPJS_JSLINUX_PATH = pathname_if_exist(config["DSPJSLinuxPath"])
DSPJS_AMMOJS_ASMJS_PATH = pathname_if_exist(config["DSPJSAmmoJSAsmJSPath"])
DSPJS_AMMOJS_REGULAR_PATH = pathname_if_exist(config["DSPJSAmmoJSRegularPath"])
KRAKEN_PATH = pathname_if_exist(config["KrakenPath"])
# If the --dont-copy-vms option was passed, it overrides the --force-vm-copy option.
if $dontCopyVMs
$needToCopyVMs = false
end
# Remaining command-line arguments describe VM configurations, optionally as
# "name:VAR=value:...:/path/to/vm" — a leading "name:" names the config, and
# any number of "VAR=value:" pairs become extra environment variables.
ARGV.each {
  | vm |
  if vm =~ /([a-zA-Z0-9_ .]+):/
    name = $1
    nameKind = :given
    vm = $~.post_match
  else
    name = "Conf\##{$vms.length+1}"
    nameKind = :auto
  end
  envs = []
  while vm =~ /([a-zA-Z0-9_]+)=([a-zA-Z0-9_:.\/-]+):/
    envs << [$1, $2]
    vm = $~.post_match
  end
  $stderr.puts "#{name}: #{vm}" if $verbosity >= 1
  vm = VM.new(Pathname.new(vm).realpath, name, nameKind, nil)
  envs.each {
    | pair |
    vm.addExtraEnv(pair[0], pair[1])
  }
  $vms << vm
}
if $vms.empty?
  # Old message read "at least on configuraiton" — two typos.
  quickFail("Please specify at least one configuration on the command line.",
            "Insufficient arguments")
end
# $allDRT tracks whether every VM is DumpRenderTree/WebKitTestRunner; any
# plain jsc VM clears it, which disables the browser-only suites below.
$vms.each {
  | vm |
  if vm.vmType == :jsc
    $allDRT = false
  end
}
# SunSpider (and its compile-time variant) plus a WARMUP suite that runs the
# same tests but whose results are ignored for reporting.
SUNSPIDER = BenchmarkSuite.new("SunSpider", :arithmeticMean, 0)
SUNSPIDER_COMPILE_TIME = BenchmarkSuite.new("SunSpider-CompileTime", :arithmeticMean, 0)
WARMUP = BenchmarkSuite.new("WARMUP", :arithmeticMean, 0)
["3d-cube", "3d-morph", "3d-raytrace", "access-binary-trees",
"access-fannkuch", "access-nbody", "access-nsieve",
"bitops-3bit-bits-in-byte", "bitops-bits-in-byte", "bitops-bitwise-and",
"bitops-nsieve-bits", "controlflow-recursive", "crypto-aes",
"crypto-md5", "crypto-sha1", "date-format-tofte", "date-format-xparb",
"math-cordic", "math-partial-sums", "math-spectral-norm", "regexp-dna",
"string-base64", "string-fasta", "string-tagcloud",
"string-unpack-code", "string-validate-input"].each {
| name |
SUNSPIDER.add SunSpiderBenchmark.new(name)
SUNSPIDER_COMPILE_TIME.add CompileTimeBenchmark.new(name, "#{SUNSPIDER_PATH}/#{name}.js")
WARMUP.addIgnoringPattern SunSpiderBenchmark.new(name)
}
# LongSpider and the V8Spider suites (plus V8's compile-time variant).
LONGSPIDER = BenchmarkSuite.new("LongSpider", :geometricMean, 0)
["3d-cube", "3d-morph", "3d-raytrace", "access-binary-trees",
"access-fannkuch", "access-nbody", "access-nsieve",
"bitops-3bit-bits-in-byte", "bitops-bits-in-byte", "bitops-nsieve-bits",
"controlflow-recursive", "crypto-aes", "crypto-md5", "crypto-sha1",
"date-format-tofte", "date-format-xparb", "hash-map", "math-cordic",
"math-partial-sums", "math-spectral-norm", "string-base64",
"string-fasta", "string-tagcloud"].each {
| name |
LONGSPIDER.add LongSpiderBenchmark.new(name)
}
V8 = BenchmarkSuite.new("V8Spider", :geometricMean, 0)
V8_COMPILE_TIME = BenchmarkSuite.new("V8Spider-CompileTime", :geometricMean, 0)
["crypto", "deltablue", "earley-boyer", "raytrace",
"regexp", "richards", "splay"].each {
| name |
V8.add V8Benchmark.new(name)
V8_COMPILE_TIME.add CompileTimeBenchmark.new(name, "#{V8_PATH}/v8-#{name}.js")
}
# Octane. Each entry is the OctaneBenchmark constructor argument list:
# [source files, display name, weight, then three tuning values — see
# OctaneBenchmark for their meaning].
OCTANE = BenchmarkSuite.new("Octane", :geometricMean, 1)
[[["crypto"], "encrypt", 1, true, false, 32],
[["crypto"], "decrypt", 1, true, false, 32],
[["deltablue"], "deltablue", 2, true, false, 32],
[["earley-boyer"], "earley", 1, true, false, 32],
[["earley-boyer"], "boyer", 1, true, false, 32],
[["navier-stokes"], "navier-stokes", 2, true, false, 16],
[["raytrace"], "raytrace", 2, true, false, 32],
[["richards"], "richards", 2, true, false, 32],
[["splay"], "splay", 2, true, false, 32],
[["regexp"], "regexp", 2, true, false, 16],
[["pdfjs"], "pdfjs", 2, false, false, 4],
[["mandreel"], "mandreel", 2, false, false, 4],
[["gbemu-part1", "gbemu-part2"], "gbemu", 2, false, false, 4],
[["code-load"], "closure", 1, false, false, 16],
[["code-load"], "jquery", 1, false, false, 16],
[["box2d"], "box2d", 2, false, false, 8],
[["zlib", "zlib-data"], "zlib", 2, false, true, 3],
[["typescript", "typescript-input", "typescript-compiler"], "typescript", 2, false, true, 1]].each {
| args |
OCTANE.add OctaneBenchmark.new(*args)
}
OCTANE.add CustomTimedBenchmark.new("splay-latency", (OPENSOURCE_OCTANE_PATH + "splay.js").to_s)
# Kraken and JSBench. JSBench entries pair a capture name with a mode string.
KRAKEN = BenchmarkSuite.new("Kraken", :arithmeticMean, -1)
["ai-astar", "audio-beat-detection", "audio-dft", "audio-fft",
"audio-oscillator", "imaging-darkroom", "imaging-desaturate",
"imaging-gaussian-blur", "json-parse-financial",
"json-stringify-tinderbox", "stanford-crypto-aes",
"stanford-crypto-ccm", "stanford-crypto-pbkdf2",
"stanford-crypto-sha256-iterative"].each {
| name |
KRAKEN.add KrakenBenchmark.new(name)
}
JSBENCH = BenchmarkSuite.new("JSBench", :geometricMean, 0)
[["amazon-chrome", "urem"], ["amazon-chrome-win", "urem"], ["amazon-firefox", "urm"],
["amazon-firefox-win", "urm"], ["amazon-safari", "urem"], ["facebook-chrome", "urem"],
["facebook-chrome-win", "urem"], ["facebook-firefox", "urem"],
["facebook-firefox-win", "urem"], ["facebook-safari", "urem"],
["google-chrome", "urem"], ["google-chrome-win", "urem"], ["google-firefox", "uem"],
["google-firefox-win", "urem"], ["google-safari", "urem"], ["twitter-chrome", "urem"],
["twitter-chrome-win", "rem"], ["twitter-firefox", "urem"],
["twitter-firefox-win", "urem"], ["twitter-safari", "urem"], ["yahoo-chrome", "urem"],
["yahoo-chrome-win", "urem"], ["yahoo-firefox", "urem"], ["yahoo-firefox-win", "urem"],
["yahoo-safari", "urem"]].each {
| nameAndMode |
JSBENCH.add JSBenchBenchmark.new(*nameAndMode)
}
# TailBench, and BigIntBench built from every .js file in its directory.
TAILBENCH = BenchmarkSuite.new("TailBench", :geometricMean, 0)
["n-body", "merge-sort", "merge-sort-cps", "bf-interpreter"].each {
| name |
TAILBENCH.add TailBenchBenchmark.new(name);
}
BIGINTBENCH = BenchmarkSuite.new("BigIntBench", :geometricMean, 0)
Dir.foreach(BIGINTBENCH_PATH) {
| filename |
if filename =~ /\.js$/
name = $~.pre_match
BIGINTBENCH.add BigIntBenchBenchmark.new(name)
end
}
# Microbenchmarks (every .js in the directory) and AsmBench, which only
# scans its directory when a path was configured.
MICROBENCHMARKS = BenchmarkSuite.new("Microbenchmarks", :geometricMean, 0)
Dir.foreach(MICROBENCHMARKS_PATH) {
| filename |
if filename =~ /\.js$/
name = $~.pre_match
MICROBENCHMARKS.add MicrobenchmarksBenchmark.new(name)
end
}
ASMBENCH = BenchmarkSuite.new("AsmBench", :geometricMean, 0)
if ASMBENCH_PATH
Dir.foreach(ASMBENCH_PATH) {
| filename |
if filename =~ /\.js$/
name = $~.pre_match
ASMBENCH.add AsmBenchBenchmark.new(name)
end
}
end
# CompressionBench: [source files, kernel name, variant label] per entry.
COMPRESSIONBENCH = BenchmarkSuite.new("CompressionBench", :geometricMean, 0)
[[["huffman", "compression-data"], "huffman", ""],
[["arithmetic", "compression-data"], "arithmetic", "Simple"],
[["arithmetic", "compression-data"], "arithmetic", "Precise"],
[["arithmetic", "compression-data"], "arithmetic", "Complex Precise"],
[["arithmetic", "compression-data"], "arithmetic", "Precise Order 0"],
[["arithmetic", "compression-data"], "arithmetic", "Precise Order 1"],
[["arithmetic", "compression-data"], "arithmetic", "Precise Order 2"],
[["arithmetic", "compression-data"], "arithmetic", "Simple Order 1"],
[["arithmetic", "compression-data"], "arithmetic", "Simple Order 2"],
[["lz-string", "compression-data"], "lz-string", ""]
].each {
| args |
COMPRESSIONBENCH.add CompressionBenchBenchmark.new(*args)
}
# SixSpeed: each test has an ES5 and an ES6 variant with a shared iteration
# count tuned per test.
SIXSPEED = BenchmarkSuite.new("SixSpeed", :geometricMean, 0)
[[ "template_string", 200000000 ],
[ "defaults", 100000000 ],
[ "map-set-lookup", 200000 ],
[ "spread", 1000000 ],
[ "object-assign", 600000 ],
[ "spread-literal", 1000000 ],
[ "map-set", 10000 ],
[ "destructuring-simple", 20000000 ],
[ "super", 3000000 ],
[ "for-of-object", 1000000 ],
[ "rest", 500000 ],
[ "regex-u", 1000000 ],
[ "arrow", 20000000 ],
[ "bindings-compound", 20000000 ],
[ "classes", 10000000 ],
[ "template_string_tag", 2000000 ],
[ "map-string", 30000000 ],
[ "arrow-declare", 30000000 ],
[ "spread-generator", 1000000 ],
[ "object-literal-ext", 1000000 ],
[ "generator", 3000000 ],
[ "arrow-args", 20000000 ],
[ "for-of-array", 5000000 ],
[ "bindings", 20000000 ],
[ "destructuring", 20000000 ],
[ "map-set-object", 5000 ],
].each {
| name, iterations |
SIXSPEED.add SixSpeedBenchmark.new("#{name}.es5", "#{name}/#{name}.es5", iterations)
SIXSPEED.add SixSpeedBenchmark.new("#{name}.es6", "#{name}/#{name}.es6", iterations)
}
# DSP (browser-hosted) and the two Browsermark suites.
DSPJS = BenchmarkSuite.new("DSP", :geometricMean, 0)
DSPJS.add DSPJSFiltrrBenchmark.new("filtrr-posterize-tint", "e2")
DSPJS.add DSPJSFiltrrBenchmark.new("filtrr-tint-contrast-sat-bright", "e5")
DSPJS.add DSPJSFiltrrBenchmark.new("filtrr-tint-sat-adj-contr-mult", "e7")
DSPJS.add DSPJSFiltrrBenchmark.new("filtrr-blur-overlay-sat-contr", "e8")
DSPJS.add DSPJSFiltrrBenchmark.new("filtrr-sat-blur-mult-sharpen-contr", "e9")
DSPJS.add DSPJSFiltrrBenchmark.new("filtrr-sepia-bias", "e10")
DSPJS.add DSPJSVP8Benchmark.new
DSPJS.add DSPStarfieldBenchmark.new
DSPJS.add DSPJSJSLinuxBenchmark.new
DSPJS.add DSPJSQuake3Benchmark.new
DSPJS.add DSPJSMandelbrotBenchmark.new
DSPJS.add DSPJSAmmoJSASMBenchmark.new
DSPJS.add DSPJSAmmoJSRegularBenchmark.new
BROWSERMARK_JS = BenchmarkSuite.new("BrowsermarkJS", :geometricMean, 1)
["array_blur", "array_weighted", "string_chat", "string_filter", "string_weighted"].each {
| name |
BROWSERMARK_JS.add BrowsermarkJSBenchmark.new(name)
}
BROWSERMARK_DOM = BenchmarkSuite.new("BrowsermarkDOM", :geometricMean, 1)
["advanced_search", "create_source", "dynamic_create", "search"].each {
| name |
BROWSERMARK_DOM.add BrowsermarkDOMBenchmark.new(name)
}
# Assemble $suites from the include flags. Suites that need a configured
# path are skipped (with a warning) when the path is missing; browser-only
# suites additionally require all VMs to be DumpRenderTree/WebKitTestRunner
# ($allDRT).
$suites = []
if $includeSunSpider and not SUNSPIDER.empty?
$suites << SUNSPIDER
end
if $includeSunSpiderCompileTime and not SUNSPIDER_COMPILE_TIME.empty?
$suites << SUNSPIDER_COMPILE_TIME
end
if $includeLongSpider and not LONGSPIDER.empty?
$suites << LONGSPIDER
end
if $includeV8 and not V8.empty?
$suites << V8
end
if $includeV8CompileTime and not V8_COMPILE_TIME.empty?
$suites << V8_COMPILE_TIME
end
if $includeOctane and not OCTANE.empty?
if OCTANE_PATH
$suites << OCTANE
else
$stderr.puts "Warning: refusing to run Octane because \"OctanePath\" isn't set in #{$configPath}."
end
end
if $includeKraken and not KRAKEN.empty?
if KRAKEN_PATH
$suites << KRAKEN
else
$stderr.puts "Warning: refusing to run Kraken because \"KrakenPath\" isn't set in #{$configPath}."
end
end
if $includeJSBench and not JSBENCH.empty?
if $allDRT
if JSBENCH_PATH
$suites << JSBENCH
else
$stderr.puts "Warning: refusing to run JSBench because \"JSBenchPath\" isn't set in #{$configPath}"
end
else
$stderr.puts "Warning: refusing to run JSBench because not all VMs are DumpRenderTree or WebKitTestRunner."
end
end
if $includeTailBench and not TAILBENCH.empty?
$suites << TAILBENCH
end
if $includeMicrobenchmarks and not MICROBENCHMARKS.empty?
$suites << MICROBENCHMARKS
end
if $includeBigIntBench and not BIGINTBENCH.empty?
$suites << BIGINTBENCH
end
if $includeAsmBench and not ASMBENCH.empty?
if ASMBENCH_PATH
$suites << ASMBENCH
else
$stderr.puts "Warning: refusing to run AsmBench because \"AsmBenchPath\" isn't set in #{$configPath}."
end
end
if $includeDSPJS and not DSPJS.empty?
if $allDRT
if DSPJS_FILTRR_PATH and DSPJS_ROUTE9_PATH and DSPJS_STARFIELD_PATH and DSPJS_QUAKE3_PATH and DSPJS_MANDELBROT_PATH and DSPJS_JSLINUX_PATH and DSPJS_AMMOJS_ASMJS_PATH and DSPJS_AMMOJS_REGULAR_PATH
$suites << DSPJS
else
$stderr.puts "Warning: refusing to run DSPJS because one of the following isn't set in #{$configPath}: \"DSPJSFiltrrPath\", \"DSPJSRoute9Path\", \"DSPJSStarfieldPath\", \"DSPJSQuake3Path\", \"DSPJSMandelbrotPath\", \"DSPJSLinuxPath\", \"DSPJSAmmoJSAsmJSPath\", \"DSPJSAmmoJSRegularPath\"."
end
else
$stderr.puts "Warning: refusing to run DSPJS because not all VMs are DumpRenderTree or WebKitTestRunner."
end
end
if $includeBrowsermarkJS and not BROWSERMARK_JS.empty?
if BROWSERMARK_PATH and BROWSERMARK_JS_PATH
$suites << BROWSERMARK_JS
else
$stderr.puts "Warning: refusing to run Browsermark-JS because one of the following isn't set in #{$configPath}: \"BrowserMarkPath\" or \"BrowserMarkJSPath\"."
end
end
if $includeBrowsermarkDOM and not BROWSERMARK_DOM.empty?
if $allDRT
if BROWSERMARK_PATH and BROWSERMARK_JS_PATH and BROWSERMARK_DOM_PATH
$suites << BROWSERMARK_DOM
else
$stderr.puts "Warning: refusing to run Browsermark-DOM because one of the following isn't set in #{$configPath}: \"BrowserMarkPath\", \"BrowserMarkJSPath\", or \"BrowserMarkDOMPath\"."
end
else
$stderr.puts "Warning: refusing to run Browsermark-DOM because not all VMs are DumpRenderTree or WebKitTestRunner."
end
end
if $includeCompressionBench and not COMPRESSIONBENCH.empty?
if COMPRESSIONBENCH_PATH
$suites << COMPRESSIONBENCH
else
$stderr.puts "Warning: refusing to run CompressionBench because \"CompressionBenchPath\" isn't set in #{$configPath}"
end
end
# NOTE(review): SIXSPEED_PATH is not among the config-derived constants
# above — presumably defined earlier in the file; verify it exists.
if $includeSixSpeed and not SIXSPEED.empty?
if SIXSPEED_PATH
$suites << SIXSPEED
else
$stderr.puts "Warning: refusing to run SixSpeed because \"SixSpeedPath\" isn't set in #{$configPath}."
end
end
$allSuites = $suites.map{|v| v.suites}.flatten(1)
$benchmarks = []
$suites.each {
| suite |
$benchmarks += suite.benchmarks
}
if $suites.empty? or $benchmarks.empty?
$stderr.puts "No benchmarks found. Bailing out."
exit 1
end
if $outer*$inner == 1
$stderr.puts "Warning: will only collect one sample per benchmark/VM. Confidence interval calculation will fail."
end
$stderr.puts "Using timeMode = #{$timeMode}." if $verbosity >= 1
# Build one BenchRunPlan per (VM, benchmark, outer iteration) and shuffle
# the execution order; optionally prepend a shuffled SunSpider warm-up pass
# whose results are ignored.
$runPlans = []
$vms.each {
| vm |
$benchmarks.each {
| benchmark |
$outer.times {
| iteration |
$runPlans << BenchRunPlan.new(benchmark, vm, iteration)
}
}
}
$runPlans.shuffle!
if $sunSpiderWarmup
warmupPlans = []
$vms.each {
| vm |
WARMUP.benchmarks.each {
| benchmark |
warmupPlans << BenchRunPlan.new(benchmark, vm, 0)
}
}
$runPlans = warmupPlans.shuffle + $runPlans
end
# Column widths for progress/report output, derived from the longest names.
$suitepad = $suites.collect {
| suite |
suite.to_s.size
}.max + 1
$planpad = $runPlans.collect {
| plan |
plan.to_s.size
}.max + 1
# The summary-row labels also compete for the benchmark-name column width.
maxBenchNameLength =
($benchmarks + ["<arithmetic> *", "<geometric> *", "<harmonic> *"]).collect {
| benchmark |
if benchmark.respond_to? :name
benchmark.name.size
else
benchmark.size
end
}.max
$benchNameClip = 40
$benchpad = [maxBenchNameLength, $benchNameClip].min + 1
$weightpad = $benchmarks.collect {
| benchmark |
benchmark.weightString.size
}.max
$vmpad = $vms.collect {
| vm |
vm.to_s.size
}.max + 1
# --analyze: re-parse previously captured run logs instead of running.
$analyze.each_with_index {
| filename, index |
if index >= 1
puts
end
parseAndDisplayResults(IO::read(filename))
}
if not $prepare and not $run
exit 0
end
# Recreate the bench-data staging directory from scratch.
if FileTest.exist? BENCH_DATA_PATH
cmd = "rm -rf #{BENCH_DATA_PATH}"
$stderr.puts ">> #{cmd}" if $verbosity >= 2
raise unless system cmd
end
Dir.mkdir BENCH_DATA_PATH
# Copy VMs into the staging directory only when every VM knows how.
if $needToCopyVMs
canCopyIntoBenchPath = true
$vms.each {
| vm |
canCopyIntoBenchPath = false unless vm.canCopyIntoBenchPath
}
if canCopyIntoBenchPath
$vms.each {
| vm |
$stderr.puts "Copying #{vm} into #{BENCH_DATA_PATH}..."
vm.copyIntoBenchPath
}
$stderr.puts "All VMs are in place."
else
$stderr.puts "Warning: don't know how to copy some VMs into #{BENCH_DATA_PATH}, so I won't do it."
end
end
# --measure-gc with a VM name: warn if no VM actually has that name.
if $measureGC and $measureGC != true
found = false
$vms.each {
| vm |
if vm.name == $measureGC
found = true
end
}
unless found
$stderr.puts "Warning: --measure-gc option ignored because no VM is named #{$measureGC}"
end
end
# Generate the shell runscript: banner lines (hostname/hardware, which the
# parser later picks up), "set -e", then each plan's run code, interleaved
# with progress echoes when running at default verbosity.
if $prepare
File.open("#{BENCH_DATA_PATH}/runscript", "w") {
| file |
file.puts "echo -e \"HOSTNAME:\\c\""
file.puts "hostname"
file.puts "echo"
file.puts "echo -e \"HARDWARE:\\c\""
file.puts "/usr/sbin/sysctl hw.model"
file.puts "echo"
file.puts "set -e"
$script = file
$runPlans.each_with_index {
| plan, idx |
if $verbosity == 0 and not $silent
# Two echoes: a padded one to overwrite the previous line, then the
# unpadded text that stays on screen while the plan runs.
text1 = lpad(idx.to_s,$runPlans.size.to_s.size)+"/"+$runPlans.size.to_s
text2 = plan.to_s
file.puts("echo -e " + Shellwords.shellescape("\r#{text1} #{rpad(text2,$planpad)}") + "\"\\c\" 1>&2")
file.puts("echo -e " + Shellwords.shellescape("\r#{text1} #{text2}") + "\"\\c\" 1>&2")
end
plan.emitRunCode
}
if $verbosity == 0 and not $silent
file.puts("echo -e " + Shellwords.shellescape("\r#{$runPlans.size}/#{$runPlans.size} #{' '*($suitepad+1+$benchpad+1+$vmpad)}") + "\"\\c\" 1>&2")
file.puts("echo -e " + Shellwords.shellescape("\r#{$runPlans.size}/#{$runPlans.size}") + " 1>&2")
end
}
end
if $run
unless $remoteHosts.empty?
# Tar up the staging directory once; it is shipped to each remote host.
$stderr.puts "Packaging benchmarking directory for remote hosts..." if $verbosity==0
Dir.chdir(TEMP_PATH) {
cmd = "tar -czf payload.tar.gz benchdata"
$stderr.puts ">> #{cmd}" if $verbosity>=2
raise unless system(cmd)
}
# Turns "host:2222" into ssh arguments "-p 2222 host"; a bare host is just
# shell-escaped.
def grokHost(host)
  m = /:([0-9]+)$/.match(host)
  if m
    "-p " + m[1] + " " + Shellwords.shellescape(m.pre_match)
  else
    Shellwords.shellescape(host)
  end
end
# Runs +command+ on +host+ via ssh (honoring $sshOptions) and returns its
# stdout as one string, echoing each line to stderr at high verbosity.
# Raises if ssh exits non-zero.
def sshRead(host, command)
cmd = "ssh #{$sshOptions.collect{|x| Shellwords.shellescape(x)}.join(' ')} #{grokHost(host)} #{Shellwords.shellescape(command)}"
$stderr.puts ">> #{cmd}" if $verbosity>=2
result = ""
IO.popen(cmd, "r") {
| inp |
inp.each_line {
| line |
$stderr.puts "#{host}: #{line}" if $verbosity>=2
result += line
}
}
raise "#{$?}" unless $?.success?
result
end
# Runs +command+ on +host+ via ssh, feeding +data+ to its stdin. Raises if
# ssh exits non-zero.
def sshWrite(host, command, data)
cmd = "ssh #{$sshOptions.collect{|x| Shellwords.shellescape(x)}.join(' ')} #{grokHost(host)} #{Shellwords.shellescape(command)}"
$stderr.puts ">> #{cmd}" if $verbosity>=2
IO.popen(cmd, "w") {
| outp |
outp.write(data)
}
raise "#{$?}" unless $?.success?
end
# For each remote host: look up its temp path from ~/.bencher, unpack the
# payload there, run the script, and parse the streamed-back results.
$remoteHosts.each {
| host |
$stderr.puts "Sending benchmark payload to #{host}..." if $verbosity==0
remoteTempPath = JSON::parse(sshRead(host, "cat ~/.bencher"))["tempPath"]
raise unless remoteTempPath
sshWrite(host, "cd #{Shellwords.shellescape(remoteTempPath)} && rm -rf benchdata && tar -xz", IO::read("#{TEMP_PATH}/payload.tar.gz"))
$stderr.puts "Running on #{host}..." if $verbosity==0
parseAndDisplayResults(sshRead(host, "cd #{Shellwords.shellescape(remoteTempPath + '/benchdata')} && sh runscript"))
}
end
if not $remoteHosts.empty? and $alsoLocal
$stderr.puts "Running locally..."
end
# Run locally when no remote hosts were given, or when --local asked for both.
if $remoteHosts.empty? or $alsoLocal
parseAndDisplayResults(runAndGetResults)
end
end
# --prepare-only: tell the user how to run the prepared script themselves.
if $prepare and not $run and $analyze.empty?
puts wrap("Benchmarking script and data are in #{BENCH_DATA_PATH}. You can run "+
"the benchmarks and get the results by doing:", 78)
puts
puts "cd #{BENCH_DATA_PATH}"
puts "sh runscript > results.txt"
puts
puts wrap("Then you can analyze the results by running bencher with the same arguments "+
"as now, but replacing --prepare-only with --analyze results.txt.", 78)
end
# Closes the script-wide begin: any StandardError is routed through fail().
rescue => e
fail(e)
end