#!/usr/bin/env ruby
# Copyright (C) 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
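# run-jsc-stress-tests: runs the JavaScriptCore stress test suites. Each command-line
# argument names a test "collection" (a YAML file or a directory of .js files). The
# script turns the collections into a list of jsc invocations and runs them in
# parallel via a generated Makefile, recording failures under the output directory.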
require 'getoptlong'
require 'pathname'
require 'yaml'
THIS_SCRIPT_PATH = Pathname.new(__FILE__).realpath
SCRIPTS_PATH = THIS_SCRIPT_PATH.dirname
WEBKIT_PATH = SCRIPTS_PATH.dirname.dirname
LAYOUTTESTS_PATH = WEBKIT_PATH + "LayoutTests"
raise unless SCRIPTS_PATH.basename.to_s == "Scripts"
raise unless SCRIPTS_PATH.dirname.basename.to_s == "Tools"
HELPERS_PATH = SCRIPTS_PATH + "jsc-stress-test-helpers"
IMPORTANT_ENVS = ["JSC_timeout", "DYLD_FRAMEWORK_PATH"]
begin
require 'shellwords'
rescue Exception => e
$stderr.puts "Warning: did not find shellwords, not running any tests."
exit 0
end
$canRunDisplayProfilerOutput = false
begin
require 'rubygems'
require 'json'
require 'highline'
$canRunDisplayProfilerOutput = true
rescue Exception => e
$stderr.puts "Warning: did not find json or highline; some features will be disabled."
$stderr.puts "Error: #{e.inspect}"
end
def printCommandArray(*cmd)
begin
commandArray = cmd.map{|value| Shellwords.shellescape(value.to_s)}.join(' ')
rescue
commandArray = cmd.join(' ')
end
$stderr.puts ">> #{commandArray}"
end
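# Print a command and run it via system(), raising if it exits with a non-zero status.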
def mysys(*cmd)
printCommandArray(*cmd)
raise "Command failed: #{$?.inspect}" unless system(*cmd)
end
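# Determine the number of processors: hw.availcpu on Darwin, nproc on Linux, or 1 as a last resort.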
$numProcessors = `sysctl -n hw.availcpu`.to_i
if $numProcessors == 0
$numProcessors = `nproc --all 2>/dev/null`.to_i
end
if $numProcessors == 0
$numProcessors = 1
end
$jscPath = nil
$enableFTL = false
$collections = []
$outputDir = Pathname.new("results")
$verbosity = 0
$errorOnFailure = false
def usage
puts "run-jsc-stress-tests -j <shell path> <collections path> [<collections path> ...]"
puts
puts "--jsc (-j) Path to JavaScriptCore. This option is required."
puts "--ftl-jit Indicate that we have the FTL JIT."
puts "--output-dir (-o) Path where to put results. Default is #{$outputDir}."
puts "--[no-]error-on-failure Exit with exit code 1 if any tests fail. Default is #{$errorOnFailure}."
puts "--verbose (-v) Print more things while running."
puts "--help (-h) Print this message."
exit 1
end
GetoptLong.new(['--help', '-h', GetoptLong::NO_ARGUMENT],
['--jsc', '-j', GetoptLong::REQUIRED_ARGUMENT],
['--ftl-jit', GetoptLong::NO_ARGUMENT],
['--output-dir', '-o', GetoptLong::REQUIRED_ARGUMENT],
['--verbose', '-v', GetoptLong::NO_ARGUMENT],
['--error-on-failure', GetoptLong::NO_ARGUMENT],
['--no-error-on-failure', GetoptLong::NO_ARGUMENT]).each {
| opt, arg |
case opt
when '--help'
usage
when '--jsc'
$jscPath = Pathname.new(arg).realpath
when '--output-dir'
$outputDir = Pathname.new(arg)
when '--ftl-jit'
$enableFTL = true
when '--error-on-failure'
$errorOnFailure = true
when '--no-error-on-failure'
$errorOnFailure = false
when '--verbose'
$verbosity += 1
end
}
$progressMeter = ($verbosity == 0 and $stdin.tty?)
unless $jscPath
$stderr.puts "Error: must specify -j <path>."
exit 1
end
$numFailures = 0
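# Options that lower the tier-up thresholds so the baseline, DFG, and FTL JITs kick in almost immediately.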
EAGER_OPTIONS = ["--thresholdForJITAfterWarmUp=10", "--thresholdForJITSoon=10", "--thresholdForOptimizeAfterWarmUp=20", "--thresholdForOptimizeAfterLongWarmUp=20", "--thresholdForOptimizeSoon=20", "--thresholdForFTLOptimizeAfterWarmUp=20", "--thresholdForFTLOptimizeSoon=20"]
$runlist = []
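# Shell fragments for post-processing test output: prefixCommand prefixes every output
# line with the given string (via awk); pipeAndPrefixCommand additionally tees the raw
# output to a file first.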
def prefixCommand(prefix)
"awk " + Shellwords.shellescape("{ printf #{(prefix + ': ').inspect}; print }")
end
def pipeAndPrefixCommand(outputFilename, prefix)
"tee " + Shellwords.shellescape(outputFilename.to_s) + " | " + prefixCommand(prefix)
end
# Output handler for tests that are expected to be silent.
def silentOutputHandler
Proc.new {
| name |
" | " + pipeAndPrefixCommand(($outputDir + (name + ".out")).to_s, name)
}
end
# Output handler for tests that are expected to produce meaningful output.
def noisyOutputHandler
Proc.new {
| name |
" | cat > " + Shellwords.shellescape(($outputDir + (name + ".out")).to_s)
}
end
# Error handler for tests that fail exactly when they return non-zero exit status.
def simpleErrorHandler
Proc.new {
| outp, plan |
outp.puts "if test -e #{plan.failFile}"
outp.puts "then"
outp.puts " (echo ERROR: Unexpected exit code: `cat #{plan.failFile}`) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts "else"
outp.puts " " + plan.successCommand
outp.puts "fi"
}
end
# Error handler for tests that diff their output with some expectation.
def diffErrorHandler(expectedFilename)
Proc.new {
| outp, plan |
outputFilename = Shellwords.shellescape(($outputDir + (plan.name + ".out")).to_s)
diffFilename = Shellwords.shellescape(($outputDir + (plan.name + ".diff")).to_s)
outp.puts "if test -e #{plan.failFile}"
outp.puts "then"
outp.puts " (cat #{outputFilename} && echo ERROR: Unexpected exit code: `cat #{plan.failFile}`) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts "elif test -e #{Shellwords.shellescape(expectedFilename)}"
outp.puts "then"
outp.puts " diff -u #{Shellwords.shellescape(expectedFilename)} #{outputFilename} > #{diffFilename}"
outp.puts " if [ $? -eq 0 ]"
outp.puts " then"
outp.puts " " + plan.successCommand
outp.puts " else"
outp.puts " (echo \"DIFF FAILURE!\" && cat #{diffFilename}) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts " fi"
outp.puts "else"
outp.puts " (echo \"NO EXPECTATION!\" && cat #{outputFilename}) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts "fi"
}
end
# Error handler for tests that report error by saying "failed!". This is used by Mozilla
# tests.
def mozillaErrorHandler
Proc.new {
| outp, plan |
outputFilename = Shellwords.shellescape(($outputDir + (plan.name + ".out")).to_s)
outp.puts "if test -e #{plan.failFile}"
outp.puts "then"
outp.puts " (cat #{outputFilename} && echo ERROR: Unexpected exit code: `cat #{plan.failFile}`) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts "elif ruby " + Shellwords.shellescape((HELPERS_PATH + "check-mozilla-failure").to_s) + " #{outputFilename}"
outp.puts "then"
outp.puts " (echo Detected failures: && cat #{outputFilename}) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts "else"
outp.puts " " + plan.successCommand
outp.puts "fi"
}
end
# Error handler for tests that report error by saying "failed!", and are expected to
# fail. This is used by Mozilla tests.
def mozillaFailErrorHandler
Proc.new {
| outp, plan |
outputFilename = Shellwords.shellescape(($outputDir + (plan.name + ".out")).to_s)
outp.puts "if test -e #{plan.failFile}"
outp.puts "then"
outp.puts " " + plan.successCommand
outp.puts "elif ruby " + Shellwords.shellescape((HELPERS_PATH + "check-mozilla-failure").to_s) + " #{outputFilename}"
outp.puts "then"
outp.puts " " + plan.successCommand
outp.puts "else"
outp.puts " (echo NOTICE: You made this test pass, but it was expected to fail) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts "fi"
}
end
# Error handler for tests that report error by saying "failed!", and are expected to have
# an exit code of 3.
def mozillaExit3ErrorHandler
Proc.new {
| outp, plan |
outputFilename = Shellwords.shellescape(($outputDir + (plan.name + ".out")).to_s)
outp.puts "if test -e #{plan.failFile}"
outp.puts "then"
outp.puts " if [ `cat #{plan.failFile}` -eq 3 ]"
outp.puts " then"
outp.puts " if ruby " + Shellwords.shellescape((HELPERS_PATH + "check-mozilla-failure").to_s) + " #{outputFilename}"
outp.puts " then"
outp.puts " (echo Detected failures: && cat #{outputFilename}) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts " else"
outp.puts " " + plan.successCommand
outp.puts " fi"
outp.puts " else"
outp.puts " (cat #{outputFilename} && echo ERROR: Unexpected exit code: `cat #{plan.failFile}`) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts " fi"
outp.puts "else"
outp.puts " (cat #{outputFilename} && echo ERROR: Test expected to fail, but returned successfully) | " + prefixCommand(plan.name)
outp.puts " " + plan.failCommand
outp.puts "fi"
}
end
$runCommandOptions = {}
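# A Plan describes one test invocation: the directory to run in, the jsc command line,
# a unique name, and the output/error handlers. writeRunScript emits a small shell
# script that runs the command, captures its output, and invokes either successCommand
# or failCommand based on the error handler's checks.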
class Plan
attr_reader :directory, :arguments, :name, :outputHandler, :errorHandler
attr_accessor :index
def initialize(directory, arguments, name, outputHandler, errorHandler)
@directory = directory.realpath
@arguments = arguments
@name = name
@outputHandler = outputHandler
@errorHandler = errorHandler
@isSlow = !!$runCommandOptions[:isSlow]
end
def shellCommand
"(cd #{Shellwords.shellescape(@directory.to_s)} && \"$@\" " + @arguments.map{
| v |
raise "Detected a non-string in #{inspect}" unless v.is_a? String
Shellwords.shellescape(v)
}.join(' ') + ")"
end
def reproScriptCommand
script = ""
IMPORTANT_ENVS.each {
| key |
if ENV[key]
script += "export #{key}=#{Shellwords.shellescape(ENV[key])}\n"
end
}
script += "#{shellCommand} || exit 1"
"echo #{Shellwords.shellescape(script)} > #{Shellwords.shellescape(($outputDir + @name).to_s)}"
end
def failCommand
"echo FAIL: #{Shellwords.shellescape(@name)} ; touch #{failFile} ; " + reproScriptCommand
end
def successCommand
if $progressMeter or $verbosity >= 2
"rm -f #{failFile} ; echo PASS: #{Shellwords.shellescape(@name)}"
else
"rm -f #{failFile}"
end
end
def failFile
"test_fail_#{@index}"
end
def writeRunScript(filename)
File.open(filename, "w") {
| outp |
outp.puts "echo Running #{Shellwords.shellescape(@name)}"
cmd = "(" + shellCommand + " || (echo $? > #{failFile})) 2>&1 "
cmd += @outputHandler.call(@name)
if $verbosity >= 3
outp.puts "echo #{Shellwords.shellescape(cmd)}"
end
outp.puts cmd
@errorHandler.call(outp, self)
}
end
end
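# Allocate a unique scratch filename under $outputDir/_payload (used, e.g., for profiler output).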
$uniqueFilenameCounter = 0
def uniqueFilename(extension)
payloadDir = $outputDir + "_payload"
Dir.mkdir payloadDir unless payloadDir.directory?
result = payloadDir.realpath + "temp-#{$uniqueFilenameCounter}#{extension}"
$uniqueFilenameCounter += 1
result
end
def baseOutputName(kind)
"#{$collectionName}/#{$benchmark}.#{kind}"
end
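# Build a Plan named "<collection>/<benchmark>.<kind>" for the current benchmark and add
# it to the run list. Slow tests go to the front of the list so they start early when
# running in parallel.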
def addRunCommand(kind, command, outputHandler, errorHandler)
plan = Plan.new($benchmarkDirectory, command, baseOutputName(kind), outputHandler, errorHandler)
if $numProcessors > 1 and $runCommandOptions[:isSlow]
$runlist.unshift plan
else
$runlist << plan
end
end
# Returns true if there were run commands found in the file ($benchmarkDirectory +
# $benchmark), in which case those run commands have already been executed. Otherwise
# returns false, in which case you're supposed to add your own run commands.
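# Directives look like ordinary comments; for example (illustrative), a test might start with:
#   //@ slow!
#   //@ runDefault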
def parseRunCommands
didRun = false
File.open($benchmarkDirectory + $benchmark) {
| inp |
inp.each_line {
| line |
begin
doesMatch = line =~ /^\/\/@/
rescue Exception => e
# This can happen for lines that are not valid UTF-8, where Ruby's regexp
# matching raises an exception. Just skip such lines.
next
end
next unless doesMatch
eval $~.post_match
didRun = true
}
}
didRun
end
def slow!
$runCommandOptions[:isSlow] = true
end
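# Add a plain jsc invocation of the current benchmark with the given extra options.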
def run(kind, *options)
addRunCommand(kind, [$jscPath.to_s] + options + [$benchmark.to_s], silentOutputHandler, simpleErrorHandler)
end
def runDefault
run("default")
end
def runNoLLInt
run("no-llint", "--useLLInt=false")
end
def runNoCJIT
run("no-cjit", "--enableConcurrentJIT=false")
end
def runDefaultFTL
run("default-ftl", "--useExperimentalFTL=true")
end
def runFTLNoCJIT
run("ftl-no-cjit", "--enableConcurrentJIT=false", "--useExperimentalFTL=true")
end
def runDFGEager
run("dfg-eager", *EAGER_OPTIONS)
end
def runDFGEagerNoCJIT
run("dfg-eager-no-cjit", "--enableConcurrentJIT=false", *EAGER_OPTIONS)
end
def runFTLEager
run("ftl-eager", "--useExperimentalFTL=true", *EAGER_OPTIONS)
end
def runFTLEagerNoCJIT
run("ftl-eager-no-cjit", "--useExperimentalFTL=true", "--enableConcurrentJIT=false", *EAGER_OPTIONS)
end
def runAlwaysTriggerCopyPhase
run("always-trigger-copy-phase", "--minHeapUtilization=2.0", "--minCopiedBlockUtilization=2.0")
end
def defaultRun
runDefault
runNoLLInt
runAlwaysTriggerCopyPhase
runNoCJIT
runDFGEager
runDFGEagerNoCJIT
if $enableFTL
runDefaultFTL
runFTLNoCJIT
runFTLEager
runFTLEagerNoCJIT
end
end
def defaultQuickRun
if $enableFTL
runDefaultFTL
runFTLNoCJIT
else
runDefault
runNoCJIT
end
end
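# Run the benchmark under the profiler (-p <json file>) and post-process the output
# with display-profiler-output when the required gems are available; otherwise just
# run with -p and keep the raw output.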
def runProfiler
profilerOutput = uniqueFilename(".json")
if $canRunDisplayProfilerOutput
addRunCommand("profiler", ["ruby", (HELPERS_PATH + "profiler-test-helper").to_s, (SCRIPTS_PATH + "display-profiler-output").to_s, profilerOutput.to_s, $jscPath.to_s, "-p", profilerOutput.to_s, $benchmark.to_s], silentOutputHandler, simpleErrorHandler)
else
puts "Running simple version of #{$collectionName}/#{$benchmark} because some required Ruby features are unavailable."
run("profiler-simple", "-p", profilerOutput.to_s)
end
end
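# Run a LayoutTests-style test: wrap the benchmark between standalone-pre.js and
# standalone-post.js and diff its output against the <name>-expected.txt file in the
# parent directory.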
def runLayoutTest(kind, *options)
raise unless $benchmark.to_s =~ /\.js$/
testName = $~.pre_match
if kind
kind = "layout-" + kind
else
kind = "layout"
end
args =
[$jscPath.to_s] + options +
[(LAYOUTTESTS_PATH + "resources" + "standalone-pre.js").to_s,
$benchmark.to_s,
(LAYOUTTESTS_PATH + "resources" + "standalone-post.js").to_s]
addRunCommand(kind, args, noisyOutputHandler, diffErrorHandler(($benchmarkDirectory.dirname + "#{testName}-expected.txt").to_s))
end
def runLayoutTestDefault
runLayoutTest(nil)
end
def runLayoutTestNoLLInt
runLayoutTest("no-llint", "--useLLInt=false")
end
def runLayoutTestNoCJIT
runLayoutTest("no-cjit", "--enableConcurrentJIT=false")
end
def runLayoutTestDFGEagerNoCJIT
runLayoutTest("dfg-eager-no-cjit", "--enableConcurrentJIT=false", *EAGER_OPTIONS)
end
def defaultRunLayoutTest
runLayoutTestDefault
runLayoutTestNoLLInt
runLayoutTestNoCJIT
runLayoutTestDFGEagerNoCJIT
end
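# Run a Mozilla-suite test. extraFiles are prerequisite scripts loaded before the test
# itself. mode selects the error handler: :normal expects a clean pass, :negative expects
# exit code 3, :fail expects the test to report failure, and :skip skips the test.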
def runMozillaTest(kind, mode, extraFiles, *options)
if kind
kind = "mozilla-" + kind
else
kind = "mozilla"
end
args = [$jscPath.to_s] + options + extraFiles.map{|v| ($benchmarkDirectory + v).to_s} + [$benchmark.to_s]
case mode
when :normal
errorHandler = mozillaErrorHandler
when :negative
errorHandler = mozillaExit3ErrorHandler
when :fail
errorHandler = mozillaFailErrorHandler
when :skip
return
else
raise "Invalid mode: #{mode}"
end
addRunCommand(kind, args, noisyOutputHandler, errorHandler)
end
def runMozillaTestDefault(mode, *extraFiles)
runMozillaTest(nil, mode, extraFiles)
end
def runMozillaTestLLInt(mode, *extraFiles)
runMozillaTest("llint", mode, extraFiles, "--useJIT=false")
end
def runMozillaTestBaselineJIT(mode, *extraFiles)
runMozillaTest("baseline", mode, extraFiles, "--useLLInt=false", "--useDFGJIT=false")
end
def runMozillaTestDFGEagerNoCJIT(mode, *extraFiles)
runMozillaTest("dfg-eager-no-cjit", mode, extraFiles, "--enableConcurrentJIT=false", *EAGER_OPTIONS)
end
def defaultRunMozillaTest(mode, *extraFiles)
runMozillaTestDefault(mode, *extraFiles)
runMozillaTestLLInt(mode, *extraFiles)
runMozillaTestBaselineJIT(mode, *extraFiles)
runMozillaTestDFGEagerNoCJIT(mode, *extraFiles)
end
def skip
puts "Skipping #{$collectionName}/#{$benchmark}"
end
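# Create the output directory and clear any stale 'failed' list from a previous run.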
Dir.mkdir($outputDir) unless $outputDir.directory?
begin
File.delete($outputDir + "failed")
rescue
end
$outputDir = $outputDir.realpath
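# Return all .js files directly contained in path, or [path] itself if it is a file.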
def allJSFiles(path)
if path.file?
[path]
else
result = []
Dir.foreach(path) {
| filename |
next unless filename =~ /\.js$/
next unless (path + filename).file?
result << path + filename
}
result
end
end
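# Append -1, -2, ... to name until it no longer collides with a previously seen name.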
def uniqueifyName(names, name)
result = name.to_s
toAdd = 1
while names[result]
result = "#{name}-#{toAdd}"
toAdd += 1
end
names[result] = true
result
end
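# Derive a readable collection name: for directories with generic names (matching /test/),
# prepend parent directory names, then make the result unique.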
def simplifyCollectionName(collectionPath)
outerDir = collectionPath.dirname
name = collectionPath.basename
lastName = name
if collectionPath.directory?
while lastName.to_s =~ /test/
lastName = outerDir.basename
name = lastName + name
outerDir = outerDir.dirname
end
end
uniqueifyName($collectionNames, name)
end
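# Create the (possibly nested) subdirectory of $outputDir that will hold this collection's results.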
def prepareCollection(name)
dir = $outputDir
Pathname.new(name).each_filename {
| filename |
dir = dir + filename
Dir.mkdir(dir) unless dir.directory?
}
end
$collectionNames = {}
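# A collection file is a YAML list; each entry either references a nested collection or
# gives a path (relative to the collection file) plus the Ruby code to eval for every
# .js file found there. For example (illustrative):
#   - path: regress/script-tests
#     cmd: defaultRun unless parseRunCommands
#   - collection: mozilla/mozilla-tests.yaml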
def handleCollectionFile(collection)
collectionName = simplifyCollectionName(collection)
paths = {}
subCollections = []
YAML::load(IO::read(collection)).each {
| entry |
if entry["collection"]
subCollections << entry["collection"]
next
end
if Pathname.new(entry["path"]).absolute?
raise "Absolute path: " + entry["path"] + " in #{collection}"
end
if paths[entry["path"]]
raise "Duplicate path: " + entry["path"] + " in #{collection}"
end
subCollection = collection.dirname + entry["path"]
if subCollection.file?
subCollectionName = Pathname.new(entry["path"]).dirname
else
subCollectionName = entry["path"]
end
$collection = subCollection
$collectionName = Pathname.new(collectionName)
Pathname.new(subCollectionName).each_filename {
| filename |
next if filename =~ /^\./
$collectionName += filename
}
$collectionName = $collectionName.to_s
prepareCollection($collectionName)
allJSFiles(subCollection).each {
| path |
path = path.realpath
$benchmark = path.basename
$benchmarkDirectory = path.dirname
$runCommandOptions = {}
eval entry["cmd"]
}
}
subCollections.each {
| subCollection |
handleCollection(collection.dirname + subCollection)
}
end
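# A directory collection: every .js file in it runs its own //@ directives, or defaultRun if it has none.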
def handleCollectionDirectory(collection)
collectionName = simplifyCollectionName(collection)
prepareCollection(collectionName)
$collection = collection
$collectionName = collectionName
$benchmarkDirectory = $collection
allJSFiles($collection).each {
| path |
$benchmark = path.basename
$runCommandOptions = {}
defaultRun unless parseRunCommands
}
end
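# Dispatch on whether the collection argument names a YAML file or a directory.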
def handleCollection(collection)
collection = Pathname.new(collection)
if collection.file?
handleCollectionFile(collection)
else
handleCollectionDirectory(collection)
end
end
ARGV.each {
| collection |
handleCollection(collection)
}
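# Record a failed plan in $outputDir/failed and bump the failure count.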
def appendFailure(plan)
File.open($outputDir + "failed", "a") {
| outp |
outp.puts plan.name
}
$numFailures += 1
end
if $enableFTL and ENV["JSC_timeout"]
# Currently, using the FTL is a performance regression particularly in real
# (i.e. non-loopy) benchmarks. Account for this in the timeout.
ENV["JSC_timeout"] = (ENV["JSC_timeout"].to_i * 2).to_s
end
if ENV["JSC_timeout"]
# In the worst case, the processors just interfere with each other.
# Scale the timeout with the square root of the number of processors to account for this.
ENV["JSC_timeout"] = (ENV["JSC_timeout"].to_i.to_f * Math.sqrt($numProcessors)).to_i.to_s
end
# The goals of our parallel test runner are scalability and simplicity. The
# simplicity part is particularly important. We don't want to have to have
# a full-time contributor just philosophising about parallel testing.
#
# As such, we just pass off all of the hard work to 'make'. This creates a
# dummy directory ("$outputDir/.parallel") in which we create a dummy
# Makefile. The Makefile has an 'all' rule that depends on all of the tests.
# That is, for each test we know we will run, there is a rule in the
# Makefile and 'all' depends on it. Running 'make -j <whatever>' on this
# Makefile results in 'make' doing all of the hard work:
#
# - Load balancing just works. Most systems have a great load balancer in
# 'make'. If your system doesn't, then just install a real 'make'.
#
# - Interruptions just work. For example Ctrl-C handling in 'make' is
# exactly right. You don't have to worry about zombie processes.
#
# We then do some tricks to make failure detection work and to make this
# totally sound. If a test fails, we don't want the whole 'make' job to
# stop. We also don't have any facility for makefile-escaping of path names.
# We do have such a thing for shell-escaping, though. We fix both problems
# by having the actual work for each of the test rules be done in a shell
# script on the side. There is one such script per test. The script responds
# to failure by printing something on the console and then touching a
# failure file for that test, but then still returns 0. This makes 'make'
# continue past that failure and complete all the tests anyway.
#
# In the end, this script collects all of the failures by searching for
# files in the .parallel directory whose name matches /^test_fail_/, where
# the thing after the 'fail_' is the test index. Those are the files that
# would be created by the test scripts if they detect failure. We're
# basically using the filesystem as a concurrent database of test failures.
# Even if two tests fail at the same time, since they're touching different
# files we won't miss any failures.
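#
# The generated Makefile ends up looking roughly like this (one rule per test):
#
#   all: test_done_0 test_done_1 test_done_2 ...
#   test_done_0:
#       sh test_script_0
#   test_done_1:
#       sh test_script_1
#   ...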
runIndices = []
$runlist.each_with_index {
| plan, index |
runIndices << index
plan.index = index
}
parallelDir = $outputDir + ".parallel"
Dir.mkdir(parallelDir) unless parallelDir.directory?
toDelete = []
Dir.foreach(parallelDir) {
| filename |
if filename =~ /^test_/
toDelete << filename
end
}
toDelete.each {
| filename |
File.unlink(parallelDir + filename)
}
puts
$runlist.each {
| plan |
plan.writeRunScript(parallelDir + "test_script_#{plan.index}")
}
File.open(parallelDir + "Makefile", "w") {
| outp |
outp.puts("all: " + runIndices.map{|v| "test_done_#{v}"}.join(' '))
runIndices.each {
| index |
plan = $runlist[index]
outp.puts "test_done_#{index}:"
outp.puts "\tsh test_script_#{plan.index}"
}
}
Dir.chdir(parallelDir) {
unless $progressMeter
mysys("make", "-j", $numProcessors.to_s, "-s", "-f", "Makefile")
else
cmd = "make -j #{$numProcessors} -s -f Makefile"
running = {}
didRun = {}
didFail = {}
blankLine = true
prevStringLength = 0
IO.popen(cmd, "r") {
| inp |
inp.each_line {
| line |
line.chomp!
if line =~ /^Running /
running[$~.post_match] = true
elsif line =~ /^PASS: /
didRun[$~.post_match] = true
elsif line =~ /^FAIL: /
didRun[$~.post_match] = true
didFail[$~.post_match] = true
else
unless blankLine
print("\r" + " " * prevStringLength + "\r")
end
puts line
blankLine = true
end
def lpad(str, chars)
str = str.to_s
if str.length > chars
str
else
"%#{chars}s"%(str)
end
end
string = ""
string += "\r#{lpad(didRun.size, $runlist.size.to_s.size)}/#{$runlist.size}"
unless didFail.empty?
string += " (failed #{didFail.size})"
end
string += " "
(running.size - didRun.size).times {
string += "."
}
if string.length < prevStringLength
print string
print(" " * (prevStringLength - string.length))
end
print string
prevStringLength = string.length
blankLine = false
$stdout.flush
}
}
puts
raise "Failed to run #{cmd}: #{$?.inspect}" unless $?.success?
end
}
# Delete empty .out files to make life less confusing.
$runlist.each {
| plan |
outputFilename = $outputDir + (plan.name + ".out")
File.unlink outputFilename if FileTest.size(outputFilename) == 0
}
Dir.foreach(parallelDir) {
| filename |
next unless filename =~ /test_fail_/
appendFailure($runlist[$~.post_match.to_i])
}