| #!/usr/bin/env ruby |
| |
| # Copyright (C) 2013-2021 Apple Inc. All rights reserved. |
| # |
| # Redistribution and use in source and binary forms, with or without |
| # modification, are permitted provided that the following conditions |
| # are met: |
| # |
| # 1. Redistributions of source code must retain the above copyright |
| # notice, this list of conditions and the following disclaimer. |
| # 2. Redistributions in binary form must reproduce the above copyright |
| # notice, this list of conditions and the following disclaimer in the |
| # documentation and/or other materials provided with the distribution. |
| # |
| # THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY |
| # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| # DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
| # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
| # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| require 'fileutils' |
| require 'getoptlong' |
| require 'ostruct' |
| require 'pathname' |
| require 'rbconfig' |
| require 'set' |
| require 'tempfile' |
| require 'uri' |
| require 'yaml' |
| |
| require_relative "webkitruby/jsc-stress-test/test-result-evaluator" |
| require_relative "webkitruby/jsc-stress-test/executor" |
| |
# Teach Ruby's URI library about ssh:// URLs so a remote host given as
# "user@host:port" can be parsed with URI("ssh://" + ...).
module URI
    class SSH < Generic
        # Standard SSH port, used when the URL does not specify one.
        DEFAULT_PORT = 22
    end
    # URI.register_scheme only exists in newer Rubies; on older ones fall back
    # to poking the scheme directly into URI's internal registry.
    unless defined?(self.register_scheme)
        def self.register_scheme(scheme, klass)
            @@schemes[scheme] = klass
        end
    end
    register_scheme 'SSH', SSH
end
| |
class String
    # Replace invalid byte sequences with "?" by round-tripping through
    # UTF-16BE (the intermediate encoding forces validation of every byte).
    # NOTE(review): this overrides Ruby's built-in String#scrub (2.1+), which
    # replaces bad bytes with U+FFFD — confirm callers rely on "?" instead.
    def scrub
        encode("UTF-16be", :invalid=>:replace, :replace=>"?").encode('UTF-8')
    end
end
| |
# Dump a backtrace on SIGTERM to help diagnose watchdog kills.
Signal.trap("TERM") {
    # Occasionally, the jscore-test step fails to produce any output for 10' at
    # which point it gets killed by the buildbot code. Try to diagnose.
    puts("Received SIGTERM")
    puts(Thread.current.backtrace)
}
| |
# Description of one machine reachable over SSH that tests can be farmed out to.
RemoteHost = Struct.new(:name, :user, :host, :port, :remoteDirectory, :identity_file_path)

# Derive key WebKit checkout paths from this script's own location.
THIS_SCRIPT_PATH = Pathname.new(__FILE__).realpath
SCRIPTS_PATH = THIS_SCRIPT_PATH.dirname
WEBKIT_PATH = SCRIPTS_PATH.dirname.dirname
LAYOUTTESTS_PATH = WEBKIT_PATH + "LayoutTests"
WASMTESTS_PATH = WEBKIT_PATH + "JSTests/wasm"
JETSTREAM2_PATH = WEBKIT_PATH + "PerformanceTests/JetStream2"
CHAKRATESTS_PATH = WEBKIT_PATH + "JSTests/ChakraCore/test"
# Sanity-check that this script still lives in Tools/Scripts.
raise unless SCRIPTS_PATH.basename.to_s == "Scripts"
raise unless SCRIPTS_PATH.dirname.basename.to_s == "Tools"

HELPERS_PATH = SCRIPTS_PATH + "jsc-stress-test-helpers"
# Per-test status files are named "<prefix><index>" and contain one of the
# single-character codes below.
STATUS_FILE_PREFIX = "test_status_"
STATUS_FILE_PASS = "P"
STATUS_FILE_FAIL = "F"

# These are all random and adequately large to be unlikely to appear
# in practice, except as used in this file.
PARALLEL_REMOTE_WRAPPER_MARK_BEGIN = "5d65329bd1a3"
PARALLEL_REMOTE_WRAPPER_MARK_END = "a9aea5c3b843"
PARALLEL_REMOTE_STATE_LOST_MARKER = "709fb7a77c45231918eb118a"
ITERATION_LIMITS = OpenStruct.new(:infraIterationsFloor => 3,
                                  :iterationsCeiling => 50)

# SSH settings used when talking to remote hosts.
REMOTE_TIMEOUT = 120
SSH_OPTIONS_DEFAULT = [
    "-o",
    "NoHostAuthenticationForLocalhost=yes",
    "-o",
    "ServerAliveInterval=30",
]
| |
begin
    require 'shellwords'
rescue LoadError
    # Without Shellwords we cannot safely build any shell commands; bail out.
    # Rescue only LoadError (the `rescue Exception` this replaces would also
    # have swallowed signals and system exits).
    $stderr.puts "Warning: did not find shellwords, not running any tests."
    exit 0
end
| |
# Whether the optional profiler-output display feature can run; it requires
# the json and highline gems, which may not be installed.
$canRunDisplayProfilerOutput = false

begin
    require 'rubygems'
    require 'json'
    require 'highline'
    $canRunDisplayProfilerOutput = true
rescue LoadError => e
    # These gems are optional; rescue only LoadError (the previous
    # `rescue Exception` would also have hidden unrelated fatal errors).
    $stderr.puts "Warning: did not find json or highline; some features will be disabled."
    $stderr.puts "Run \"sudo gem install json highline\" to fix the issue."
    $stderr.puts "Error: #{e.inspect}"
end
| |
# Echo a command line to stderr, shell-escaping each argument. Accepts either
# a splatted list of arguments or a single array (callers use both forms).
def printCommandArray(*cmd)
    begin
        # The original used Array#each here, which returns the receiver and
        # silently discarded every shellescape result; map actually applies
        # the escaping. flatten handles the single-array calling convention.
        commandArray = cmd.flatten.map{|value| Shellwords.shellescape(value.to_s)}.join(' ')
    rescue
        # Best effort: fall back to an unescaped join rather than failing.
        commandArray = cmd.join(' ')
    end
    $stderr.puts ">> #{commandArray}"
end
| |
# Raised by mysys when a spawned command exits unsuccessfully.
# NOTE(review): subclasses Exception rather than StandardError, so bare
# `rescue` clauses will NOT catch it — confirm this is intentional before
# changing the parent class.
class CommandExecutionFailed < Exception
end
| |
# Run a command, raising CommandExecutionFailed on a non-zero exit unless
# options[:ignoreFailure] is set. Echoes the command when running verbosely.
def mysys(commandArray, options={})
    printCommandArray(commandArray) if $verbosity >= 1
    succeeded = system(*commandArray)
    exitStatus = $?
    return if succeeded || options[:ignoreFailure]
    raise CommandExecutionFailed, "Command failed: #{exitStatus.inspect}"
end
| |
# Shell-escape every element of array and join them with spaces.
# Raises if any element is not a String.
def escapeAll(array)
    array.map { |value|
        # Include the offending array in the message; the previous bare
        # `inspect` showed the receiver (`main`), not the bad input.
        raise "Detected a non-string in #{array.inspect}" unless value.is_a? String
        Shellwords.shellescape(value)
    }.join(' ')
end
| |
# Global configuration state. The defaults below are overridden by the
# command-line options parsed further down.
$jscPath = nil
$doNotMessWithVMPath = false
$jitTests = true
$memoryLimited = false
$outputDir = Pathname.new("results")
$verbosity = 0
$bundle = nil
$tarball = false
$tarFileName = "payload.tar.gz"
$copyVM = false
$testRunnerType = nil
$testWriter = "default"
$remoteHosts = []
$architecture = nil
$forceArchitecture = nil
$hostOS = nil
$model = nil
$filter = nil
$envVars = []
$mode = "full"
$buildType = "release"
$forceCollectContinuously = false
$reportExecutionTime = false
$ldd = nil
$artifact_exec_wrapper = nil
$numChildProcessesSetByUser = false
# Random hex token identifying this run (used to keep artifacts distinct).
$runUniqueId = Random.new.bytes(16).unpack("H*")[0]
$gnuParallelChunkSize = 1
$testsDebugStream = nil # set to $stderr for debugging
| |
# Debug print: writes to $testsDebugStream when one is configured, and is a
# no-op otherwise (the stream is nil by default).
def putd(s)
    stream = $testsDebugStream
    stream.puts(s) if stream
end
| |
# Print command-line help to stdout and terminate with a failure status.
def usage
    puts "run-jsc-stress-tests -j <shell path> <collections path> [<collections path> ...]"
    puts
    puts "--jsc                (-j)   Path to JavaScriptCore build product. This option is required."
    puts "--no-copy                   Do not copy the JavaScriptCore build product before testing."
    puts "                            --jsc specifies an already present JavaScriptCore to test."
    puts "--memory-limited            Indicate that we are targeting the test for a memory limited device."
    puts "                            Skip tests tagged with //@skip if $memoryLimited"
    puts "--no-jit                    Do not run JIT specific tests."
    puts "--force-collectContinuously Enable the collectContinuously mode even if disabled on this"
    puts "                            platform."
    puts "--output-dir         (-o)   Path where to put results. Default is #{$outputDir}."
    puts "--verbose            (-v)   Print more things while running."
    puts "--run-bundle                Runs a bundle previously created by run-jsc-stress-tests."
    puts "--tarball [fileName]        Creates a tarball of the final bundle. Use name if supplied for tar file."
    puts "--arch                      Specify architecture instead of determining from JavaScriptCore build."
    puts "--force-architecture        Override the architecture to run tests with."
    puts "                            e.g. x86, x86_64, arm."
    puts "--ldd                       Use alternate ldd"
    puts "--artifact-exec-wrapper     Wrapper for executing a build artifact"
    puts "--os                        Specify os instead of determining from JavaScriptCore build."
    puts "                            e.g. darwin, linux & windows."
    puts "--shell-runner              Uses the shell-based test runner instead of the default make-based runner."
    puts "                            In general the shell runner is slower than the make runner."
    puts "--make-runner               Uses the faster make-based runner."
    puts "--ruby-runner               Uses the ruby runner for machines without unix shell or make."
    puts "--test-writer [writer]      Specifies the test script format."
    puts "                            default is to use shell scripts to run the tests"
    puts "                            \"ruby\" to use ruby scripts for systems without a unix shell."
    puts "--remote                    Specify a remote host on which to run tests from command line argument."
    puts "--remote-config-file        Specify a remote host on which to run tests from JSON file."
    puts "--report-execution-time     Print execution time for each test."
    puts "--child-processes    (-c)   Specify the number of child processes."
    puts "--filter                    Only run tests whose name matches the given regular expression."
    puts "--help               (-h)   Print this message."
    puts "--env-vars                  Add a list of environment variables to set before running jsc."
    puts "                            Each environment variable should be separated by a space."
    puts "                            e.g. \"foo=bar x=y\" (no quotes). Note, if you pass DYLD_FRAMEWORK_PATH"
    puts "                            it will override the default value."
    puts "--quick              (-q)   Only run with the default and no-cjit-validate modes."
    puts "--basic                     Run with default and these additional modes: no-llint,"
    puts "                            no-cjit-validate-phases, no-cjit-collect-continuously, dfg-eager"
    puts "                            and for FTL platforms: no-ftl, ftl-eager-no-cjit and"
    puts "                            ftl-no-cjit-small-pool."
    exit 1
end
| |
# Raw value of --jsc; resolved (or guessed) into $jscPath after parsing.
jscArg = nil

# Parse the command line, mostly into the globals declared above.
GetoptLong.new(['--help', '-h', GetoptLong::NO_ARGUMENT],
               ['--jsc', '-j', GetoptLong::REQUIRED_ARGUMENT],
               ['--no-copy', GetoptLong::NO_ARGUMENT],
               ['--memory-limited', GetoptLong::NO_ARGUMENT],
               ['--no-jit', GetoptLong::NO_ARGUMENT],
               ['--force-collectContinuously', GetoptLong::NO_ARGUMENT],
               ['--output-dir', '-o', GetoptLong::REQUIRED_ARGUMENT],
               ['--run-bundle', GetoptLong::REQUIRED_ARGUMENT],
               ['--tarball', GetoptLong::OPTIONAL_ARGUMENT],
               ['--force-vm-copy', GetoptLong::NO_ARGUMENT],
               ['--arch', GetoptLong::REQUIRED_ARGUMENT],
               ['--force-architecture', GetoptLong::REQUIRED_ARGUMENT],
               ['--ldd', GetoptLong::REQUIRED_ARGUMENT],
               ['--artifact-exec-wrapper', GetoptLong::REQUIRED_ARGUMENT],
               ['--os', GetoptLong::REQUIRED_ARGUMENT],
               ['--shell-runner', GetoptLong::NO_ARGUMENT],
               ['--make-runner', GetoptLong::NO_ARGUMENT],
               ['--ruby-runner', GetoptLong::NO_ARGUMENT],
               ['--gnu-parallel-runner', GetoptLong::NO_ARGUMENT],
               ['--gnu-parallel-chunk-size', GetoptLong::REQUIRED_ARGUMENT],
               ['--test-writer', GetoptLong::REQUIRED_ARGUMENT],
               ['--treat-failing-as-flaky', GetoptLong::REQUIRED_ARGUMENT],
               ['--remote', GetoptLong::REQUIRED_ARGUMENT],
               ['--remote-config-file', GetoptLong::REQUIRED_ARGUMENT],
               ['--report-execution-time', GetoptLong::NO_ARGUMENT],
               ['--model', GetoptLong::REQUIRED_ARGUMENT],
               ['--child-processes', '-c', GetoptLong::REQUIRED_ARGUMENT],
               ['--filter', GetoptLong::REQUIRED_ARGUMENT],
               ['--verbose', '-v', GetoptLong::NO_ARGUMENT],
               ['--env-vars', GetoptLong::REQUIRED_ARGUMENT],
               ['--debug', GetoptLong::NO_ARGUMENT],
               ['--release', GetoptLong::NO_ARGUMENT],
               ['--quick', '-q', GetoptLong::NO_ARGUMENT],
               ['--basic', GetoptLong::NO_ARGUMENT]).each {
    | opt, arg |
    case opt
    when '--help'
        usage
    when '--jsc'
        jscArg = arg
    when '--no-copy'
        $doNotMessWithVMPath = true
    when '--output-dir'
        $outputDir = Pathname.new(arg)
    when '--memory-limited'
        $memoryLimited = true
    when '--no-jit'
        $jitTests = false
    when '--force-collectContinuously'
        $forceCollectContinuously = true;
    when '--verbose'
        $verbosity += 1
    when '--run-bundle'
        $bundle = Pathname.new(arg)
    when '--tarball'
        # Tarballing implies copying the VM into the bundle first.
        $tarball = true
        $copyVM = true
        $tarFileName = arg unless arg == ''
    when '--force-vm-copy'
        $copyVM = true
    when '--shell-runner'
        $testRunnerType = :shell
    when '--make-runner'
        $testRunnerType = :make
    when '--ruby-runner'
        $testRunnerType = :ruby
    when '--gnu-parallel-runner'
        $testRunnerType = :gnuparallel
    when '--gnu-parallel-chunk-size'
        $gnuParallelChunkSize = arg.to_i
    when '--test-writer'
        $testWriter = arg
    when '--treat-failing-as-flaky'
        # Argument format: "passPercentage,maxTries,maxFailing".
        md = /^([^,]+),(\d+),(\d+)$/.match(arg)
        if md.nil?
            $stderr.puts("Could not parse argument to `--treat-failing-as-flaky`; expected `passPercentage,maxTries,maxFailing`")
            exit(1)
        end
        passPercentage = md[1].to_f
        if passPercentage.zero?
            $stderr.puts("Invalid passPercentage `#{md[1]}`")
            exit(1)
        end
        maxTries = md[2].to_i
        if maxTries == 0
            $stderr.puts("Invalid maxTries `#{md[2]}`")
            exit(1)
        end
        maxFailing = md[3].to_i
        if maxFailing == 0
            $stderr.puts("Invalid maxFailing `#{md[3]}`")
            exit(1)
        end
        $treatFailingAsFlaky = OpenStruct.new(:passPercentage => passPercentage,
                                              :maxTries => maxTries,
                                              :maxFailing => maxFailing)
    when '--remote'
        # Remote execution implies copying and tarballing the VM.
        $copyVM = true
        $tarball = true
        $remote = true
        uri = URI("ssh://" + arg)
        $remoteHosts << RemoteHost.new("default-#{$remoteHosts.length}", uri.user, uri.host, uri.port)
    when '--remote-config-file'
        $remoteConfigFile = arg
    when '--report-execution-time'
        $reportExecutionTime = true
    when '--child-processes'
        $numChildProcesses = arg.to_i
        $numChildProcessesSetByUser = true
    when '--filter'
        $filter = Regexp.new(arg)
    when '--arch'
        $architecture = arg
    when '--force-architecture'
        $architecture = arg unless $architecture
        $forceArchitecture = arg
    when '--ldd'
        $ldd = arg
    when '--artifact-exec-wrapper'
        $artifact_exec_wrapper = arg
    when '--os'
        $hostOS = arg
    when '--model'
        # Strip any surrounding quotes from the model name.
        $model = arg.gsub(/\A['"]+|['"]+\Z/, '')
    when '--env-vars'
        $envVars = arg.gsub(/\s+/, ' ').split(' ')
    when '--quick'
        $mode = "quick"
    when '--basic'
        $mode = "basic"
    when '--debug'
        $buildType = "debug"
    when '--release'
        $buildType = "release"
    end
}
| |
# Load remote host definitions from a JSON config file. Two formats are
# supported: the legacy single-host keys ("remote", "remoteDirectory",
# "idFilePath") and the newer "remotes" array; --remote on the command line
# may be combined with the latter.
if $remoteConfigFile
    file = File.read($remoteConfigFile)
    config = JSON.parse(file)

    # old style config allowing for only one remote
    if !$remote and config['remote']
        $copyVM = true
        $tarball = true
        $remote = true
        uri = URI("ssh://" + config['remote'])
        $remoteHosts = [ RemoteHost.new("default", uri.user, uri.host, uri.port) ]
        if config['remoteDirectory']
            $remoteHosts[0].remoteDirectory = config['remoteDirectory']
        end
        if config['idFilePath']
            $remoteHosts[0].identity_file_path = config['idFilePath']
        end
    end

    # we can combine --remote and a new style config
    if config['remotes']
        $copyVM = true
        $tarball = true
        $remote = true
        $remoteHosts += config['remotes'].map {
            | remote |
            uri = URI("ssh://" + remote['address'])

            host = RemoteHost.new(remote['name'], uri.user, uri.host, uri.port)
            if remote['remoteDirectory']
                host.remoteDirectory = remote['remoteDirectory']
            end
            if remote['idFilePath']
                host.identity_file_path = remote['idFilePath']
                print('Using identity file: ' + host.identity_file_path + "\r")
            end
            host
        }
    end
end
| |
unless jscArg
    # If we're not provided a JSC path, try to come up with a sensible JSC path automagically.
    command = SCRIPTS_PATH.join("webkit-build-directory").to_s
    command += ($buildType == "release") ? " --release" : " --debug"
    command += " --executablePath"

    output = `#{command}`.split("\n")
    # Check emptiness explicitly: the previous `if !output.length` was always
    # false because 0 is truthy in Ruby, so an empty result slipped through
    # and crashed on Pathname.new(nil) below.
    if output.empty?
        $stderr.puts "Error: must specify --jsc <path>"
        exit 1
    end

    output = output[0]
    # Probe the usual layouts: flat build dir, framework bundle, CMake bin/.
    jscArg = Pathname.new(output).join("jsc")
    jscArg = Pathname.new(output).join("JavaScriptCore.framework", "Helpers", "jsc") if !File.file?(jscArg)
    jscArg = Pathname.new(output).join("bin", "jsc") if !File.file?(jscArg) # Support CMake build.
    if !File.file?(jscArg)
        $stderr.puts "Error: must specify --jsc <path>"
        exit 1
    end

    puts "Using the following jsc path: #{jscArg}"
end
| |
# Resolve the jsc path. With --no-copy we take the path verbatim; otherwise we
# resolve symlinks so later VM copies operate on the real build output.
if $doNotMessWithVMPath
    $jscPath = Pathname.new(jscArg)
else
    $jscPath = Pathname.new(jscArg).realpath
end

# A live progress meter only makes sense for a quiet, interactive run that is
# not fanned out across multiple remote hosts.
$progressMeter = ($verbosity == 0 and $stdout.tty? and $remoteHosts.length <= 1)

# A pre-built bundle carries its own VM and doubles as the output directory.
if $bundle
    $jscPath = $bundle + ".vm" + "JavaScriptCore.framework" + "Helpers" + "jsc"
    $outputDir = $bundle
end
| |
# Try to determine architecture. Return nil on failure.
def machOArchitectureCode
    begin
        # otool -afh prints the Mach-O header; the cputype value is the
        # second field of the line that follows the column-header line
        # (the one naming "magic" and "cputype").
        otoolLines = `otool -afh #{Shellwords.shellescape($jscPath.to_s)}`.split("\n")
        otoolLines.each_with_index { |line, lineIndex|
            next unless line =~ /magic/ and line =~ /cputype/
            return otoolLines[lineIndex + 1].split[1].to_i
        }
    rescue
        $stderr.puts "Warning: unable to execute otool."
    end
    $stderr.puts "Warning: unable to determine architecture."
    nil
end
| |
# Translate a Mach-O cputype value (7 = x86, 12 = ARM, optionally OR'd with
# the 64-bit ABI flag) into the architecture names used by this script.
def determineArchitectureFromMachOBinary
    code = machOArchitectureCode
    return nil unless code
    is64BitFlag = 0x01000000
    architectureNames = {
        7 => "x86",
        7 | is64BitFlag => "x86-64",
        12 => "arm",
        12 | is64BitFlag => "arm64",
    }
    arch = architectureNames[code]
    $stderr.puts "Warning: unable to determine architecture from code: #{code}" unless arch
    arch
end
| |
# Determine the architecture of the ELF binary at $jscPath by inspecting the
# e_machine field of its header. Returns an architecture name or nil.
def determineArchitectureFromELFBinary
    # Read the ELF identification bytes plus the 2-byte e_machine field at
    # offset 18. Use binary mode and the block form so the handle is closed
    # (the previous version leaked the File object).
    data = File.open($jscPath.to_s, "rb") { |file| file.read(20) }

    if data.nil? or data[0, 4] != "\x7F\x45\x4C\x46"
        $stderr.puts "Warning: Missing ELF magic in file #{Shellwords.shellescape($jscPath.to_s)}"
        return nil
    end

    # MIPS and PowerPC may be either big- or little-endian. S390 (which includes
    # S390x) is big-endian. The rest are little-endian.
    # The comparison literals are forced to binary (.b) so they compare equal
    # to the raw bytes read from the file even when they contain a non-ASCII
    # byte (e.g. arm64's 0xB7): Ruby's String#== is false for a UTF-8 literal
    # vs binary data unless both are pure 7-bit ASCII. Also read exactly two
    # bytes (the previous data[18, 20] only worked by accident of truncation).
    code = data[18, 2]
    case code
    when "\x03\x00".b
        "x86"
    when "\x08\x00".b, "\x00\x08".b
        "mips"
    when "\x14\x00".b, "\x00\x14".b
        "powerpc"
    when "\x15\x00".b, "\x00\x15".b
        "powerpc64"
    when "\x00\x16".b
        "s390"
    when "\x28\x00".b
        "arm"
    when "\x3E\x00".b
        "x86-64"
    when "\xB7\x00".b
        "arm64"
    when "\xF3\x00".b
        "riscv64"
    else
        $stderr.puts "Warning: unable to determine architecture from code: #{code}"
        nil
    end
end
| |
# Determine the architecture of the PE (Windows) binary at $jscPath from the
# Machine field of its COFF file header. Returns an architecture name or nil.
def determineArchitectureFromPEBinary
    # Read enough to cover the DOS header, PE signature and COFF header. Use
    # binary mode and the block form so the handle is closed (the previous
    # version leaked the File object).
    data = File.open($jscPath.to_s, "rb") { |file| file.read(1024) }

    if data.nil? or data[0, 2] != "MZ"
        $stderr.puts "Warning: Missing PE magic in file #{Shellwords.shellescape($jscPath.to_s)}"
        return nil
    end

    # Offset 0x3c holds the file offset of the PE signature.
    peHeaderAddr = data[0x3c, 4].unpack('V').first # 32-bit unsigned int little endian

    if data[peHeaderAddr, 4] != "PE\0\0"
        $stderr.puts "Warning: Incorrect PE header in file #{Shellwords.shellescape($jscPath.to_s)}"
        return nil
    end

    machine = data[peHeaderAddr + 4, 2].unpack('v').first # 16-bit unsigned short, little endian

    case machine
    when 0x014c
        "x86"
    when 0x8664
        "x86-64"
    else
        $stderr.puts "Warning: unsupported machine type: #{machine}"
        nil
    end
end
| |
# Pick the architecture-detection strategy appropriate for the host OS
# (detected or supplied via --os). Returns an architecture name or nil.
def determineArchitecture
    case $hostOS
    when "darwin" then determineArchitectureFromMachOBinary
    when "linux" then determineArchitectureFromELFBinary
    when "windows" then determineArchitectureFromPEBinary
    when "playstation" then "x86-64"
    else
        $stderr.puts "Warning: unable to determine architecture on this platform."
        nil
    end
end
| |
# Normalize RbConfig's host_os string into the OS names used by this script
# ("darwin", "linux", "windows"); returns nil with a warning otherwise.
def determineOS
    hostOSString = RbConfig::CONFIG["host_os"]
    return "darwin" if hostOSString =~ /darwin/i
    return "linux" if hostOSString =~ /linux/i
    return "windows" if hostOSString =~ /mswin|mingw|cygwin/
    $stderr.puts "Warning: unable to determine host operating system"
    nil
end
| |
# Fill in OS/architecture unless already forced via --os/--arch.
$hostOS = determineOS unless $hostOS
$architecture = determineArchitecture unless $architecture
# FTL is available everywhere except 32-bit architectures, riscv64, and the
# windows/playstation ports.
$isFTLPlatform = !($architecture == "x86" || $architecture == "arm" || $architecture == "mips" || $architecture == "riscv64" || $hostOS == "windows" || $hostOS == "playstation")
# Special case armv7, we want to run the wasm tests temporarily without B3/Air support
$isWasmPlatform = $isFTLPlatform || $architecture == "arm"

if $architecture == "x86"
    # The JIT is temporarily disabled on this platform since
    # https://trac.webkit.org/changeset/237547
    $jitTests = false
end
| |
# FTL tests require both JIT testing enabled and an FTL-capable platform.
def isFTLEnabled
    return $jitTests unless $jitTests
    $isFTLPlatform
end
| |
# Default runner: the shell runner for remote darwin targets, the faster
# make-based runner everywhere else.
if !$testRunnerType
    if $remote and $hostOS == "darwin"
        $testRunnerType = :shell
    else
        $testRunnerType = :make
    end
end

if $remoteHosts.length > 1 and ($testRunnerType != :make) and ($testRunnerType != :gnuparallel)
    raise "Multiple remote hosts only supported with the make or gnu-parallel runners"
end

# PlayStation targets have no unix shell; default to the dedicated writer.
if $hostOS == "playstation" && $testWriter == "default"
    $testWriter = "playstation"
end

# The writer name is interpolated into a require_relative path below, so
# restrict it to safe characters.
if $testWriter
    if /[^-a-zA-Z0-9_]/.match($testWriter)
        raise "Invalid test writer #{$testWriter} given"
    end
end

# We force all tests to use a smaller (1.5M) stack so that stack overflow tests can run faster.
BASE_OPTIONS = ["--useFTLJIT=false", "--useFunctionDotArguments=true", "--validateExceptionChecks=true", "--useDollarVM=true", "--maxPerThreadStackUsage=1572864"]
EAGER_OPTIONS = ["--thresholdForJITAfterWarmUp=10", "--thresholdForJITSoon=10", "--thresholdForOptimizeAfterWarmUp=20", "--thresholdForOptimizeAfterLongWarmUp=20", "--thresholdForOptimizeSoon=20", "--thresholdForFTLOptimizeAfterWarmUp=20", "--thresholdForFTLOptimizeSoon=20", "--thresholdForOMGOptimizeAfterWarmUp=20", "--thresholdForOMGOptimizeSoon=20", "--maximumEvalCacheableSourceLength=150000", "--useEagerCodeBlockJettisonTiming=true", "--repatchBufferingCountdown=0"]
# NOTE: Tests rely on this using scribbleFreeCells.
NO_CJIT_OPTIONS = ["--useConcurrentJIT=false", "--thresholdForJITAfterWarmUp=100", "--scribbleFreeCells=true"]
B3O1_OPTIONS = ["--defaultB3OptLevel=1", "--useDataICInFTL=1", "--forceUnlinkedDFG=1"]
B3O0_OPTIONS = ["--maxDFGNodesInBasicBlockForPreciseAnalysis=100", "--defaultB3OptLevel=0"]
FTL_OPTIONS = ["--useFTLJIT=true"]
FORCE_LLINT_EXIT_OPTIONS = ["--forceOSRExitToLLInt=true"]
EXECUTABLE_FUZZER_OPTIONS = ["--useExecutableAllocationFuzz=true", "--fireExecutableAllocationFuzzRandomly=true"]
| |
# One scheduled invocation of a test: where to run it, with which arguments,
# how to interpret its output, and how (if at all) to retry it.
class BasePlan
    attr_reader :directory, :arguments, :family, :name, :outputHandler, :errorHandler, :additionalEnv, :index
    attr_accessor :retryParameters

    # Monotonically increasing counter shared by every plan (including
    # subclasses); gives each plan a stable identity.
    @@index = 0

    def initialize(directory, arguments, family, name, outputHandler, errorHandler, retryParameters)
        @directory = directory
        @arguments = argumentsMapper(arguments)
        @family = family
        @name = name
        @outputHandler = outputHandler
        @errorHandler = errorHandler
        # A plan with non-nil retryParameters is treated as potentially flaky.
        @retryParameters = retryParameters
        @additionalEnv = []
        @index = @@index
        @@index += 1
    end

    # Placeholder plan with no real command; used where only identity matters.
    def self.mock(family, name, retryParameters=nil)
        new("/none", [], family, name, nil, nil, retryParameters)
    end

    def self.create(directory, arguments, family, name, outputHandler, errorHandler)
        outputHandler = noisyOutputHandler if $runCommandOptions[:crashOK]
        new(directory, arguments, family, name, outputHandler, errorHandler, $runCommandOptions[:flaky])
    end

    # Hook for subclasses to transform the argument list.
    def argumentsMapper(args)
        args
    end

    # Plans live in Sets/Hashes but @retryParameters may be mutated after
    # insertion; hash on the immutable @index only.
    def hash
        @index
    end

    def to_s
        @index.to_s
    end
end
| |
# Base class for the concrete runners (shell/make/ruby/gnu-parallel): writes
# the per-test scripts and delegates driver generation to prepareRunner.
class TestRunner
    def initialize(testRunnerType, runnerDir)
        @testRunnerType = testRunnerType
        @runnerDir = runnerDir
    end

    def prepare(runlist, serialPlans, completedPlans, remoteHosts)
        prepareScripts(runlist)
        prepareRunner(runlist, serialPlans, completedPlans, remoteHosts)
    end

    def prepareScripts(runlist)
        Dir.mkdir(@runnerDir) unless @runnerDir.directory?

        # Remove scripts left behind by a previous run.
        staleScripts = Dir.entries(@runnerDir).select { |filename| filename =~ /^test_/ }
        staleScripts.each { |filename| File.unlink(@runnerDir + filename) }

        # Write test scripts in parallel as this is both an expensive and a
        # highly IO intensive operation, but each script is independent and
        # the operation is pure other than writing the unique run script.
        parallelEach(runlist) do |plan|
            plan.writeRunScript(@runnerDir + "test_script_#{plan.index}")
        end
    end

    # Factory: map the runner-type symbol to the matching subclass. The case
    # is resolved lazily so the subclasses may be defined later in the file.
    def self.create(testRunnerType, runnerDir)
        runnerClass =
            case testRunnerType
            when :shell then TestRunnerShell
            when :make then TestRunnerMake
            when :ruby then TestRunnerRuby
            when :gnuparallel then TestRunnerGnuParallel
            else
                raise "Unknown test runner type: #{testRunnerType.to_s}"
            end
        runnerClass.new(testRunnerType, runnerDir)
    end
end
| |
| require_relative "webkitruby/jsc-stress-test-writer-#{$testWriter}" |
| |
# Continuous-GC stress modes run on release builds, or anywhere when forced
# via --force-collectContinuously.
def shouldCollectContinuously?
    ($buildType == "release") || $forceCollectContinuously
end
| |
# Extra GC-stressing options, empty when continuous collection is disabled.
COLLECT_CONTINUOUSLY_OPTIONS = shouldCollectContinuously? ? ["--collectContinuously=true", "--useGenerationalGC=false", "--verifyGC=true"] : []

# Plans that must run on their own (see serial!) and the full list of plans.
$serialPlans = Set.new
$runlist = []
| |
# Locate the JavaScriptCore.framework directory for a given jsc path, or nil
# if the layout is not recognized (in which case a generic VM copy is done).
def frameworkFromJSCPath(jscPath)
    parentDirectory = jscPath.dirname
    parentName = parentDirectory.basename.to_s
    insideFramework = %w[Resources Helpers].include?(parentName) &&
                      parentDirectory.dirname.basename.to_s == "JavaScriptCore.framework"
    if insideFramework
        parentDirectory.dirname
    elsif $hostOS == "playstation"
        parentDirectory
    elsif parentName.start_with?("Debug") || parentName.start_with?("Release")
        parentDirectory + "JavaScriptCore.framework"
    else
        $stderr.puts "Warning: cannot identify JSC framework, doing generic VM copy."
        nil
    end
end
| |
| |
# Frequently repeated path computations. Cache results for speed.
# Key: [resourcePath, benchmarkDirectory, startDir]. The default block climbs
# one ".." per component of benchmarkDirectory (the loop variable is unused;
# only the component count matters) and then appends resourcePath — i.e. the
# relative path from the benchmark directory back up to the bundle root and
# down to the resource.
$bundleResourcePathCache = Hash.new do |h, key|
    resourcePath, benchmarkDirectory, dir = key
    benchmarkDirectory.each_filename {
        | pathComponent |
        dir = dir.parent
    }
    h[key] = dir + resourcePath
end
| |
# Resolve resourcePath relative to the current $benchmarkDirectory, memoized
# through $bundleResourcePathCache.
def pathToBundleResourceFromBenchmarkDirectory(resourcePath)
    cacheKey = [resourcePath, $benchmarkDirectory, Pathname.new(".")]
    $bundleResourcePathCache[cacheKey]
end
| |
# Path to the jsc binary, expressed relative to the benchmark directory.
def pathToVM
    pathToBundleResourceFromBenchmarkDirectory($jscPath)
end
| |
# Build the argv prefix used to launch jsc: the VM path, optionally preceded
# by the artifact exec wrapper, optionally run under /usr/bin/arch to force a
# particular architecture.
def vmCommand
    command = [pathToVM.to_s]
    command.unshift($artifact_exec_wrapper) unless $artifact_exec_wrapper.nil?
    command = ["/usr/bin/arch", "-#{$forceArchitecture}"] + command if $forceArchitecture
    command
end
| |
# Path to the bundled ".helpers" directory, relative to the benchmark directory.
def pathToHelpers
    pathToBundleResourceFromBenchmarkDirectory(".helpers")
end
| |
# Per-test state toggled by the //@ directive helpers below (slow!, crashOK!,
# serial!, flaky!, requireOptions).
$runCommandOptions = {}
$testSpecificRequiredOptions = []
| |
$uniqueFilenameCounter = 0
# Produce a fresh file path under <outputDir>/_payload, creating that
# directory on first use. A simple counter keeps the names predictable.
def uniqueFilename(extension)
    payloadDir = $outputDir + "_payload"
    Dir.mkdir(payloadDir) unless payloadDir.directory?
    filename = payloadDir.realpath + "temp-#{$uniqueFilenameCounter}#{extension}"
    $uniqueFilenameCounter += 1
    filename
end
| |
# Canonical test name: "<collection>/<benchmark>.<kind>".
def baseOutputName(kind)
    format("%s/%s.%s", $collectionName, $benchmark, kind)
end
| |
# Register one test invocation described by cfg. Required keys: :kind,
# :command, :outputHandler, :errorHandler; optional :additionalEnv. Honors
# $filter and the //@ directive state in $runCommandOptions.
def addRunCommandCfg(cfg, *additionalEnv)
    [:kind, :command, :outputHandler, :errorHandler].each { |key|
        if not cfg.has_key?(key)
            raise "Missing #{key} in #{cfg}"
        end
    }
    # Record that the test file produced at least one run command, even if the
    # filter below ends up rejecting it.
    $didAddRunCommand = true
    name = baseOutputName(cfg[:kind])
    if $filter and name !~ $filter
        return
    end
    plan = Plan.create(
        $benchmarkDirectory, cfg[:command], "#{$collectionName}/#{$benchmark}", name, cfg[:outputHandler],
        cfg[:errorHandler])
    if cfg.has_key?(:additionalEnv)
        plan.additionalEnv.push(*(cfg[:additionalEnv]))
    end
    if $runCommandOptions[:serial]
        # Add this to the list of tests to be run on their own, so
        # that we can treat them specially when scheduling, but keep
        # it in the $runlist for code that doesn't care about
        # scheduling.
        $serialPlans.add(plan)
    end

    # Front-load slow tests so they don't straggle at the end of a parallel run.
    if $numChildProcesses > 1 and $runCommandOptions[:isSlow]
        $runlist.unshift plan
    else
        $runlist << plan
    end
end
| |
# Convenience wrapper around addRunCommandCfg taking positional arguments.
def addRunCommand(kind, command, outputHandler, errorHandler, *additionalEnv)
    addRunCommandCfg({
        :kind => kind,
        :command => command,
        :outputHandler => outputHandler,
        :errorHandler => errorHandler,
        :additionalEnv => additionalEnv,
    })
end
| |
# Returns true if there were run commands found in the file ($benchmarkDirectory +
# $benchmark), in which case those run commands have already been executed. Otherwise
# returns false, in which case you're supposed to add your own run commands.
def parseRunCommands
    oldDidAddRunCommand = $didAddRunCommand
    $didAddRunCommand = false
    $skipped = false

    Dir.chdir($outputDir) {
        File.open($benchmarkDirectory + $benchmark) {
            | inp |
            inp.each_line {
                | line |
                begin
                    doesMatch = line =~ /^\/\/@/
                rescue StandardError => e
                    # Apparently this happens in the case of some UTF8 stuff in some files, where
                    # Ruby tries to be strict and throw exceptions. (Encoding errors are
                    # StandardErrors; no need to rescue Exception here.)
                    next
                end
                next unless doesMatch
                # Everything after the //@ marker is evaluated as Ruby code.
                eval $~.post_match
                if $skipped
                    break
                end
            }
        }
    }

    result = $didAddRunCommand
    # Merge with the previous value rather than clobbering it. The old
    # `$didAddRunCommand = result or oldDidAddRunCommand` parsed as
    # `($didAddRunCommand = result) or ...`, silently dropping the old flag.
    $didAddRunCommand = result || oldDidAddRunCommand
    result
end
| |
# //@ directive: mark the current test slow. Slow tests are scheduled first in
# parallel runs and skipped entirely in --quick mode.
def slow!
    $runCommandOptions[:isSlow] = true
    return unless $mode == "quick"
    skip()
end
| |
# //@ directive: a crash is an acceptable outcome for this test.
# NOTE(review): "-s" is consumed by the test-writer scripts — confirm its
# meaning against the active writer.
def crashOK!
    $runCommandOptions[:crashOK] = true
    $testSpecificRequiredOptions += ["-s"]
end
| |
# //@ directive: schedule this test's commands on their own rather than in
# parallel with other tests (see addRunCommandCfg / $serialPlans).
def serial!
    $runCommandOptions[:serial] = true
end
| |
| |
# Retry parameters for tests that we treat as flaky, either because
# they've been explicitly marked so (via flaky!) or because we were
# asked to --treat-failing-as-flaky.
class RetryParameters
    attr_reader :passPercentage, :maxTries

    def initialize(passPercentage, maxTries)
        @passPercentage = passPercentage
        @maxTries = maxTries
    end

    def to_s
        "RetryParameters(#{@passPercentage}, #{@maxTries})"
    end

    # Overall verdict given the statuses observed so far:
    # true => passed, false => failed, nil => undecided (keep trying).
    def result(statuses)
        # Remotes disappearing and reappearing can record extra runs for the
        # final try of a flaky test; ignore anything beyond maxTries.
        statuses = statuses.take(@maxTries) if statuses.length > @maxTries
        passes = successfulStatuses(statuses)
        triesLeft = @maxTries - statuses.length
        requiredPasses = (@passPercentage * @maxTries).ceil
        verdict = nil # "has not completed yet"
        if passes >= requiredPasses
            verdict = true
        elsif (passes + triesLeft) < requiredPasses
            # Even if every remaining try passed, the bar is out of reach.
            verdict = false
        end
        putd("#{self}.result(#{statuses}) => #{verdict}")
        verdict
    end

    private
    def successfulStatuses(statuses)
        statuses.count(STATUS_FILE_PASS)
    end
end
| |
# //@ directive: mark the current test flaky. It passes overall if at least
# passPercentage of up to maxTries runs succeed (see RetryParameters#result).
def flaky!(passPercentage, maxTries)
    $runCommandOptions[:flaky] = RetryParameters.new(passPercentage, maxTries)
end
| |
# //@ directive: append jsc options that every run of this test must include.
def requireOptions(*options)
    $testSpecificRequiredOptions = $testSpecificRequiredOptions + options
end
| |
# Assemble the full command line for one test variant (prefix, VM, options,
# benchmark file) into cfg[:command] and register it.
def runWithOptions(cfg, *options)
    baseOptions = cfg.has_key?(:no_base_options) ? [] : BASE_OPTIONS
    commandPrefix = cfg.fetch(:command_prefix, [])
    benchmarkArg = [$benchmark.to_s]
    cfg[:command] =
        if cfg.has_key?(:place_benchmark_early)
            # Some wrapper scripts (e.g. the bytecode-cache helper) want the
            # benchmark before the options.
            commandPrefix + vmCommand + benchmarkArg + baseOptions + options + $testSpecificRequiredOptions
        else
            commandPrefix + vmCommand + baseOptions + options + $testSpecificRequiredOptions + benchmarkArg
        end
    addRunCommandCfg(cfg)
end
| |
# Register a run with a caller-chosen output handler and the standard error
# handler.
def runWithOutputHandler(kind, outputHandler, *options)
    runWithOptions({ :kind => kind,
                     :outputHandler => outputHandler,
                     :errorHandler => simpleErrorHandler },
                   *options)
end
| |
# Like runWithOutputHandler, but suppresses the standard BASE_OPTIONS.
def runWithOutputHandlerWithoutBaseOption(kind, outputHandler, *options)
    runWithOptions({ :kind => kind,
                     :outputHandler => outputHandler,
                     :errorHandler => simpleErrorHandler,
                     :no_base_options => true },
                   *options)
end
| |
# Standard way to register a test run: silent output handler, standard error
# handler.
def run(kind, *options)
    runWithOutputHandler(kind, silentOutputHandler, *options)
end
| |
# Runs with a caller-supplied cfg, filling in default handlers only when
# the caller did not set them. Presence of the key is what matters, so an
# explicitly supplied nil handler is respected.
def runInner(cfg, *options)
    cfg = cfg.dup
    cfg[:outputHandler] = silentOutputHandler unless cfg.key?(:outputHandler)
    cfg[:errorHandler] = simpleErrorHandler unless cfg.key?(:errorHandler)
    runWithOptions(cfg, *options)
end
| |
# Cfg-propagating variant that drops BASE_OPTIONS; leaves the caller's
# cfg untouched by merging into a copy.
def runWithoutBaseOptionCfg(cfg, *options)
    runWithOptions(cfg.merge(:no_base_options => true), *options)
end
| |
# Silent-output run that omits BASE_OPTIONS from the command line.
def runWithoutBaseOption(kind, *options)
    handler = silentOutputHandler
    runWithOutputHandlerWithoutBaseOption(kind, handler, *options)
end
| |
# Runs a large-heap test under the default mode, unless this device is
# memory-limited, in which case the test is skipped with a log line.
# $didAddRunCommand is still set so the harness treats the test as handled.
def runOneLargeHeap(*optionalTestSpecificOptions)
    unless $memoryLimited
        run("default", *optionalTestSpecificOptions)
        return
    end
    $didAddRunCommand = true
    puts "Skipping #{$collectionName}/#{$benchmark}"
end
| |
# Returns the mktemp-style filename template used for the bytecode cache
# on the current host OS, or nil when bytecode caching is unsupported
# there (anything other than darwin/linux).
def bytecodeCacheTemplate
    case $hostOS
    when "darwin"
        "bytecode-cache"
    when "linux"
        # Linux mktemp requires trailing X placeholders.
        "bytecode-cacheXXXXXX"
    end
end
| |
# Builds the mode descriptor for a bytecode-cache run: the benchmark is
# executed through bytecode-cache-test-helper.sh with a temp-file
# template, plus FTL options. Returns nil when the host OS has no cache
# template (the mode is then skipped by the generator).
def runBytecodeCacheImpl(optionalTestSpecificOptions, *additionalEnv)
    fileTemplate = bytecodeCacheTemplate
    return nil if fileTemplate.nil?
    helperScript = (pathToHelpers + "bytecode-cache-test-helper.sh").to_s
    {
        :cfg => {
            :command_prefix => ["sh", helperScript, fileTemplate.to_s],
            :place_benchmark_early => true,
            :additionalEnv => additionalEnv,
        },
        :testSpecificOptions => FTL_OPTIONS + optionalTestSpecificOptions,
    }
end
| |
| |
# Initializer for the plain (non-Cfg) generated methods: discards any
# incoming cfg and produces a fresh one containing only the kind.
def cfgInitializerPlain
    Proc.new { |_cfg, kind| { :kind => kind } }
end
| |
# Initializer for the *Cfg generated methods: returns a copy of the
# caller's cfg with :kind set, leaving the original hash unmodified.
def cfgInitializerCfg
    Proc.new { |cfg, kind|
        cfg.merge(:kind => kind)
    }
end
| |
| # For each base mode (defined below) we generate two kinds of functions: |
| # |
| # - a version which takes a cfg argument and passes it along, only |
| # setting the kind field |
| # - a "plain" version which starts out with an empty cfg |
| # |
| # The plain version is intended for use in the testcase definitions |
| # (in `//@` comments and the like). |
| # |
| # The former version is used for plumbing. The caller may set various |
| # fields in the cfg which will be respected. |
| # |
| # This way, we can |
| # - define a set of test modes in defaultRunCfg |
| # - have defaultRunCfg propagate the cfg argument to the run*Cfg |
| # functions it calls |
| # - call defaultRunCfg from e.g. defaultRunNoisyTest with the output |
| # handlers appropriately set, in order to make sure we're running |
| # the exact same of tests. |
# Descriptor for one generated-method flavor: the method-name suffix
# ("" or "Cfg"), whether the generated method takes a leading cfg
# argument, and the Proc used to seed the cfg.
CfgKind = Struct.new(:extension, :expectCfg, :initializer)
cfgKinds = [
    CfgKind.new("", false, cfgInitializerPlain),
    CfgKind.new("Cfg", true, cfgInitializerCfg),
]
| |
| # Define base test modes. Each mode is an array of [name, kind, |
| # options]. The name is used to derive the ruby method names, the kind |
| # is used for reporting (i.e. what you'd see in this script's |
| # output). In the common case, options is a static array; if not, it's |
| # a Proc that returns a dict that needs to be unpacked (see its use |
| # site for a more detailed description). |
BASE_MODES = [
    # NOTE(review): this entry shares the name "NoCJIT" with a later entry
    # (kind "no-cjit"). The generator below uses define_method, so the later
    # entry silently overwrites runNoCJIT/runNoCJITCfg generated from this
    # one, making this ftl-no-cjit variant unreachable by name — confirm
    # which name was intended before renaming either.
    [
        "NoCJIT",
        "ftl-no-cjit",
        [
            "--validateBytecode=true", "--validateGraphAtEachPhase=true"
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS +
        COLLECT_CONTINUOUSLY_OPTIONS
    ],
    [
        "FTLNoCJIT",
        "misc-ftl-no-cjit",
        [
            "--useDataICInFTL=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS
    ],
    [
        "FTLNoCJITB3O0",
        "ftl-no-cjit-b3o0",
        [
            "--useArrayAllocationProfiling=false",
            "--forcePolyProto=true",
            "--useRandomizingExecutableIslandAllocation=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS +
        B3O0_OPTIONS +
        FORCE_LLINT_EXIT_OPTIONS
    ],
    [
        "FTLNoCJITValidate",
        "ftl-no-cjit-validate-sampling-profiler",
        [
            "--validateGraph=true",
            "--validateBCE=true",
            "--useSamplingProfiler=true",
            "--airForceIRCAllocator=true",
            "--useDataICInFTL=true",
            "--forceUnlinkedDFG=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS
    ],
    [
        "FTLNoCJITNoPutStackValidate",
        "ftl-no-cjit-no-put-stack-validate",
        [
            "--validateGraph=true",
            "--usePutStackSinking=false",
            "--airForceIRCAllocator=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS
    ],
    [
        "FTLNoCJITNoInlineValidate",
        "ftl-no-cjit-no-inline-validate",
        [
            "--validateGraph=true",
            "--maximumInliningDepth=1",
            "--airForceBriggsAllocator=true",
            "--useB3HoistLoopInvariantValues=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS
    ],
    [
        "FTLNoCJITOSRValidation",
        "ftl-no-cjit-osr-validation",
        [
            "--validateFTLOSRExitLiveness=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS
    ],
    [
        "DFGEager",
        "dfg-eager",
        EAGER_OPTIONS +
        COLLECT_CONTINUOUSLY_OPTIONS +
        FORCE_LLINT_EXIT_OPTIONS
    ],
    [
        "DFGEagerNoCJITValidate",
        "dfg-eager-no-cjit-validate",
        [
            "--validateGraph=true",
        ] +
        NO_CJIT_OPTIONS +
        EAGER_OPTIONS +
        COLLECT_CONTINUOUSLY_OPTIONS
    ],
    [
        "FTLEager",
        "ftl-eager",
        [
            "--airForceBriggsAllocator=true",
            "--useRandomizingExecutableIslandAllocation=true",
            "--forcePolyProto=true",
            "--useDataICInFTL=true",
        ] +
        FTL_OPTIONS +
        EAGER_OPTIONS +
        COLLECT_CONTINUOUSLY_OPTIONS
    ],
    [
        "FTLEagerNoCJITValidate",
        "ftl-eager-no-cjit",
        [
            "--validateGraph=true",
            "--validateBCE=true",
            "--airForceIRCAllocator=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS +
        EAGER_OPTIONS +
        COLLECT_CONTINUOUSLY_OPTIONS +
        FORCE_LLINT_EXIT_OPTIONS +
        EXECUTABLE_FUZZER_OPTIONS
    ],
    [
        "FTLEagerNoCJITB3O1",
        "ftl-eager-no-cjit-b3o1",
        [
            "--validateGraph=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS +
        EAGER_OPTIONS +
        B3O1_OPTIONS
    ],
    [
        "FTLEagerNoCJITOSRValidation",
        "ftl-eager-no-cjit-osr-validation",
        [
            "--validateFTLOSRExitLiveness=true",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS +
        EAGER_OPTIONS +
        COLLECT_CONTINUOUSLY_OPTIONS
    ],
    [
        "NoCJITNoASO",
        "no-cjit-no-aso",
        [
            "--useArchitectureSpecificOptimizations=false",
        ] +
        NO_CJIT_OPTIONS
    ],
    [
        "NoCJITNoAccessInlining",
        "no-cjit-no-access-inlining",
        [
            "--useAccessInlining=false",
        ] +
        NO_CJIT_OPTIONS
    ],
    [
        "FTLNoCJITNoAccessInlining",
        "ftl-no-cjit-no-access-inlining",
        [
            "--useAccessInlining=false",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS
    ],
    [
        "FTLNoCJITSmallPool",
        "ftl-no-cjit-small-pool",
        [
            "--jitMemoryReservationSize=202400",
        ] +
        FTL_OPTIONS +
        NO_CJIT_OPTIONS
    ],
    # This is the entry that actually provides runNoCJIT/runNoCJITCfg (it
    # overwrites the methods generated from the first "NoCJIT" entry above).
    [
        "NoCJIT",
        "no-cjit",
        NO_CJIT_OPTIONS
    ],
    [
        "EagerJettisonNoCJIT",
        "eager-jettison-no-cjit",
        [
            "--useRandomizingExecutableIslandAllocation=true",
            "--forceCodeBlockToJettisonDueToOldAge=true",
            "--verifyGC=true",
        ] +
        NO_CJIT_OPTIONS
    ],
    [
        "ShadowChicken",
        "shadow-chicken",
        [
            "--useDFGJIT=false",
            "--alwaysUseShadowChicken=true",
        ]
    ],
    [
        "MiniMode",
        "mini-mode",
        [
            "--forceMiniVMMode=true",
        ]
    ],
    [
        "LogicalAssignmentOperatorsEnabled",
        "logical-assignment-operators-enabled",
        [
            "--useLogicalAssignmentOperators=true",
        ] +
        FTL_OPTIONS
    ],
    [
        "NoJIT",
        "no-jit",
        [
            "--useJIT=false",
        ]
    ],
    [
        # NOTE: Tests rely on this using scribbleFreeCells.
        "NoCJITValidate",
        "no-cjit",
        [
            "--validateBytecode=true",
            "--validateGraph=true",
        ] +
        NO_CJIT_OPTIONS
    ],
    [
        "NoCJITValidatePhases",
        "no-cjit-validate-phases",
        [
            "--validateBytecode=true",
            "--validateGraphAtEachPhase=true",
            "--useSourceProviderCache=false",
            "--useRandomizingExecutableIslandAllocation=true",
            "--useLLIntICs=false",
        ] +
        NO_CJIT_OPTIONS
    ],
    [
        "NoCJITCollectContinuously",
        "no-cjit-collect-continuously",
        NO_CJIT_OPTIONS +
        COLLECT_CONTINUOUSLY_OPTIONS
    ],
    [
        "Default",
        "default",
        FTL_OPTIONS
    ],
    [
        "NoFTL",
        "no-ftl",
        []
    ],
    [
        "WithRAMSize",
        nil, # Not used
        # Dynamic mode: the kind embeds the forced RAM size, so the Proc
        # overrides :kind via the returned :cfg hash.
        Proc.new { |size, *optionalTestSpecificOptions|
            {
                :cfg => {
                    :kind => "ram-size-#{size}",
                },
                :testSpecificOptions => [
                    "--forceRAMSize=#{size}",
                ] + optionalTestSpecificOptions
            }
        }
    ],
    [
        "BytecodeCache",
        "bytecode-cache",
        Proc.new { |*optionalTestSpecificOptions|
            runBytecodeCacheImpl(optionalTestSpecificOptions)
        }
    ],
    [
        "BytecodeCacheNoAssertion",
        "bytecode-cache",
        Proc.new { |*optionalTestSpecificOptions|
            runBytecodeCacheImpl(optionalTestSpecificOptions, "JSC_forceDiskCache=false")
        }
    ],
    [
        "FTLEagerWatchdog",
        nil,
        # Dynamic mode: picks a random watchdog timeout per run and embeds
        # it in the kind string.
        Proc.new { |*optionalTestSpecificOptions|
            timeout = rand(100)
            {
                :cfg => {
                    :kind => "ftl-eager-watchdog-#{timeout}",
                },
                :testSpecificOptions => [
                    "--watchdog=#{timeout}",
                    "--watchdog-exception-ok",
                ] +
                FTL_OPTIONS +
                EAGER_OPTIONS +
                COLLECT_CONTINUOUSLY_OPTIONS +
                optionalTestSpecificOptions
            }
        }
    ],
    [
        "NoLLInt",
        "no-llint",
        # Returns nil (=> mode skipped) when JIT tests are disabled, since
        # running without the LLInt requires a JIT tier.
        Proc.new { |*optionalTestSpecificOptions|
            if $jitTests
                {
                    :cfg => {
                    },
                    :testSpecificOptions => [
                        "--useLLInt=false",
                    ] + optionalTestSpecificOptions
                }
            else
                nil
            end
        }
    ],
    # NOTE(review): "OneLangeHeap" looks like a typo for "OneLargeHeap".
    # It is left as-is because renaming it would make the generated method
    # overwrite the hand-written runOneLargeHeap above (which additionally
    # sets $didAddRunCommand and logs the skip) — confirm intent first.
    # Also note this Proc returns no :cfg key; see the NOTE at the
    # cfg.merge! call in the generator below.
    [
        "OneLangeHeap",
        "default",
        Proc.new { |*optionalTestSpecificOptions|
            if $memoryLimited
                nil
            else
                {
                    :testSpecificOptions => optionalTestSpecificOptions
                }
            end
        }
    ]
]
| |
# Generate run<Name> and run<Name>Cfg for every base mode above.
BASE_MODES.each { |mode|
    name = "run#{mode[0]}"
    kind = mode[1]
    options = mode[2]

    # We need to define two variants, one expecting a cfg as the first
    # argument, one not.
    cfgKinds.each { |cfgKind|
        methodName = "#{name}#{cfgKind.extension}".to_sym
        define_method(methodName) { |*args|
            cfg = nil
            if cfgKind.expectCfg
                # If we're defining a method that expects a cfg
                # argument, pick it out of the args to pass to the
                # initializer.
                cfg = args.shift
            end
            # The cfg is initialized differently depending on whether
            # we're in a run*Cfg method or not.
            cfg = cfgKind.initializer.call(cfg, kind)
            finalOptions = nil
            if options.respond_to?(:call)
                dynamicOptions = options.call(*args)
                if dynamicOptions.nil?
                    # A nil result from the mode's Proc means the mode does
                    # not apply in this configuration; record a skip.
                    skip
                    return
                end
                # The Proc object may override any cfg option passed
                # in. This is used e.g. for dynamic test names as used
                # by WithRAMSize and FTLEagerWatchdog.
                # NOTE(review): if a mode's Proc omits the :cfg key (as
                # "OneLangeHeap" does), dynamicOptions[:cfg] is nil and
                # Hash#merge! raises TypeError — latent crash should that
                # mode ever be invoked.
                cfg.merge!(dynamicOptions[:cfg])
                # As the Proc may consume arguments, it's responsible
                # for returning the final option list. Needed e.g. by
                # WithRAMSize.
                finalOptions = dynamicOptions[:testSpecificOptions]
            else
                finalOptions = options + args
            end
            runInner(cfg, *finalOptions)
        }
    }
}
| |
# Shared cfg for "noisy" runs: handlers are resolved once at load time
# and the hash is frozen so the run*Cfg plumbing cannot mutate it
# (runInner/runWithoutBaseOptionCfg dup before modifying).
CFG_NOISY = {
    :outputHandler => noisyOutputHandler,
    :errorHandler => noisyErrorHandler,
}.freeze
| |
BASE_MODES.each { |mode|
    name = "runNoisyTest#{mode[0]}".to_sym
    define_method(name) { |*args|
        # For each base mode, define the "noisy" variant which simply
        # calls the respective run#{name}Cfg, passing in the "noisy"
        # cfg.
        # NOTE(review): the noisy variants also force bytecode/graph
        # validation onto every mode, making them stricter than their
        # quiet counterparts — confirm this is intentional.
        send("run#{mode[0]}Cfg", CFG_NOISY, "--validateBytecode=true", "--validateGraphAtEachPhase=true", *args)
    }
}
| |
| # Default set of tests to run; propagates the cfg to every callee. |
# Schedules the default battery of test modes, propagating cfg to every
# run*Cfg callee (so e.g. the noisy variants can inject output handlers).
#
# subsetOptions keys (presence-checked) prune the set:
#   :ignoreQuickMode - run the full set even when $mode == "quick"
#   :skipNoLLInt     - omit the no-LLInt mode
#   :skipEager       - omit the eager-compilation modes
def defaultRunCfg(cfg, subsetOptions, *optionalTestSpecificOptions)
    cfg.freeze
    if not subsetOptions.has_key?(:ignoreQuickMode) and $mode == "quick"
        defaultQuickRunCfg(cfg, *optionalTestSpecificOptions)
    else
        runDefaultCfg(cfg, *optionalTestSpecificOptions)
        runBytecodeCacheCfg(cfg, *optionalTestSpecificOptions)
        runMiniModeCfg(cfg, *optionalTestSpecificOptions)
        if $jitTests
            if not subsetOptions.has_key?(:skipNoLLInt)
                runNoLLIntCfg(cfg, *optionalTestSpecificOptions)
            end
            runNoCJITValidatePhasesCfg(cfg, *optionalTestSpecificOptions)
            runNoCJITCollectContinuouslyCfg(cfg, *optionalTestSpecificOptions) if shouldCollectContinuously?
            if not subsetOptions.has_key?(:skipEager)
                # Forward the test-specific options here too, for
                # consistency with every other mode in this function
                # (they were previously dropped for this call only).
                runDFGEagerCfg(cfg, *optionalTestSpecificOptions)
                if $mode != "basic"
                    runDFGEagerNoCJITValidateCfg(cfg, *optionalTestSpecificOptions)
                    runEagerJettisonNoCJITCfg(cfg, *optionalTestSpecificOptions)
                end
            end

            return if !$isFTLPlatform

            runNoFTLCfg(cfg, *optionalTestSpecificOptions)
            if not subsetOptions.has_key?(:skipEager)
                runFTLEagerCfg(cfg, *optionalTestSpecificOptions)
                runFTLEagerNoCJITValidateCfg(cfg, *optionalTestSpecificOptions) if $buildType == "release"
            end
            runFTLNoCJITSmallPoolCfg(cfg, *optionalTestSpecificOptions)

            return if $mode == "basic"

            runFTLNoCJITValidateCfg(cfg, *optionalTestSpecificOptions)
            runFTLNoCJITB3O0Cfg(cfg, *optionalTestSpecificOptions)
            runFTLNoCJITNoPutStackValidateCfg(cfg, *optionalTestSpecificOptions)
            runFTLNoCJITNoInlineValidateCfg(cfg, *optionalTestSpecificOptions)
            if not subsetOptions.has_key?(:skipEager)
                runFTLEagerNoCJITB3O1Cfg(cfg, *optionalTestSpecificOptions)
            end
        end
    end
end
| |
# Default test battery with no cfg overrides and no subset pruning.
def defaultRun
    defaultRunCfg({}, {})
end
| |
# Default battery minus the no-LLInt mode.
def defaultNoNoLLIntRun
    subset = { :skipNoLLInt => true }
    defaultRunCfg({}, subset)
end
| |
# Reduced battery used in "quick" mode: default + validating no-cjit,
# plus the FTL variants on FTL-capable platforms.
def defaultQuickRunCfg(cfg, *optionalTestSpecificOptions)
    runDefaultCfg(cfg, *optionalTestSpecificOptions)
    return unless $jitTests

    runNoCJITValidateCfg(cfg, *optionalTestSpecificOptions)
    return unless $isFTLPlatform

    runNoFTLCfg(cfg, *optionalTestSpecificOptions)
    runFTLNoCJITValidateCfg(cfg, *optionalTestSpecificOptions)
end
| |
# Quick battery with an empty cfg.
def defaultQuickRun
    defaultQuickRunCfg({})
end
| |
# Quick battery plus a few access-inlining/OSR spot checks; the FTL
# variants are added only on FTL-capable platforms.
def defaultSpotCheckNoMaximalFlush
    defaultQuickRun
    runNoCJITNoAccessInlining
    if $isFTLPlatform
        runFTLNoCJITOSRValidation
        runFTLNoCJITNoAccessInlining
        runFTLNoCJITB3O0
    end
end
| |
# Spot check including the eager-jettison mode.
def defaultSpotCheck
    defaultSpotCheckNoMaximalFlush
    runEagerJettisonNoCJIT
end
| |
| # This is expected to not do eager runs because eager runs can have a lot of recompilations |
| # for reasons that don't arise in the real world. It's used for tests that assert convergence |
| # by counting recompilations. |
# Default battery without eager modes (see the comment above: eager runs
# recompile too often for tests that count recompilations).
def defaultNoEagerRun(*optionalTestSpecificOptions)
    subset = { :skipEager => true }
    defaultRunCfg({}, subset, *optionalTestSpecificOptions)
end
| |
# Full default battery even in quick mode (quick mode is ignored).
def defaultNoSamplingProfilerRun
    subset = { :ignoreQuickMode => true }
    defaultRunCfg({}, subset)
end
| |
# Schedules a run under the sampling/structure profiler, post-processing
# the JSON output with display-profiler-output via profiler-test-helper.
# Skipped on remotes, memory-limited devices, Windows, and PlayStation.
def runProfiler
    if $remote or $memoryLimited or ($hostOS == "windows") or ($hostOS == "playstation")
        skip
        return
    end

    profilerOutput = uniqueFilename(".json")
    if $canRunDisplayProfilerOutput
        addRunCommand("profiler", ["ruby", (pathToHelpers + "profiler-test-helper").to_s, (SCRIPTS_PATH + "display-profiler-output").to_s, profilerOutput.to_s, *vmCommand, "--useConcurrentJIT=false", "-p", profilerOutput.to_s, $benchmark.to_s], silentOutputHandler, simpleErrorHandler)
    else
        # Fallback when the Ruby features needed by display-profiler-output
        # are unavailable: still run with profiling on, but skip the
        # post-processing step.
        puts "Running simple version of #{$collectionName}/#{$benchmark} because some required Ruby features are unavailable."
        run("profiler-simple", "--useConcurrentJIT=false", "-p", profilerOutput.to_s)
    end
end
| |
# Runs the benchmark under the js-exception-fuzz perl driver, which
# re-invokes the (escaped) VM command with exception fuzzing enabled.
def runExceptionFuzz
    fuzzedCommand = escapeAll(vmCommand + ["--useDollarVM=true", "--useExceptionFuzz=true", $benchmark.to_s])
    driver = (pathToHelpers + "js-exception-fuzz").to_s
    addRunCommand("exception-fuzz", ["perl", driver, fuzzedCommand], silentOutputHandler, simpleErrorHandler)
end
| |
# Runs the benchmark under the js-executable-allocation-fuzz perl driver
# with the given extra VM options; `name` distinguishes the variants in
# the reported kind.
def runExecutableAllocationFuzz(name, *options)
    fuzzedCommand = escapeAll(vmCommand + ["--useDollarVM=true", $benchmark.to_s] + options)
    driver = (pathToHelpers + "js-executable-allocation-fuzz").to_s
    addRunCommand("executable-allocation-fuzz-" + name, ["perl", driver, fuzzedCommand], silentOutputHandler, simpleErrorHandler)
end
| |
# Type-profiler modes; JIT-only, with an extra eager variant on FTL
# platforms.
def runTypeProfiler
    return unless $jitTests

    run("ftl-type-profiler", "--useTypeProfiler=true", *(FTL_OPTIONS))
    run("ftl-no-cjit-type-profiler-force-poly-proto", "--useTypeProfiler=true", "--forcePolyProto=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))

    return unless $isFTLPlatform

    run("ftl-type-profiler-ftl-eager", "--useTypeProfiler=true", *(FTL_OPTIONS + EAGER_OPTIONS))
end
| |
# Control-flow-profiler mode; JIT- and FTL-platform-only.
def runControlFlowProfiler
    return unless $jitTests
    return unless $isFTLPlatform

    # NOTE(review): the kind string says "type-profiler" although this
    # enables the control-flow profiler; baselines may key off the kind,
    # so confirm before renaming.
    run("ftl-no-cjit-type-profiler", "--useControlFlowProfiler=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
end
| |
# Schedules a Test262 run of the current benchmark.
#   mode:         :normal | :fail | :failDueToOutdatedOrBadTest | :skip —
#                 selects output/error handlers (or skips entirely).
#   exception:    expected exception name, or "NoException".
#   includeFiles: harness include files the test depends on.
#   flags:        subset of [:strict, :module, :async].
def runTest262(mode, exception, includeFiles, flags)
    failsWithException = exception != "NoException"
    isStrict = false
    isModule = false
    isAsync = false

    flags.each {
        | flag |
        case flag
        when :strict
            isStrict = true
        when :module
            isModule = true
        when :async
            isAsync = true
        else
            raise "Invalid flag for runTest262, #{flag}"
        end
    }

    # Includes live one directory up from the test itself.
    prepareExtraRelativeFiles(includeFiles.map { |f| "../" + f }, $collection)

    args = vmCommand + BASE_OPTIONS
    args << "--exception=" + exception if failsWithException
    args << "--test262-async" if isAsync
    args += $testSpecificRequiredOptions
    args += includeFiles

    case mode
    when :normal
        errorHandler = simpleErrorHandler
        outputHandler = silentOutputHandler
    when :fail
        errorHandler = expectedFailErrorHandler
        outputHandler = noisyOutputHandler
    when :failDueToOutdatedOrBadTest
        errorHandler = expectedFailErrorHandler
        outputHandler = noisyOutputHandler
    when :skip
        return
    else
        raise "Invalid mode: #{mode}"
    end

    # The benchmark is passed via the flag matching its flavor:
    # strict-mode file, module file, or plain script argument.
    if isStrict
        kind = "default-strict"
        args << "--strict-file=#{$benchmark}"
    else
        kind = "default"
        if isModule
            args << "--module-file=#{$benchmark}"
        else
            args << $benchmark.to_s
        end
    end

    addRunCommand(kind, args, outputHandler, errorHandler)
end
| |
def prepareTest262Fixture
    # This function is used to add the files used by Test262 modules tests.
    # Passing [""] appears to stage the collection directory itself so
    # relative module imports resolve — confirm against
    # prepareExtraRelativeFiles before relying on this.
    prepareExtraRelativeFiles([""], $collection)
end
| |
# Schedules an ES6-suite run of the current benchmark; mode selects the
# error handler (:fail and :failDueToOutdatedOrBadTest share one).
def runES6(mode)
    args = vmCommand + BASE_OPTIONS + $testSpecificRequiredOptions + [$benchmark.to_s]
    errorHandler =
        case mode
        when :normal
            simpleErrorHandler
        when :fail, :failDueToOutdatedOrBadTest
            expectedFailErrorHandler
        when :skip
            return
        else
            raise "Invalid mode: #{mode}"
        end
    addRunCommand("default", args, noisyOutputHandler, errorHandler)
end
| |
# Default battery for module ("-m") tests. The no-LLInt variant can be
# suppressed via the keyword (see noNoLLIntRunModules).
def defaultRunModules(noLLInt: true)
    run("default-modules", "-m")

    if !$jitTests
        return
    end

    run("no-llint-modules", "-m", "--useLLInt=false") if noLLInt
    run("no-cjit-validate-phases-modules", "-m", "--validateBytecode=true", "--validateGraphAtEachPhase=true", *NO_CJIT_OPTIONS)
    run("dfg-eager-modules", "-m", *EAGER_OPTIONS)
    run("dfg-eager-no-cjit-validate-modules", "-m", "--validateGraph=true", *(NO_CJIT_OPTIONS + EAGER_OPTIONS))

    return if !$isFTLPlatform

    run("default-ftl-modules", "-m", *FTL_OPTIONS)
    run("ftl-no-cjit-validate-modules", "-m", "--validateGraph=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
    run("ftl-no-cjit-no-inline-validate-modules", "-m", "--validateGraph=true", "--maximumInliningDepth=1", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
    run("ftl-eager-modules", "-m", *(FTL_OPTIONS + EAGER_OPTIONS))
    run("ftl-eager-no-cjit-modules", "-m", "--validateGraph=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS))
    run("ftl-no-cjit-small-pool-modules", "-m", "--jitMemoryReservationSize=202400", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
end
| |
# Module battery without the no-LLInt variant.
def noNoLLIntRunModules
    defaultRunModules(noLLInt: false)
end
| |
# Default battery for WebAssembly module tests; JIT- and wasm-platform-only.
# Quick mode runs only the default configuration.
def runWebAssembly
    return if !$jitTests
    return if !$isWasmPlatform
    run("default-wasm", "-m", *FTL_OPTIONS)
    if $mode != "quick"
        run("wasm-no-cjit-yes-tls-context", "-m", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
        run("wasm-eager", "-m", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + EAGER_OPTIONS))
        run("wasm-eager-jettison", "-m", "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *FTL_OPTIONS)
        run("wasm-no-tls-context", "-m", "--useFastTLSForWasmContext=false", *FTL_OPTIONS)
        run("wasm-slow-memory", "-m", "--useWebAssemblyFastMemory=false", *FTL_OPTIONS)
        run("wasm-collect-continuously", "-m", "--collectContinuously=true", "--verifyGC=true", *FTL_OPTIONS) if shouldCollectContinuously?
        if $isFTLPlatform
            run("wasm-b3", "-m", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *FTL_OPTIONS)
            run("wasm-air", "-m", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *FTL_OPTIONS)
        end
    end
end
| |
# JetStream2 wasm battery. Stages the JetStream driver plus the wasm
# payloads next to the benchmark; skipped entirely on memory-limited
# devices (these workloads are large).
def runWebAssemblyJetStream2
    return if !$jitTests
    return if !$isWasmPlatform

    if $memoryLimited
        skip
        return
    end

    prepareExtraAbsoluteFiles(JETSTREAM2_PATH, ["JetStreamDriver.js"])
    prepareExtraRelativeFilesWithBaseDirectory(Dir[JETSTREAM2_PATH + "wasm" + "*.js"].map { |f| "wasm/" + File.basename(f) }, $collection.dirname, $extraFilesBaseDir.dirname)
    prepareExtraRelativeFilesWithBaseDirectory(Dir[JETSTREAM2_PATH + "wasm" + "*.wasm"].map { |f| "wasm/" + File.basename(f) }, $collection.dirname, $extraFilesBaseDir.dirname)

    run("default-wasm", *FTL_OPTIONS)

    if $mode != "quick"
        run("wasm-no-cjit-yes-tls-context", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
        run("wasm-eager", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + EAGER_OPTIONS))
        run("wasm-eager-jettison", "--forceCodeBlockToJettisonDueToOldAge=true", "--verifyGC=true", *FTL_OPTIONS)
        run("wasm-no-tls-context", "--useFastTLSForWasmContext=false", *FTL_OPTIONS)
        run("wasm-slow-memory", "--useWebAssemblyFastMemory=false", *FTL_OPTIONS)
        run("wasm-collect-continuously", "--collectContinuously=true", "--verifyGC=true", *FTL_OPTIONS) if shouldCollectContinuously?
        if $isFTLPlatform
            run("wasm-b3", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *FTL_OPTIONS)
            run("wasm-air", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *FTL_OPTIONS)
        end
    end
end
| |
# Battery for the in-tree wasm test suite. Stages the suite's shared JS
# modules and wasm.json. A leading :no_module option suppresses the "-m"
# (module) flag that is otherwise prepended to every run.
def runWebAssemblySuite(*optionalTestSpecificOptions)
    return if !$jitTests
    return if !$isWasmPlatform
    modules = Dir[WASMTESTS_PATH + "*.js"].map { |f| File.basename(f) }
    prepareExtraAbsoluteFiles(WASMTESTS_PATH, ["wasm.json"])
    prepareExtraRelativeFiles(modules.map { |f| "../" + f }, $collection)
    if optionalTestSpecificOptions[0] == :no_module
        optionalTestSpecificOptions.shift
    else
        optionalTestSpecificOptions.unshift "-m"
    end
    run("default-wasm", *(FTL_OPTIONS + optionalTestSpecificOptions))
    if $mode != "quick"
        run("wasm-no-cjit-yes-tls-context", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
        run("wasm-eager", *(FTL_OPTIONS + EAGER_OPTIONS + optionalTestSpecificOptions))
        run("wasm-eager-jettison", "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        run("wasm-no-tls-context", "--useFastTLSForWasmContext=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        run("wasm-slow-memory", "--useWebAssemblyFastMemory=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        run("wasm-collect-continuously", "--collectContinuously=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions)) if shouldCollectContinuously?
        if $isFTLPlatform
            run("wasm-b3", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
            run("wasm-air", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        end
    end
end
| |
# Schedules one run of the harness benchmark per .wasm file in the
# collection; the target file is named after "--" and in the kind.
def runHarnessTest(kind, *options)
    allWasmFiles($collection).each { |wasmFile|
        basename = wasmFile.basename.to_s
        command = vmCommand + options + $testSpecificRequiredOptions + [$benchmark.to_s, "--", basename]
        addRunCommand("(" + basename + ")-" + kind, command, silentOutputHandler, simpleErrorHandler)
    }
end
| |
# Battery for harness-driven wasm tests (benchmark must be harness.js or
# harness.mjs); each mode is fanned out per .wasm file by runHarnessTest.
def runWebAssemblyWithHarness(*optionalTestSpecificOptions)
    raise unless $benchmark.to_s =~ /harness\.m?js/
    return if !$jitTests
    return if !$isWasmPlatform
    # Skip this completely on armv7 for now, since most of the tests are related to OSR and it's a bit hairy to skip them manually
    return if $architecture == "arm"

    wasmFiles = allWasmFiles($collection)
    prepareExtraRelativeFiles(wasmFiles.map { |f| f.basename }, $collection)

    runHarnessTest("default-wasm", *(FTL_OPTIONS + optionalTestSpecificOptions))
    if $mode != "quick"
        runHarnessTest("wasm-no-cjit-yes-tls-context", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-eager", *(FTL_OPTIONS + EAGER_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-eager-jettison", "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-no-tls-context", "--useFastTLSForWasmContext=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-slow-memory", "--useWebAssemblyFastMemory=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-collect-continuously", "--collectContinuously=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions)) if shouldCollectContinuously?
        if $isFTLPlatform
            runHarnessTest("wasm-b3", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
            # NOTE(review): every other wasm battery calls this mode
            # "wasm-air"; here it is "wasm-no-air" despite identical
            # options — confirm which name is intended.
            runHarnessTest("wasm-no-air", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        end
    end
end
| |
# Battery for Emscripten-produced tests: the .js benchmark loads a
# sibling .wasm file, which is staged alongside it.
#   mode: :skip to suppress the test entirely; any other value runs it.
def runWebAssemblyEmscripten(mode)
    case mode
    when :skip
        return
    end
    return if !$jitTests
    return if !$isWasmPlatform
    # Derive the companion .wasm name non-destructively. The previous
    # String#sub! returned nil when ".js" did not occur, which would have
    # crashed the prepareExtraRelativeFiles call below; the anchored
    # pattern also avoids rewriting a ".js" that appears mid-name.
    wasm = $benchmark.to_s.sub(/\.js\z/, '.wasm')
    prepareExtraRelativeFiles([Pathname('..') + wasm], $collection)
    run("default-wasm", *FTL_OPTIONS)
    if $mode != "quick"
        run("wasm-no-cjit-yes-tls-context", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
        run("wasm-eager-jettison", "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *FTL_OPTIONS)
        run("wasm-no-tls-context", "--useFastTLSForWasmContext=false", *FTL_OPTIONS)
        run("wasm-collect-continuously", "--collectContinuously=true", "--verifyGC=true", *FTL_OPTIONS) if shouldCollectContinuously?
        if $isFTLPlatform
            run("wasm-b3", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *FTL_OPTIONS)
            run("wasm-air", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *FTL_OPTIONS)
        end
    end
end
| |
# Battery for wasm spec tests driven by a checked-in spec harness
# directory (e.g. "spec-harness"). Stages the shared suite modules plus
# the harness files, then runs the harness entry script ahead of the
# benchmark in each mode.
def runWebAssemblySpecTestBase(mode, specHarnessPath, *optionalTestSpecificOptions)
    case mode
    when :skip
        return
    end
    return if !$jitTests
    return if !$isWasmPlatform
    prepareExtraAbsoluteFiles(WASMTESTS_PATH, ["wasm.json"])

    modules = Dir[WASMTESTS_PATH + "*.js"].map { |f| File.basename(f) }
    prepareExtraRelativeFiles(modules.map { |f| "../../" + f }, $collection)

    harness = Dir[WASMTESTS_PATH + (specHarnessPath + "/") + "*.js"].map { |f| File.basename(f) }
    prepareExtraRelativeFiles(harness.map { |f| ("../../" + specHarnessPath + "/") + f }, $collection)

    # The harness entry point, relative to the staged benchmark location.
    specHarnessJsPath = "../" + specHarnessPath + ".js"
    runWithOutputHandler("default-wasm", noisyOutputHandler, specHarnessJsPath, *(FTL_OPTIONS + optionalTestSpecificOptions))
    if $mode != "quick"
        runWithOutputHandler("wasm-no-cjit-yes-tls-context", noisyOutputHandler, specHarnessJsPath, "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
        runWithOutputHandler("wasm-eager-jettison", noisyOutputHandler, specHarnessJsPath, "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runWithOutputHandler("wasm-no-tls-context", noisyOutputHandler, specHarnessJsPath, "--useFastTLSForWasmContext=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runWithOutputHandler("wasm-collect-continuously", noisyOutputHandler, specHarnessJsPath, "--collectContinuously=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions)) if shouldCollectContinuously?
        if $isFTLPlatform
            runWithOutputHandler("wasm-b3", noisyOutputHandler, specHarnessJsPath, "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
            runWithOutputHandler("wasm-air", noisyOutputHandler, specHarnessJsPath, "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        end
    end
end
| |
# Spec tests driven by the "spec-harness" directory.
def runWebAssemblySpecTest(mode)
    runWebAssemblySpecTestBase(mode, "spec-harness")
end
| |
# Spec tests driven by the "ref-spec-harness" directory.
def runWebAssemblyReferenceSpecTest(mode)
    runWebAssemblySpecTestBase(mode, "ref-spec-harness")
end
| |
# Spec-harness tests with typed function references enabled.
def runWebAssemblyFunctionReferenceSpecTest(mode)
    extraOption = "--useWebAssemblyTypedFunctionReferences=true"
    runWebAssemblySpecTestBase(mode, "spec-harness", extraOption)
end
| |
# Runs the module test with a tiny JIT pool and every JS JIT tier off.
def runWebAssemblyLowExecutableMemory(*optionalTestSpecificOptions)
    return unless $jitTests
    return unless $isWasmPlatform

    jsModules = Dir[WASMTESTS_PATH + "*.js"].map { |f| File.basename(f) }
    prepareExtraAbsoluteFiles(WASMTESTS_PATH, ["wasm.json"])
    prepareExtraRelativeFiles(jsModules.map { |f| "../" + f }, $collection)
    # Only let WebAssembly get executable memory.
    run("default-wasm", "--useConcurrentGC=0" , "--useConcurrentJIT=0", "--jitMemoryReservationSize=20000", "--useBaselineJIT=0", "--useDFGJIT=0", "--useFTLJIT=0", "-m")
end
| |
# Schedules a Chakra-suite run of the current .js benchmark.
#   mode:         :baseline (diff against baselineFile), :pass
#                 (pass/fail handler), or the two skip modes.
#   exception:    expected exception name, or "NoException".
#   baselineFile: expected-output file, used only in :baseline mode.
#   extraFiles:   additional files the test loads, staged next to it.
def runChakra(mode, exception, baselineFile, extraFiles)
    raise unless $benchmark.to_s =~ /\.js$/
    failsWithException = exception != "NoException"
    # Benchmark name without the .js suffix (from the regex above).
    testName = $~.pre_match

    prepareExtraAbsoluteFiles(CHAKRATESTS_PATH, ["jsc-lib.js"])
    prepareExtraRelativeFiles(extraFiles.map { |f| "../" + f }, $collection)

    args = vmCommand + BASE_OPTIONS
    args += FTL_OPTIONS if $isFTLPlatform
    args += EAGER_OPTIONS
    args << "--exception=" + exception if failsWithException
    args << "--dumpException" if failsWithException
    args += $testSpecificRequiredOptions
    # The Chakra shim is loaded before the benchmark itself.
    args += ["jsc-lib.js"]

    case mode
    when :baseline
        prepareExtraRelativeFiles([(Pathname("..") + baselineFile).to_s], $collection)
        errorHandler = diffErrorHandler(($benchmarkDirectory + baselineFile).to_s)
        outputHandler = noisyOutputHandler
    when :pass
        errorHandler = chakraPassFailErrorHandler
        outputHandler = noisyOutputHandler
    when :skipDueToOutdatedOrBadTest
        return
    when :skip
        return
    else
        raise "Invalid mode: #{mode}"
    end

    kind = "default"
    args << $benchmark.to_s

    addRunCommand(kind, args, outputHandler, errorHandler)
end
| |
# Schedules a layout-test-style run: the benchmark is wrapped between
# standalone-pre.js and standalone-post.js and its output is diffed
# against the sibling <test>-expected.txt file.
#   kind:    suffix for the reported kind ("layout" or "layout-<kind>").
#   options: extra VM options.
def runLayoutTest(kind, *options)
    raise unless $benchmark.to_s =~ /\.js$/
    # Benchmark name without the .js suffix (from the regex above).
    testName = $~.pre_match
    if kind
        kind = "layout-" + kind
    else
        kind = "layout"
    end

    prepareExtraRelativeFiles(["../#{testName}-expected.txt"], $benchmarkDirectory)
    prepareExtraAbsoluteFiles(LAYOUTTESTS_PATH, ["resources/standalone-pre.js", "resources/standalone-post.js"])

    args = vmCommand + BASE_OPTIONS + options + $testSpecificRequiredOptions +
        [(Pathname.new("resources") + "standalone-pre.js").to_s,
         $benchmark.to_s,
         (Pathname.new("resources") + "standalone-post.js").to_s]
    addRunCommand(kind, args, noisyOutputHandler, diffErrorHandler(($benchmarkDirectory + "../#{testName}-expected.txt").to_s))
end
| |
# Layout-test flavors: each wrapper invokes runLayoutTest with a fixed kind
# tag and the matching JSC option set.

def runLayoutTestNoFTL
    runLayoutTest("no-ftl")
end

def runLayoutTestNoLLInt
    runLayoutTest("no-llint", "--useLLInt=false")
end

def runLayoutTestNoCJIT
    runLayoutTest("no-cjit", *NO_CJIT_OPTIONS)
end

def runLayoutTestDFGEagerNoCJIT
    runLayoutTest("dfg-eager-no-cjit", *(NO_CJIT_OPTIONS + EAGER_OPTIONS))
end

def runLayoutTestDefault
    # nil kind => plain "layout" run.
    runLayoutTest(nil, "--testTheFTL=true", *FTL_OPTIONS)
end

def runLayoutTestFTLNoCJIT
    runLayoutTest("ftl-no-cjit", "--testTheFTL=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
end

def runLayoutTestFTLEagerNoCJIT
    runLayoutTest("ftl-eager-no-cjit", "--testTheFTL=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS))
end

def runLayoutTestFTLEagerNoCJITB3O1
    runLayoutTest("ftl-eager-no-cjit-b3o1", "--testTheFTL=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + B3O1_OPTIONS))
end
| |
def noFTLRunLayoutTest
    # JIT-only layout configurations; nothing to do when JIT testing is off.
    return unless $jitTests
    runLayoutTestNoLLInt
    runLayoutTestNoCJIT
    runLayoutTestDFGEagerNoCJIT
end
| |
def defaultQuickRunLayoutTest
    # Quick mode: default run always; FTL platforms add three FTL
    # configurations, others fall back to the non-FTL JIT set.
    runLayoutTestDefault
    return unless $jitTests
    unless $isFTLPlatform
        noFTLRunLayoutTest
        return
    end
    runLayoutTestNoFTL
    runLayoutTestFTLNoCJIT
    runLayoutTestFTLEagerNoCJIT
end
| |
def defaultRunLayoutTest
    # Full default layout-test matrix; quick mode delegates to the reduced set.
    return defaultQuickRunLayoutTest if $mode == "quick"
    runLayoutTestDefault
    return unless $jitTests
    noFTLRunLayoutTest
    return unless $isFTLPlatform
    runLayoutTestNoFTL
    runLayoutTestFTLNoCJIT
    runLayoutTestFTLEagerNoCJIT
end
| |
def noEagerNoNoLLIntTestsRunLayoutTest
    # Like defaultRunLayoutTest but omits the eager and no-LLInt variants.
    runLayoutTestDefault
    return unless $jitTests
    runLayoutTestNoCJIT
    return unless $isFTLPlatform
    runLayoutTestNoFTL
    runLayoutTestFTLNoCJIT
end
| |
def noNoLLIntRunLayoutTest
    # Full matrix minus the no-LLInt configuration.
    runLayoutTestDefault
    return unless $jitTests
    runLayoutTestNoCJIT
    runLayoutTestDFGEagerNoCJIT
    return unless $isFTLPlatform
    runLayoutTestNoFTL
    runLayoutTestFTLNoCJIT
    runLayoutTestFTLEagerNoCJIT
end
| |
def prepareExtraRelativeFilesWithBaseDirectory(extraFiles, destination, baseDirectory)
    # Copy each relative path in extraFiles from baseDirectory into
    # destination, resolving destination relative to the output directory.
    Dir.chdir($outputDir) do
        extraFiles.each do |file|
            target = destination + file
            FileUtils.mkdir_p(target.dirname)
            FileUtils.cp(baseDirectory + file, target)
        end
    end
end
| |
# Convenience wrapper: relative extra files come from the collection's
# original source tree ($extraFilesBaseDir, set by prepareCollection).
def prepareExtraRelativeFiles(extraFiles, destination)
    prepareExtraRelativeFilesWithBaseDirectory(extraFiles, destination, $extraFilesBaseDir)
end
| |
def baseDirForCollection(collectionName)
    # Collections are staged under ".tests/<name>" inside the output dir.
    Pathname.new(".tests") + collectionName
end
| |
def prepareExtraAbsoluteFiles(absoluteBase, extraFiles)
    # Copy support files from an absolute source tree (e.g. the Chakra or
    # LayoutTests checkout) into this collection's staging directory.
    raise unless absoluteBase.absolute?
    Dir.chdir($outputDir) do
        collectionBaseDir = baseDirForCollection($collectionName)
        extraFiles.each do |file|
            target = collectionBaseDir + file
            # NOTE(review): guard tests `target` itself rather than its
            # parent directory — preserved as-is; confirm intent.
            FileUtils.mkdir_p target.dirname unless target.directory?
            FileUtils.cp absoluteBase + file, target
        end
    end
end
| |
def runComplexTest(before, after, additionalEnv, *options)
    # Stage helper scripts that run before/after the main benchmark, then
    # schedule one "complex" run with the extra environment variables.
    [before, after].each do |fileList|
        prepareExtraRelativeFiles(fileList.map { |v| (Pathname("..") + v).to_s }, $collection)
    end
    scriptArgs = before.map(&:to_s) + [$benchmark.to_s] + after.map(&:to_s)
    args = vmCommand + BASE_OPTIONS + options + $testSpecificRequiredOptions + scriptArgs
    addRunCommand("complex", args, noisyOutputHandler, simpleErrorHandler, *additionalEnv)
end
| |
def runMozillaTest(kind, mode, extraFiles, *options)
    # Schedule one Mozilla-suite run. `mode` picks the error handler:
    # :normal, :negative (expects exit 3), :fail /
    # :failDueToOutdatedOrBadTest (expected failures), :skip (no run).
    kind = kind ? "mozilla-#{kind}" : "mozilla"
    prepareExtraRelativeFiles(extraFiles.map { |v| (Pathname("..") + v).to_s }, $collection)
    args = vmCommand + BASE_OPTIONS + options + $testSpecificRequiredOptions + extraFiles.map(&:to_s) + [$benchmark.to_s]
    errorHandler =
        case mode
        when :normal
            mozillaErrorHandler
        when :negative
            mozillaExit3ErrorHandler
        when :fail, :failDueToOutdatedOrBadTest
            mozillaFailErrorHandler
        when :skip
            return
        else
            raise "Invalid mode: #{mode}"
        end
    addRunCommand(kind, args, noisyOutputHandler, errorHandler)
end
| |
# Mozilla-suite flavors: each wrapper fixes a kind tag and JSC option set.

def runMozillaTestDefault(mode, *extraFiles)
    runMozillaTest(nil, mode, extraFiles, *FTL_OPTIONS)
end

def runMozillaTestNoFTL(mode, *extraFiles)
    runMozillaTest("no-ftl", mode, extraFiles)
end

def runMozillaTestLLInt(mode, *extraFiles)
    runMozillaTest("llint", mode, extraFiles, "--useJIT=false")
end

def runMozillaTestBaselineJIT(mode, *extraFiles)
    runMozillaTest("baseline", mode, extraFiles, "--useLLInt=false", "--useDFGJIT=false")
end

def runMozillaTestDFGEagerNoCJITValidatePhases(mode, *extraFiles)
    runMozillaTest("dfg-eager-no-cjit-validate-phases", mode, extraFiles, "--validateBytecode=true", "--validateGraphAtEachPhase=true", *(NO_CJIT_OPTIONS + EAGER_OPTIONS))
end

def runMozillaTestFTLEagerNoCJITValidatePhases(mode, *extraFiles)
    runMozillaTest("ftl-eager-no-cjit-validate-phases", mode, extraFiles, "--validateBytecode=true", "--validateGraphAtEachPhase=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS))
end
| |
def defaultQuickRunMozillaTest(mode, *extraFiles)
    # Quick mode: two configurations when JIT testing is enabled, otherwise
    # just the no-FTL run.
    if $jitTests
        runMozillaTestDefault(mode, *extraFiles)
        runMozillaTestFTLEagerNoCJITValidatePhases(mode, *extraFiles)
    else
        runMozillaTestNoFTL(mode, *extraFiles)
        # The original re-tested $jitTests inside this branch, which is only
        # reached when $jitTests is false — that call was unreachable dead
        # code and has been removed.
    end
end
| |
def defaultRunMozillaTest(mode, *extraFiles)
    # Full Mozilla matrix; quick mode delegates to the reduced set.
    return defaultQuickRunMozillaTest(mode, *extraFiles) if $mode == "quick"
    runMozillaTestNoFTL(mode, *extraFiles)
    return unless $jitTests
    runMozillaTestLLInt(mode, *extraFiles)
    runMozillaTestBaselineJIT(mode, *extraFiles)
    runMozillaTestDFGEagerNoCJITValidatePhases(mode, *extraFiles)
    runMozillaTestDefault(mode, *extraFiles)
    runMozillaTestFTLEagerNoCJITValidatePhases(mode, *extraFiles) if $isFTLPlatform
end
| |
def runNoisyTestWithEnv(kind, *additionalEnv)
    # Noisy-config run with a custom kind tag and extra environment entries.
    # merge produces the same shallow copy the original built via dup + []=.
    runDefaultCfg(CFG_NOISY.merge(:kind => kind, :additionalEnv => additionalEnv))
end
| |
def defaultRunNoisyTest
    # Default configurations with the noisy output/error handlers.
    handlers = {
        :outputHandler => noisyOutputHandler,
        :errorHandler => noisyErrorHandler,
    }
    defaultRunCfg(handlers, {})
end
| |
def skip
    # Record that a run command was "added" (so the harness does not flag a
    # missing run) while marking the test as skipped.
    $didAddRunCommand = true
    $skipped = true
    message = "Skipping #{$collectionName}/#{$benchmark}"
    puts message
end
| |
def allWasmFiles(path)
    # A single file is its own list; a directory yields every .wasm/.mwasm
    # regular file directly inside it (no recursion).
    return [path] if path.file?
    Dir.entries(path).select { |name|
        name =~ /\.m?wasm$/ && (path + name).file?
    }.map { |name| path + name }
end
| |
def allJSFiles(path)
    # A single file is its own list; a directory yields every .js/.mjs
    # regular file directly inside it (no recursion).
    return [path] if path.file?
    Dir.entries(path).select { |name|
        name =~ /\.m?js$/ && (path + name).file?
    }.map { |name| path + name }
end
| |
def uniqueifyName(names, name)
    # Return `name` (or name-1, name-2, ...) such that the result is not yet
    # present in `names`, and record it there.
    candidate = name.to_s
    suffix = 1
    while names[candidate]
        candidate = "#{name}-#{suffix}"
        suffix += 1
    end
    names[candidate] = true
    candidate
end
| |
def simplifyCollectionName(collectionPath)
    # Derive a unique display name for a collection. For directories whose
    # basename matches /test/ (a generic name), keep prepending parent
    # directory names until the last-added component no longer matches.
    parent = collectionPath.dirname
    name = collectionPath.basename
    component = name
    if collectionPath.directory?
        while component.to_s =~ /test/
            component = parent.basename
            name = component + name
            parent = parent.dirname
        end
    end
    uniqueifyName($collectionNames, name)
end
| |
# Stage the collection named `name` into the output directory: copy the
# whole collection tree under .tests/<name> and repoint $collection at the
# staged copy. $extraFilesBaseDir keeps the original location so extra
# files can still be sourced from the original tree.
def prepareCollection(name)
    FileUtils.mkdir_p $outputDir + name

    absoluteCollection = $collection.realpath

    Dir.chdir($outputDir) {
        bundleDir = baseDirForCollection(name)

        # Create the proper directory structures.
        FileUtils.mkdir_p bundleDir
        if bundleDir.basename == $collection.basename
            # Same basename: copy into the parent to avoid <name>/<name> nesting.
            FileUtils.cp_r absoluteCollection, bundleDir.dirname
            $collection = bundleDir
        else
            FileUtils.cp_r absoluteCollection, bundleDir
            $collection = bundleDir + $collection.basename
        end

        $extraFilesBaseDir = absoluteCollection
    }
end
| |
# Names already handed out by uniqueifyName (name => true).
$collectionNames = {}
| |
# Process a YAML collection file: each entry either references a nested
# collection or names a path of tests with a "cmd" snippet that is eval'd
# per test file to schedule its run configurations.
def handleCollectionFile(collection)
    collectionName = simplifyCollectionName(collection)

    paths = {}
    subCollections = []
    YAML::load(IO::read(collection)).each {
        | entry |
        if entry["collection"]
            subCollections << entry["collection"]
            next
        end

        if Pathname.new(entry["path"]).absolute?
            raise "Absolute path: " + entry["path"] + " in #{collection}"
        end

        if paths[entry["path"]]
            raise "Duplicate path: " + entry["path"] + " in #{collection}"
        end
        # Record the path so later duplicates are actually caught — without
        # this the check above could never fire.
        paths[entry["path"]] = true

        subCollection = collection.dirname + entry["path"]

        # A file entry names its parent directory as the sub-collection;
        # a directory entry is the sub-collection itself.
        if subCollection.file?
            subCollectionName = Pathname.new(entry["path"]).dirname
        else
            subCollectionName = entry["path"]
        end

        $collection = subCollection
        $collectionName = Pathname.new(collectionName)
        Pathname.new(subCollectionName).each_filename {
            | filename |
            next if filename =~ /^\./
            $collectionName += filename
        }
        $collectionName = $collectionName.to_s

        prepareCollection($collectionName)

        Dir.chdir($outputDir) {
            pathsToSearch = [$collection]
            if entry["tests"]
                if entry["tests"].is_a? Array
                    pathsToSearch = entry["tests"].map {
                        | testName |
                        pathsToSearch[0] + testName
                    }
                else
                    pathsToSearch[0] += entry["tests"]
                end
            end
            pathsToSearch.each {
                | pathToSearch |
                allJSFiles(pathToSearch).each {
                    | path |

                    $benchmark = path.basename
                    $benchmarkDirectory = path.dirname

                    $runCommandOptions = {}
                    $testSpecificRequiredOptions = []
                    # The YAML "cmd" snippet drives which run* helpers fire
                    # for this benchmark.
                    eval entry["cmd"]
                }
            }
        }
    }

    subCollections.each {
        | subCollection |
        handleCollection(collection.dirname + subCollection)
    }
end
| |
# Treat a directory as a collection: every .js file directly inside it is a
# benchmark; parseRunCommands chooses its run configurations, falling back
# to defaultRun when it reports none.
def handleCollectionDirectory(collection)
    collectionName = simplifyCollectionName(collection)

    $collection = collection
    $collectionName = collectionName
    prepareCollection(collectionName)

    Dir.chdir($outputDir) {
        $benchmarkDirectory = $collection
        allJSFiles($collection).each {
            | path |

            $benchmark = path.basename

            # Fresh per-test state before scheduling runs.
            $runCommandOptions = {}
            $testSpecificRequiredOptions = []
            defaultRun unless parseRunCommands
        }
    }
end
| |
def handleCollection(collection)
    # A collection is either a YAML manifest file or a directory of tests.
    collection = Pathname.new(collection)
    collection.file? ? handleCollectionFile(collection) : handleCollectionDirectory(collection)
end
| |
# Assemble the self-contained test bundle in $outputDir: stage the VM
# (copy or symlink under .vm), the helper scripts (.helpers), and every
# collection named on the command line. Exits with status 2 if filtering
# left nothing to run.
def prepareBundle
    raise if $bundle

    if $doNotMessWithVMPath
        if !$remote and !$tarball
            $testingFrameworkPath = (frameworkFromJSCPath($jscPath) || $jscPath.dirname).realpath
            $jscPath = Pathname.new($jscPath).realpath
        else
            $testingFrameworkPath = frameworkFromJSCPath($jscPath)
        end
    else
        originalJSCPath = $jscPath
        vmDir = $outputDir + ".vm"
        FileUtils.mkdir_p vmDir

        frameworkPath = frameworkFromJSCPath($jscPath)
        destinationFrameworkPath = Pathname.new(".vm") + "JavaScriptCore.framework"
        $jscPath = destinationFrameworkPath + "Helpers" + "jsc"
        $testingFrameworkPath = Pathname.new("..") + destinationFrameworkPath

        if frameworkPath
            source = frameworkPath
            destination = Pathname.new(".vm")
        elsif $hostOS == "windows"
            # Make sure to copy dll along with jsc on Windows
            originalJSCDir = File.dirname(originalJSCPath)
            source = [originalJSCPath] + [originalJSCDir + "/jscLib.dll"]

            # Check for and copy JavaScriptCore.dll and WTF.dll for dynamic builds.
            # File.exists? was removed in Ruby 3.2; File.exist? is the
            # supported spelling.
            javaScriptCoreDLLPath = File.join(originalJSCDir, "JavaScriptCore.dll")
            wtfDLLPath = File.join(originalJSCDir, "WTF.dll")
            if File.exist?(javaScriptCoreDLLPath)
                source = source + [javaScriptCoreDLLPath]
            end
            if File.exist?(wtfDLLPath)
                source = source + [wtfDLLPath]
            end

            destination = $jscPath.dirname

            Dir.chdir($outputDir) {
                FileUtils.mkdir_p destination
            }
        else
            source = originalJSCPath
            destination = $jscPath

            Dir.chdir($outputDir) {
                FileUtils.mkdir_p $jscPath.dirname
            }
        end

        Dir.chdir($outputDir) {
            if $copyVM
                FileUtils.cp_r source, destination
            else
                begin
                    FileUtils.ln_s source, destination
                rescue Exception
                    # Symlinks can fail (e.g. cross-device, Windows); fall
                    # back to a full copy.
                    $stderr.puts "Warning: unable to create soft link, trying to copy."
                    FileUtils.cp_r source, destination
                end
            end

            if $remote and $hostOS == "linux"
                bundle_binary = (Pathname.new(THIS_SCRIPT_PATH).dirname + 'bundle-binary').realpath
                Dir.mktmpdir {
                    | tmpdir |
                    # Generate bundle in a temporary directory so that
                    # we can safely pick it up regardless of its name
                    # (it's the only zip file there).
                    cmdline = [
                        bundle_binary.to_s,
                        "--dest-dir=#{$jscPath.dirname}",
                        "--log-level=debug",
                        $jscPath.to_s
                    ]
                    if not $ldd.nil?
                        cmdline << "--ldd=#{$ldd}"
                    end
                    mysys(cmdline)
                }
            end
        }
    end

    Dir.chdir($outputDir) {
        FileUtils.cp_r HELPERS_PATH, ".helpers"
    }

    ARGV.each {
        | collection |
        handleCollection(collection)
    }

    if $runlist.size == 0
        $stderr.puts("No tests selected for execution (overzealous --filter?)")
        exit(2)
    end

    puts
end
| |
def cleanOldResults
    # Bundle mode only: delete every result file left by a previous run.
    raise unless $bundle
    eachResultFile($outputDir) do |stale|
        FileUtils.rm_f(stale)
    end
end
| |
def cleanEmptyResultFiles
    # Drop zero-length .out files so later reporting only sees real output.
    eachResultFile($outputDir) do |path|
        isOutFile = path.basename.to_s =~ /\.out$/
        FileUtils.rm_f(path) if isOutFile && FileTest.size(path) == 0
    end
end
| |
def eachResultFile(startingDir, &block)
    # Iterative depth-first walk of startingDir, yielding every file whose
    # name does not begin with "." (this also skips "." and "..").
    pending = [startingDir]
    while (dir = pending.pop)
        Dir.foreach(dir) do |entry|
            next if entry.start_with?(".")
            child = dir + entry
            if child.directory?
                pending.push(child)
            else
                block.call(child)
            end
        end
    end
end
| |
# Apply `block` to every element of `array`, fanning the work out across up
# to $numChildProcesses forked workers. Note: side effects performed for
# chunks 1..N-1 happen in child processes and are not visible to the caller.
def parallelEach(array, &block)
    # Short of using 'require "parallel"' we use a simple statically
    # partitioned multiprocess dispatch for processing fixed chunks of the
    # given array in parallel. We use Process rather than Thread to
    # parallelise CPU load as well as IO (due to the GIL).

    # Some platforms (notably Windows) do not support Process.fork, so work
    # serially on these.
    nWorkers = Process.respond_to?(:fork) ? $numChildProcesses : 1

    # Chunk size is rounded up
    chunkSize = (array.size + (nWorkers - 1)) / nWorkers

    # If chunk size is too small, work serially
    if chunkSize <= 1
        nWorkers = 1
        chunkSize = array.size
    end

    childPIDs = []

    # Chunks 1 to nWorkers-1 run in the worker processes
    for i in 1...nWorkers do
        chunkStart = i*chunkSize
        break if chunkStart >= array.size
        pid = Process.fork
        if pid.nil?
            # Worker process. Process chunk i.
            array.slice(chunkStart, chunkSize).each(&block)
            # exit! skips at_exit hooks inherited from the parent.
            Process.exit!(true)
        else
            childPIDs << pid
        end
    end

    # Main process. Process chunk 0.
    array.slice(0, chunkSize).each(&block)

    # Wait for workers
    for pid in childPIDs do
        _, status = Process.waitpid2(pid)
        raise "Child process still running" unless status.exited?
        if status.exitstatus != 0
            # Propagate the first failing worker's exit status.
            STDERR.puts "Child process failed with status: #{status.exitstatus}"
            exit(status.exitstatus)
        end
    end
end
| |
def cleanRunnerDirectory
    # Bundle mode only: delete stale status files from the runner directory.
    raise unless $bundle
    Dir.foreach($runnerDir) do |entry|
        FileUtils.rm_f($runnerDir + entry) if entry =~ /^#{STATUS_FILE_PREFIX}/
    end
end
| |
def sshRead(cmd, remoteHost, options={})
    # Run `cmd` on remoteHost over ssh and return its stdout. Raises unless
    # ssh succeeded or options[:ignoreFailure] is set.
    raise unless $remote

    identityArgs = remoteHost.identity_file_path ? ["-i", remoteHost.identity_file_path] : []
    sshInvocation = ["ssh"] + SSH_OPTIONS_DEFAULT +
        ["-p", remoteHost.port.to_s] + identityArgs +
        ["#{remoteHost.user}@#{remoteHost.host}", cmd]
    output = ""
    IO.popen(sshInvocation, "r") do |pipe|
        output = pipe.read
    end
    raise "#{$?}" unless $?.success? or options[:ignoreFailure]
    output
end
| |
def runCommandOnTester(cmd)
    # Run a shell command on whatever executes the tests: locally via
    # backticks, or on the first remote host that answers.
    return `#{cmd}` unless $remote
    $remoteHosts.each do |remoteHost|
        begin
            # Return first successful value. Obviously, this assumes the
            # remotes are homogeneous.
            return sshRead(cmd, remoteHost)
        rescue Exception => e
            $stderr.puts("Error running `#{cmd}` on #{remoteHost.host}: #{e}")
        end
    end
    "0"
end
| |
def commandToGetNumberOfProcessors
    # Windows exposes the CPU count via an environment variable; elsewhere
    # try sysctl (macOS), nproc (Linux), then getconf as a POSIX fallback.
    return "cmd /c echo %NUMBER_OF_PROCESSORS%" if $hostOS == "windows"
    "sysctl -n hw.activecpu 2>/dev/null || nproc --all 2>/dev/null || getconf _NPROCESSORS_ONLN"
end
| |
def numberOfProcessors
    # Probe the tester for its CPU count; any failure (or unparsable output,
    # which to_i maps to 0) falls back to 1 with a warning.
    count = begin
        runCommandOnTester(commandToGetNumberOfProcessors).to_i
    rescue
        0
    end
    if count == 0
        $stderr.puts("Warning: could not determine the number of remote CPUs, defaulting to 1")
        count = 1
    end
    count
end
| |
def runAndMonitorCommandOutput(cmd, &blk)
    # Spawn cmd (an argv array; elements are stringified so Pathnames work)
    # and echo each output line while also passing it, with the child pid,
    # to the block.
    argv = cmd.map(&:to_s)
    IO.popen(argv, "r") do |pipe|
        pipe.each_line do |line|
            blk.call(pipe.pid, line)
            puts(line)
            $stdout.flush
        end
    end
end
| |
# Serializes counting of entries in the shared runner directory.
$runnerDirMutex = Mutex.new

# Run the test-runner command, optionally rendering a one-line progress
# meter. Recognizes "Running ...", "PASS: ..." and "FAIL: ..." lines in the
# runner output; anything else is echoed verbatim.
def runAndMonitorTestRunnerCommand(cmd, options={})
    numberOfTests = 0
    $runnerDirMutex.synchronize {
        Dir.chdir($runnerDir) {
            # -1 for the runscript, and -2 for '..' and '.'
            numberOfTests = Dir.entries(".").count - 3
        }
    }
    unless $progressMeter
        mysys(cmd.join(' '), options)
    else
        running = {}
        didRun = {}
        didFail = {}
        blankLine = true
        prevStringLength = 0
        IO.popen(cmd.join(' '), mode="r") {
            | inp |
            inp.each_line {
                | line |
                # scrub: runner output may contain invalid UTF-8 bytes.
                line = line.scrub.chomp
                if line =~ /^Running /
                    running[$~.post_match] = true
                elsif line =~ /^PASS: /
                    didRun[$~.post_match] = true
                elsif line =~ /^FAIL: /
                    didRun[$~.post_match] = true
                    didFail[$~.post_match] = true
                else
                    # Unrecognized output: erase the progress line first so
                    # the message is not mixed into it.
                    unless blankLine
                        print("\r" + " " * prevStringLength + "\r")
                    end
                    puts line
                    blankLine = true
                end

                # NOTE(review): this nested def re-defines lpad on every
                # line of output; harmless but wasteful.
                def lpad(str, chars)
                    str = str.to_s
                    if str.length > chars
                        str
                    else
                        "%#{chars}s"%(str)
                    end
                end

                string = ""
                string += "\r#{lpad(didRun.size, numberOfTests.to_s.size)}/#{numberOfTests}"
                unless didFail.empty?
                    string += " (failed #{didFail.size})"
                end
                string += " "
                # One dot per test started but not yet finished.
                (running.size - didRun.size).times {
                    string += "."
                }
                # If the new status line is shorter, pad with spaces so the
                # previous, longer line is fully overwritten.
                if string.length < prevStringLength
                    print string
                    print(" " * (prevStringLength - string.length))
                end
                print string
                prevStringLength = string.length
                blankLine = false
                $stdout.flush
            }
        }
        puts
        if not $?.success? and not options[:ignoreFailure]
            raise "Failed to run #{cmd}: #{$?.inspect}"
        end
    end
end
| |
def getRemoteDirectoryIfNeeded(remoteHost)
    # Lazily fetch the remote scratch directory from the host's ~/.bencher
    # config ("tempPath" key); cached on the host object after first use.
    return if remoteHost.remoteDirectory
    remoteHost.remoteDirectory = JSON::parse(sshRead("cat ~/.bencher", remoteHost))["tempPath"]
end
| |
def copyBundleToRemote(remoteHost)
    # Create the remote scratch directory, then scp the compressed bundle
    # tarball into it.
    identityArgs = remoteHost.identity_file_path ? ["-i", remoteHost.identity_file_path] : []
    target = "#{remoteHost.user}@#{remoteHost.host}"
    mysys(["ssh"] + SSH_OPTIONS_DEFAULT + identityArgs + ["-p", remoteHost.port.to_s, target, "mkdir -p #{remoteHost.remoteDirectory}"])
    mysys(["scp"] + SSH_OPTIONS_DEFAULT + identityArgs + ["-P", remoteHost.port.to_s, ($outputDir.dirname + $tarFileName).to_s, "#{target}:#{remoteHost.remoteDirectory}"])
end
| |
def exportBaseEnvironmentVariables(escape)
    # Build the "export VAR=... && " prefix prepended to test commands. When
    # `escape` is true the $(...) substitutions are backslash-escaped so
    # they survive the extra quoting level of the ssh remote script.
    dollar = escape ? "\\$" : "\$"
    dyldFrameworkPath = "#{dollar}(cd #{$testingFrameworkPath.dirname}; pwd)"
    ldLibraryPath = "#{dollar}(cd #{$testingFrameworkPath.dirname}/..; pwd)/#{$jscPath.dirname}"
    exports = []
    exports << "export DYLD_FRAMEWORK_PATH=#{dyldFrameworkPath} && "
    # NOTE(review): this entry lacks the trailing space the others have;
    # preserved byte-for-byte.
    exports << "export LD_LIBRARY_PATH=#{ldLibraryPath} &&"
    exports << "export JSCTEST_timeout=#{Shellwords.shellescape(ENV['JSCTEST_timeout'])} && "
    exports << "export JSCTEST_hardTimeout=#{Shellwords.shellescape(ENV['JSCTEST_hardTimeout'])} && "
    exports << "export JSCTEST_memoryLimit=#{Shellwords.shellescape(ENV['JSCTEST_memoryLimit'])} && "
    exports << "export TZ=#{Shellwords.shellescape(ENV['TZ'])} && "
    exports.join("")
end
| |
# Launch the prepared test runner: locally inside $runnerDir, or via ssh on
# remoteHosts[remoteIndex] inside the unpacked bundle's .runner directory.
def runTestRunner(testRunner, remoteHosts, remoteIndex=0)
    if not remoteHosts.nil?
        remoteHost = remoteHosts[remoteIndex]
        getRemoteDirectoryIfNeeded(remoteHost)
        # Build a double-quoted remote script: cd into the runner dir,
        # export the environment the tests expect, then run the runner.
        remoteScript = "\""
        remoteScript += "cd #{remoteHost.remoteDirectory}/#{$outputDir.basename}/.runner && "
        remoteScript += exportBaseEnvironmentVariables(true)
        $envVars.each { |var| remoteScript += "export " << var << "\n" }
        remoteScript += "#{testRunner.command(remoteIndex)}\""
        runAndMonitorTestRunnerCommand(["ssh"] + SSH_OPTIONS_DEFAULT + (remoteHost.identity_file_path ? ["-i", remoteHost.identity_file_path] : []) + ["-p", remoteHost.port.to_s, "#{remoteHost.user}@#{remoteHost.host}", remoteScript])
    else
        Dir.chdir($runnerDir) {
            runAndMonitorTestRunnerCommand(Shellwords.shellsplit(testRunner.command))
        }
    end
end
| |
# Matches one status line: "./<prefix><index> <runId-hex> <exitCode> <PASS|FAIL>".
STATUS_RE = /^[.]\/#{STATUS_FILE_PREFIX}(?<index>\d+)\s(?<runId>\h+)\s(?<exitCode>\d+)\s(?<result>#{STATUS_FILE_PASS}|#{STATUS_FILE_FAIL})$/
| |
def processStatusLine(map, line)
    # Parse one runner status line and append its PASS/FAIL result to
    # map[testIndex]. Non-matching or stale lines are reported and dropped.
    match = STATUS_RE.match(line)
    if match.nil?
        # Malformed lines can legitimately occur when the remote fails at an
        # inopportune moment. Ignore those lines but print them out, so that
        # we can easily diagnose actual issues with the remote output.
        $stderr.puts("Ignoring malformed status line `#{line}`")
        return
    end
    index = match[:index].to_i
    runId = match[:runId]
    # Bundle scripts embed their own run ID, so a mismatch only matters
    # outside bundle mode — e.g. a remote that went away mid-run and came
    # back during a different run.
    if runId != $runUniqueId && !$bundle
        $stderr.puts("Ignoring stale status file for #{index} (ID #{runId} but current ID is #{$runUniqueId})")
        return
    end
    (map[index] ||= []).push(match[:result])
end
| |
# Collect test results into map[index] => [result, ...] by reading every
# non-empty status file from the runner directory — locally, or over ssh
# from every configured remote — and feeding each entry through
# processStatusLine.
def getStatusMap(map={})
    # Prints "<filename> <contents>" for every non-empty status file.
    find_cmd = "find . -maxdepth 1 -name \"#{STATUS_FILE_PREFIX}*\" -a -size +0c -exec sh -c \"printf \\\"%s \\\" {}; cat {}\" \\;"
    if $remote
        # Note: here we're using $remoteHosts (instead of getting the
        # list of live remoteHosts from the caller, because there may
        # well be test results on a remoteHost that got rebooted
        # (note, the test results are tagged with a run ID, so we'll
        # ignore any stale results from a previous run).
        forEachRemote($remoteHosts, :dropOnFailure => true, :timeout => 8 * REMOTE_TIMEOUT) { |_, host|
            runnerDir = "#{host.remoteDirectory}/#{$outputDir.basename}/.runner"
            output = sshRead("if test -d #{runnerDir}; then cd #{runnerDir}; else false; fi && " + find_cmd, host, :ignoreFailure => true)
            output.split(/\n/).each {
                | line |
                processStatusLine(map, line)
            }
        }
    else
        Dir.chdir($runnerDir) {
            Dir.glob("#{STATUS_FILE_PREFIX}*").each do |name|
                if File.size(name) > 0
                    File.open(name, 'r') { |f|
                        line = f.first
                        # Reconstruct the "./<file> <line>" shape the remote
                        # find command produces, so both paths share a parser.
                        processStatusLine(map, "./#{name} #{line}")
                    }
                end
            end
        }
    end
    map
end
| |
# Evaluate the final status map against the run list and write a per-family
# PASSED/FAILED summary to <outputDir>/resultsByFamily.
def detectFailures(statusMap={})
    raise if $bundle
    if statusMap.size == 0
        statusMap = getStatusMap
    end

    evaluator = TestResultEvaluatorFinal.new($runlist, statusMap)
    evaluator.visit!({:treatIncompleteAsFailed => true})
    evaluator.validate

    if evaluator.noresult > 0
        $stderr.puts("Could not get the exit status for #{evaluator.noresult} tests")
        # We can't change our exit code, as run-javascriptcore-tests
        # expects 0 even when there are failures.
    end

    familyMap = evaluator.familyMap

    File.open($outputDir + "resultsByFamily", "w") {
        | outp |
        first = true
        familyMap.keys.sort.each {
            | familyName |
            # Blank line between families (but not before the first one).
            if first
                first = false
            else
                outp.puts
            end

            outp.print "#{familyName}:"

            numPassed = 0
            familyMap[familyName].each {
                | entry |
                if entry[:result] == "PASS"
                    numPassed += 1
                end
            }

            # Uniform families get a one-line verdict; mixed families list
            # each plan's individual result.
            if numPassed == familyMap[familyName].size
                outp.puts " PASSED"
            elsif numPassed == 0
                outp.puts " FAILED"
            else
                outp.puts
                familyMap[familyName].each {
                    | entry |
                    outp.puts " #{entry[:plan].name}: #{entry[:result]}"
                }
            end
        }
    }
end
| |
def compressBundle
    # Tar up the whole output directory next to it, as $tarFileName.
    command = "cd #{$outputDir}/.. && tar -czf #{$tarFileName} #{$outputDir.basename}"
    $stderr.puts ">> #{command}" if $verbosity >= 2
    raise unless system(command)
end
| |
def clean(file)
    # Delete a previous run's artifact — unless we are executing a pre-built
    # bundle, which must leave its own contents intact.
    return if $bundle
    FileUtils.rm_rf file
end
| |
# Remove artifacts of any previous run (no-ops in bundle mode).
clean($outputDir + "failed")
clean($outputDir + "passed")
clean($outputDir + "noresult")
clean($outputDir + "flaky")
clean($outputDir + "results")
clean($outputDir + "resultsByFamily")
clean($outputDir + ".vm")
clean($outputDir + ".helpers")
clean($outputDir + ".runner")
clean($outputDir + ".tests")
clean($outputDir + "_payload")

Dir.mkdir($outputDir) unless $outputDir.directory?

$outputDir = $outputDir.realpath
$runnerDir = $outputDir + ".runner"

# Level of parallelism: explicit setting wins, then the
# WEBKIT_TEST_CHILD_PROCESSES environment variable, then the CPU count.
if !$numChildProcesses
    if ENV["WEBKIT_TEST_CHILD_PROCESSES"]
        $numChildProcesses = ENV["WEBKIT_TEST_CHILD_PROCESSES"].to_i
        $numChildProcessesSetByUser = true
    else
        $numChildProcesses = numberOfProcessors
    end
end

if ENV["JSCTEST_timeout"]
    # In the worst case, the processors just interfere with each other.
    # Increase the timeout proportionally to the number of processors.
    ENV["JSCTEST_timeout"] = (ENV["JSCTEST_timeout"].to_i.to_f * Math.sqrt($numChildProcesses)).to_i.to_s
end

# We do not adjust hardTimeout. If we are not producing any results during 1200 seconds, buildbot terminates the tests. So we should terminate hung tests.

if !ENV["JSCTEST_memoryLimit"] && $memoryLimited
    ENV["JSCTEST_memoryLimit"] = (600 * 1024 * 1024).to_s
end

# Some tests fail if the time zone is not set to US/Pacific
# https://webkit.org/b/136363
# Set as done in run-javascript-tests
ENV["TZ"] = "US/Pacific";
| |
def runBundle
    # Execute a pre-built bundle in place using the configured runner type.
    raise unless $bundle
    runner = TestRunner.create($testRunnerType, $runnerDir)
    executor = BundleTestsExecutor.new($runlist, $serialPlans, ITERATION_LIMITS, $treatFailingAsFlaky, runner)
    executor.loop
end
| |
def runTarball
    # Build the bundle, prepare the runner scripts, and compress the whole
    # thing into a tarball — without executing any test.
    raise unless $tarball
    prepareBundle
    runner = TestRunner.create($testRunnerType, $runnerDir)
    runner.prepare($runlist, $serialPlans, Set.new, nil)
    compressBundle
end
| |
# Run `blk` concurrently (one Thread per host) for every remote. Options:
# :timeout caps the total wall-clock time spent joining all threads;
# :dropOnFailure silently drops hosts whose block raised
# CommandExecutionFailed instead of propagating. Returns the hosts whose
# thread finished in time without failing.
def forEachRemote(remoteHosts, options={}, &blk)
    threads = []
    remoteHosts.each_index {
        | index |
        remoteHost = remoteHosts[index]
        threads << Thread.new {
            blk.call(index, remoteHost)
        }
    }

    # The deadline is shared across all joins: time spent waiting on one
    # thread counts against the others too.
    etime = nil
    if options.has_key?(:timeout)
        etime = Time.now + options[:timeout]
    end
    liveRemotes = []
    threads.each_index {
        | index |
        thread = threads[index]
        begin
            if options.has_key?(:timeout)
                if etime.nil?
                    # If a timeout has been requested and etime is nil,
                    # that means the timeout has expired and we shouldn't
                    # wait at all.
                    timeout = 0
                else
                    timeout = etime - Time.now
                    if timeout < 0
                        timeout = 0
                    end
                end
                if thread.join(timeout).nil?
                    if $verbosity > 0
                        $stderr.puts("Timeout joining thread for remote #{remoteHosts[index]}")
                    end
                    # Timeout expired, so we can't block waiting for
                    # any other threads. Either they're done or
                    # they've also timed out.
                    etime = nil
                    raise CommandExecutionFailed
                end
            else
                thread.join # No timeout requested, just block.
            end
            liveRemotes << remoteHosts[index]
        rescue CommandExecutionFailed
            if options[:dropOnFailure]
                if $verbosity > 0
                    $stderr.puts("Dropping failed remote #{remoteHosts[index]}")
                end
            else
                raise
            end
        end
    }
    liveRemotes
end
| |
def each_chunk(e, chunkSize)
    # Yield consecutive chunks of `e`, each of size chunkSize; the final
    # chunk may be shorter. Nothing is yielded for an empty enumerable.
    buffer = []
    e.each do |element|
        buffer << element
        next unless buffer.size == chunkSize
        yield buffer
        buffer = []
    end
    yield buffer unless buffer.empty?
end
| |
class TestRunnerGnuParallel < TestRunner
    # Write one GNU-parallel job file named `name` in the runner directory.
    # Plans already in completedPlans are dropped; the rest are grouped into
    # chunks of $gnuParallelChunkSize scripts joined with "; " so each
    # parallel job amortizes connection/setup cost over several tests.
    def prepareGnuParallelRunnerJobs(name, runlist, completedPlans)
        jobFilePath = @runnerDir + name
        FileUtils.mkdir_p(@runnerDir)

        File.open(jobFilePath, "w") do |outp|
            remaining = runlist.reject { |plan| completedPlans.include?(plan) }
            each_chunk(remaining, $gnuParallelChunkSize) do |plans|
                outp.puts(plans.map { |plan| "sh ./test_script_#{plan.index}" }.join("; "))
            end
        end
    end

    # Parallel-eligible jobs exclude the serial plans (union with
    # completedPlans); serial plans get their own job file.
    def prepareRunner(runlist, serialPlans, completedPlans, remoteHosts)
        prepareGnuParallelRunnerJobs("parallel-tests", runlist, completedPlans | serialPlans)
        prepareGnuParallelRunnerJobs("serial-tests", serialPlans, completedPlans)
    end
end
| |
| |
# Create a temporary executable wrapper that GNU parallel uses instead of
# plain ssh. The wrapper's argv is:
#   <remotedir> <remoteport> <remoteuser> <remotehost> -- <command...>
# It pipes the command into a persistent ssh connection that first cds into
# <remotedir>; if that directory is missing it prints a marker line (the
# caller scans for it) and fails. The wrapper path is yielded to the block.
def withGnuParallelSshWrapper(&blk)
    Tempfile.open('ssh-wrapper', $runnerDir) {
        | wrapper |
        head =
<<'EOF'
#!/bin/sh

remotedir="$1"
shift

remoteport="$1"
shift

remoteuser="$1"
shift

remotehost="$1"
shift

if test "x$1" != "x--"; then
    echo "Expected '--' at this position, instead got $1" 1>&2
    exit 3
fi
shift
EOF
        extraOptions = [
            # Many of our jobs are short, ensure GNU parallel won't need to set
            # up an ssh connection from scratch.
            "ControlPath=./%C",
            "ControlMaster=auto",
            "ControlPersist=10m",
            # Treat remote boards as volatile. Don't check the host keys, don't
            # save them locally.
            "StrictHostKeyChecking=no",
            "UserKnownHostsFile=/dev/null"
        ].collect { |opt|
            ["-o", opt]
        }.flatten
        sshOptions = (SSH_OPTIONS_DEFAULT + extraOptions).join(" ")
        wrapper.puts(head +
            "echo \"$@\" | ssh #{sshOptions} -p \"$remoteport\" -l \"$remoteuser\" -o RemoteCommand=\"if test -d '$remotedir'; then cd '$remotedir'; else echo '#{PARALLEL_REMOTE_WRAPPER_MARK_BEGIN}${remotehost}#{PARALLEL_REMOTE_WRAPPER_MARK_END}'; false; fi && sh -s\" \"$remotehost\""
        )
        FileUtils.chmod("ugo=rx", wrapper.path)
        wrapper.close # Avoid ETXTBUSY
        blk.call(wrapper.path)
    }
end
| |
def withGnuParallelSshLoginFile(remoteHosts, &blk)
    # Write a GNU-parallel --slf (ssh login file): one line per remote,
    # invoking the wrapper with that host's directory/port/user/hostname.
    # The login file's path is yielded to the block.
    withGnuParallelSshWrapper do |wrapper|
        Tempfile.open('slf', $runnerDir) do |loginFile|
            remoteHosts.each do |remoteHost|
                loginFile.puts("#{wrapper} #{remoteHost.remoteDirectory} #{remoteHost.port} #{remoteHost.user} #{remoteHost.host}")
            end
            loginFile.flush
            blk.call(loginFile.path)
        end
    end
end
| |
# Extracts the compressed test bundle on every remote host, wiping any
# previous unpacked copy first. Hosts where this fails are dropped from the
# returned list (forEachRemote with :dropOnFailure).
def unpackBundleOnRemoteHosts(remoteHosts)
    forEachRemote(remoteHosts, :dropOnFailure => true, :timeout => REMOTE_TIMEOUT) { |_, remoteHost|
        sshCommand = ["ssh"] + SSH_OPTIONS_DEFAULT
        sshCommand += ["-i", remoteHost.identity_file_path] if remoteHost.identity_file_path
        sshCommand += ["-p", remoteHost.port.to_s]
        sshCommand << "#{remoteHost.user}@#{remoteHost.host}"
        sshCommand << "cd #{Shellwords.shellescape(remoteHost.remoteDirectory)} && rm -rf #{$outputDir.basename} && tar xzf #{$tarFileName}"
        mysys(sshCommand)
    }
end
| |
# Copies and unpacks the test bundle on every remote host, returning only
# the hosts where every step succeeded.
def prepareRemotes(remoteHosts)
    # If the preparatory steps fail, drop the remote host from our
    # list. Otherwise, if it comes back online in the middle of an
    # iteration, we'll try to run test jobs on it, possibly using
    # an unrelated bundle from a previous run.
    liveHosts = forEachRemote(remoteHosts, {:dropOnFailure => true, :timeout => REMOTE_TIMEOUT}) { |_, remoteHost|
        getRemoteDirectoryIfNeeded(remoteHost)
        copyBundleToRemote(remoteHost)
    }
    unpackBundleOnRemoteHosts(liveHosts)
end
| |
# Runs one GNU parallel invocation over the given job file (`inputs`),
# distributing jobs across remoteHosts via the ssh-wrapper login file.
# options[:parallelJobsOnEachHost] overrides the per-host job slot count.
# While the invocation runs, its output is monitored for the "lost state"
# markers emitted by the wrapper script / the job preamble; seeing one kills
# the parallel process so the caller's outer loop can reprovision and retry.
def runGnuParallelRunner(remoteHosts, inputs, options={})
    # Per-job timeout: default 5 minutes, overridable via JSCTEST_timeout.
    timeout = 300
    if ENV["JSCTEST_timeout"]
        # Float#ceil already returns an Integer; no further conversion needed.
        timeout = ENV["JSCTEST_timeout"].to_f.ceil
    end
    # Keep ncpus + 1 jobs running by default to avoid any stalls due
    # to ssh latency. Explicit settings take precedence: user's -j flag,
    # then the caller's option.
    parallelJobsOnEachHost = "+1"
    parallelJobsOnEachHost = $numChildProcesses if $numChildProcessesSetByUser
    parallelJobsOnEachHost = options[:parallelJobsOnEachHost] if options[:parallelJobsOnEachHost]
    # The wrapper script prints the first marker (capturing the offending
    # host's name) when the remote directory is missing; the job preamble
    # below prints the second when the unpacked runner state is gone.
    markerWithHost = Regexp.new(".*#{PARALLEL_REMOTE_WRAPPER_MARK_BEGIN}(.*)#{PARALLEL_REMOTE_WRAPPER_MARK_END}.*")
    markerWithoutHost = Regexp.new(".*#{PARALLEL_REMOTE_STATE_LOST_MARKER}.*")
    withGnuParallelSshLoginFile(remoteHosts) { |slf|
        cmd = [
            "parallel",
            "-j", parallelJobsOnEachHost.to_s,
            # NB: the tests exit with 0 regardless of whether they
            # passed or failed, so this will only retry tests that we
            # weren't able to get a result for, likely because the
            # connection went down or the remote OOM'd/crashed).
            "--retries", "5",
            "--line-buffer", # we know our output is line-oriented
            "--slf", slf,
            "--timeout", timeout.to_s,
            "-a", inputs,
            "if test -e #{$outputDir.basename}/.runner; then cd #{$outputDir.basename}/.runner; else echo #{PARALLEL_REMOTE_STATE_LOST_MARKER}; false; fi && " +
            exportBaseEnvironmentVariables(false) +
            $envVars.collect { |var| "export #{var} &&" }.join("") +
            "sh -c "
        ]
        runAndMonitorCommandOutput(cmd) { |pid, line|
            host = "<unknown host>"
            md = markerWithoutHost.match(line)
            unless md
                md = markerWithHost.match(line)
                host = md[1] if md
            end
            if md
                if $verbosity > 0
                    $stderr.puts("Remote host lost state, triggering high-level retry: #{host}")
                end
                # We could try to reprovision this specific remote
                # host, but that seems needlessly complicated (we
                # don't expect the remotes to go down every
                # minute...). Simply kill the GNU parallel process.
                Process.kill("TERM", pid)
            end
        }
    }
end
| |
# Unconditionally run the selftests on every execution, they're pretty cheap.
[ExecutorSelfTests, TestResultEvaluatorSelfTests].each(&:run)
| |
# Shared base for executors that run everything from the local machine:
# there is never a live remote-host list, so the superclass gets nil for it.
class NonRemoteTestsExecutor < BaseTestsExecutor
    def initialize(runlist, serialPlans, maxIterationsBounds, treatFailingAsFlaky, testRunner)
        super(runlist, serialPlans, nil, maxIterationsBounds, treatFailingAsFlaky)
        @testRunner = testRunner
    end
    # No remote provisioning to do; pass the (nil) host list straight through.
    def prepareExecution(remoteHosts)
        remoteHosts
    end
    def refreshExecution(runlist, serialPlans, completedTests, remoteHosts)
        # Local execution must never have accumulated remote hosts.
        raise "remoteHosts #{@remoteHosts}, expected nil" unless @remoteHosts.nil?
        @testRunner.prepare(runlist, serialPlans, completedTests, remoteHosts)
    end
end
| |
# Plain local run: build the bundle, prepare the runner with the full run
# list, execute, and read back the results.
class NormalTestsExecutor < NonRemoteTestsExecutor
    def prepareArtifacts(remoteHosts)
        raise "remoteHosts not nil" unless remoteHosts.nil?
        prepareBundle
        # Nothing is completed yet and there are no remote hosts.
        @testRunner.prepare(@runlist, @serialPlans, Set.new, nil)
    end
    def executeTests(remoteHosts)
        runTestRunner(@testRunner, nil, nil)
        cleanEmptyResultFiles
    end
    def updateStatusMap(iteration, statusMap)
        getStatusMap(statusMap)
    end
end
| |
# Runs tests out of a pre-built bundle: no bundle preparation, only a
# cleanup of stale runner state and old results before executing.
class BundleTestsExecutor < NonRemoteTestsExecutor
    def prepareArtifacts(remoteHosts)
        raise "remoteHosts not nil" unless remoteHosts.nil?
        cleanRunnerDirectory
        cleanOldResults
    end
    def executeTests(remoteHosts)
        runTestRunner(@testRunner, nil, nil)
        cleanEmptyResultFiles
    end
    def updateStatusMap(iteration, statusMap)
        getStatusMap(statusMap)
    end
end
| |
# Shared base for executors that drive tests on remote hosts.
class BaseRemoteTestsExecutor < BaseTestsExecutor
    def initialize(runlist, serialPlans, remoteHosts, iterationLimits,
                   treatFailingAsFlaky, testRunner)
        super(runlist, serialPlans, remoteHosts, iterationLimits, treatFailingAsFlaky)
        @testRunner = testRunner
    end
    def updateStatusMap(iteration, statusMap)
        getStatusMap(statusMap)
    end
    def refreshExecution(runlist, serialPlans, completedTests, remoteHosts)
        # Note, when running tests remotely, we always prepare tests
        # for all the hosts when refreshing execution, as even hosts
        # that went away during testing may have come back online in
        # the meantime -- so we don't expect a list of live remote
        # hosts to be passed in.
        @testRunner.prepare(runlist, serialPlans, completedTests, remoteHosts)
    end
end
| |
# Remote execution through the classic (per-host, statically sharded) runner.
class RemoteTestsExecutor < BaseRemoteTestsExecutor
    # Builds the bundle and assigns shards, returning the hosts that
    # answered the liveness probe.
    def prepareArtifacts(remoteHosts)
        raise "remoteHosts nil" if remoteHosts.nil?
        prepareBundle

        # The make runner statically shards the tests, so do a
        # liveness check before @testRunner.prepare. Otherwise, we'd
        # always end up scheduling tests on dead remotes and hamper
        # our progress.
        remoteHosts = getLiveRemoteHosts(remoteHosts)
        @testRunner.prepare(@runlist, @serialPlans, Set.new, remoteHosts)
        compressBundle
        remoteHosts
    end
    def prepareExecution(remoteHosts)
        prepareRemotes(remoteHosts)
    end
    def executeTests(remoteHosts)
        # Each host runs its own shard; hosts that fail mid-run are dropped.
        forEachRemote(remoteHosts, :dropOnFailure => true) { |index|
            runTestRunner(@testRunner, remoteHosts, index)
        }
    end
    private
    def getLiveRemoteHosts(remoteHosts)
        # This command is supposed to work on all remote hosts, so
        # reuse it for the liveness check.
        probe = commandToGetNumberOfProcessors
        forEachRemote(remoteHosts, :dropOnFailure => true, :timeout => REMOTE_TIMEOUT) { |_, remoteHost|
            begin
                sshRead(probe, remoteHost)
            rescue
                # It's what forEachRemote considers an expected failure.
                raise CommandExecutionFailed
            end
        }
    end
end
| |
# Remote execution through GNU parallel: all hosts pull jobs dynamically
# from shared job files instead of static shards.
class GnuParallelTestsExecutor < BaseRemoteTestsExecutor
    def prepareArtifacts(remoteHosts)
        prepareBundle
        @testRunner.prepare(@runlist, @serialPlans, Set.new, remoteHosts)
        compressBundle
    end
    def prepareExecution(remoteHosts)
        prepareRemotes(remoteHosts)
    end
    def executeTests(remoteHosts)
        # Serial tests get exactly one job slot per host; the rest use the
        # default per-host parallelism.
        runGnuParallelRunner(remoteHosts, $runnerDir + "serial-tests",
                             :parallelJobsOnEachHost => 1)
        runGnuParallelRunner(remoteHosts, $runnerDir + "parallel-tests")
    end
end
| |
# Entry point for a plain local run (neither --bundle nor --tarball).
def runNormal
    raise if $bundle || $tarball

    runner = TestRunner.create($testRunnerType, $runnerDir)
    executor = NormalTestsExecutor.new($runlist, $serialPlans, ITERATION_LIMITS,
                                       $treatFailingAsFlaky, runner)
    detectFailures(executor.loop)
end
| |
# Entry point for remote execution with the classic runner.
def runRemote
    raise unless $remote

    runner = TestRunner.create($testRunnerType, $runnerDir)
    executor = RemoteTestsExecutor.new($runlist, $serialPlans, $remoteHosts,
                                       ITERATION_LIMITS, $treatFailingAsFlaky, runner)
    detectFailures(executor.loop)
end
| |
# Entry point for remote execution via GNU parallel.
def runGnuParallel
    raise "Can only use --gnu-parallel-runner with --remote*" unless $remote
    raise "Can only use --gnu-parallel-runner with the default writer" unless $testWriter == "default"

    runner = TestRunner.create($testRunnerType, $runnerDir)
    executor = GnuParallelTestsExecutor.new($runlist, $serialPlans, $remoteHosts,
                                            ITERATION_LIMITS, $treatFailingAsFlaky,
                                            runner)
    detectFailures(executor.loop)
end
| |
puts

# The GNU parallel runner only makes sense against remote hosts.
raise if $testRunnerType == :gnuparallel && !$remote

# Dispatch on the requested execution mode.
if $bundle
    runBundle
elsif $remote
    $testRunnerType == :gnuparallel ? runGnuParallel : runRemote
elsif $tarball
    runTarball
else
    runNormal
end