#!/usr/bin/env ruby
# Copyright (C) 2013-2021 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
# Standard-library dependencies only; no gems are required at this point.
require 'fileutils'
require 'getoptlong'
require 'pathname'
require 'rbconfig'
require 'tempfile'
require 'uri'
require 'yaml'
|
# Teach the URI library about ssh:// URLs so the --remote arguments can be
# parsed with URI(); an SSH URL is generic apart from its default port.
module URI
    class SSH < Generic
        DEFAULT_PORT = 22
    end
    if respond_to?(:register_scheme)
        # Ruby >= 3.1: the supported registration API. The @@schemes class
        # variable was removed from newer releases of the uri gem, so poking
        # it directly breaks on modern Rubies.
        register_scheme('SSH', SSH)
    else
        @@schemes['SSH'] = SSH
    end
end
|
|
class String
    # Back-compat shim for Rubies that predate the native String#scrub
    # (< 2.1): replace invalid byte sequences so test output can always be
    # printed. Guarded so we do not clobber the richer native method (which
    # accepts a replacement argument) on modern interpreters.
    unless method_defined?(:scrub)
        def scrub
            # Round-trip through UTF-16 to force replacement of invalid bytes.
            encode("UTF-16be", :invalid=>:replace, :replace=>"?").encode('UTF-8')
        end
    end
end
|
|
|
# A remote test machine parsed from --remote / --remote-config-file.
RemoteHost = Struct.new(:name, :user, :host, :port, :remoteDirectory)

# Well-known locations inside the WebKit checkout, derived from this
# script's own (symlink-resolved) location: Tools/Scripts/<this script>.
THIS_SCRIPT_PATH = Pathname.new(__FILE__).realpath
SCRIPTS_PATH = THIS_SCRIPT_PATH.dirname
WEBKIT_PATH = SCRIPTS_PATH.dirname.dirname
LAYOUTTESTS_PATH = WEBKIT_PATH + "LayoutTests"
WASMTESTS_PATH = WEBKIT_PATH + "JSTests/wasm"
JETSTREAM2_PATH = WEBKIT_PATH + "PerformanceTests/JetStream2"
CHAKRATESTS_PATH = WEBKIT_PATH + "JSTests/ChakraCore/test"
# Sanity-check that the directory layout is the expected Tools/Scripts.
raise unless SCRIPTS_PATH.basename.to_s == "Scripts"
raise unless SCRIPTS_PATH.dirname.basename.to_s == "Tools"

HELPERS_PATH = SCRIPTS_PATH + "jsc-stress-test-helpers"
# Per-test status files written by the runners: test_status_<n>, containing
# "P" (pass) or "F" (fail).
STATUS_FILE_PREFIX = "test_status_"
STATUS_FILE_PASS = "P"
STATUS_FILE_FAIL = "F"

# These are all random and adequately large to be unlikely to appear
# in practice, except as used in this file.
PARALLEL_REMOTE_WRAPPER_MARK_BEGIN = "5d65329bd1a3"
PARALLEL_REMOTE_WRAPPER_MARK_END = "a9aea5c3b843"
PARALLEL_REMOTE_STATE_LOST_MARKER = "709fb7a77c45231918eb118a"
|
|
# Shellwords is essential for building the test scripts; bail out (but do
# not fail the build) when it is unavailable.
begin
    require 'shellwords'
rescue LoadError => e
    # Rescue LoadError, not Exception: rescuing Exception would also swallow
    # SignalException/SystemExit and mask unrelated fatal errors.
    $stderr.puts "Warning: did not find shellwords, not running any tests."
    exit 0
end
|
|
|
# Displaying profiler output needs the json and highline gems; degrade
# gracefully (feature disabled) when they are missing.
$canRunDisplayProfilerOutput = false

begin
    require 'rubygems'
    require 'json'
    require 'highline'
    $canRunDisplayProfilerOutput = true
rescue LoadError => e
    # Only library-load failures are expected here; rescuing the broader
    # Exception would also hide signals and programming errors.
    $stderr.puts "Warning: did not find json or highline; some features will be disabled."
    $stderr.puts "Run \"sudo gem install json highline\" to fix the issue."
    $stderr.puts "Error: #{e.inspect}"
end
|
|
|
# Echo a command to stderr in a copy-pasteable, shell-escaped form.
def printCommandArray(*cmd)
    begin
        # map (not each): each returns the receiver, which silently discarded
        # every shellescape result and joined the raw words instead.
        commandArray = cmd.map{|value| Shellwords.shellescape(value.to_s)}.join(' ')
    rescue
        # Shellwords may be unavailable on exotic hosts; fall back to a plain join.
        commandArray = cmd.join(' ')
    end
    $stderr.puts ">> #{commandArray}"
end
|
|
|
# Raised by mysys() when a spawned command exits non-zero. Subclasses
# Exception (not StandardError) so a bare `rescue` elsewhere cannot
# accidentally swallow a failed build/test command.
class CommandExecutionFailed < Exception
end
|
|
# Run a command given as an argv array, echoing it when verbose.
# Returns nil on success; raises CommandExecutionFailed on a non-zero exit
# unless options[:ignoreFailure] is set.
def mysys(commandArray, options={})
    # Splat the argv so printCommandArray escapes each word individually
    # instead of receiving one nested array.
    printCommandArray(*commandArray) if $verbosity >= 1
    successful = system(*commandArray)
    status = $?
    if successful || options[:ignoreFailure]
        return
    end
    raise CommandExecutionFailed, "Command failed: #{status.inspect}"
end
|
|
|
# Shell-escape every element of an argv array and join them into a single
# command-line string. Raises if any element is not a String.
def escapeAll(array)
    array.map {
        | v |
        # Report the offending array: the original interpolated the top-level
        # object's #inspect ("main"), which was useless for debugging.
        raise "Detected a non-string in #{array.inspect}" unless v.is_a? String
        Shellwords.shellescape(v)
    }.join(' ')
end
|
|
|
|
|
# --- Global configuration state; mutated by the option parsing below. ---
$jscPath = nil                       # Path to the jsc binary under test.
$doNotMessWithVMPath = false         # --no-copy: test the binary in place.
$jitTests = true                     # --no-jit clears this.
$memoryLimited = false               # --memory-limited: honor //@ skip tags.
$outputDir = Pathname.new("results") # --output-dir.
$verbosity = 0                       # Incremented per --verbose.
$bundle = nil                        # --run-bundle: previously created bundle.
$tarball = false                     # --tarball: package the bundle when done.
$tarFileName = "payload.tar.gz"
$copyVM = false                      # Copy the VM into the bundle first.
$testRunnerType = nil                # :shell, :make, :ruby or :gnuparallel.
$testWriter = "default"              # Which test-script writer to load.
$remoteHosts = []                    # RemoteHost entries to run on.
$architecture = nil                  # Detected or forced via --arch.
$forceArchitecture = nil             # --force-architecture (arch(1) prefix).
$hostOS = nil                        # Detected or forced via --os.
$model = nil                         # --model.
$filter = nil                        # Regexp restricting which tests run.
$envVars = []                        # Extra VAR=value settings for jsc.
$mode = "full"                       # "full", "quick" or "basic".
$buildType = "release"               # --debug / --release.
$forceCollectContinuously = false
$reportExecutionTime = false
$ldd = nil                           # Alternate ldd binary.
$artifact_exec_wrapper = nil         # Wrapper command for build artifacts.
$numChildProcessesSetByUser = false
# Random token identifying this run (used for scratch/remote file names).
$runUniqueId = Random.new.bytes(16).unpack("H*")[0]
|
|
|
# Print command-line help and exit with a non-zero status. Called for
# --help and on argument errors.
def usage
    puts "run-jsc-stress-tests -j <shell path> <collections path> [<collections path> ...]"
    puts
    puts "--jsc (-j) Path to JavaScriptCore build product. This option is required."
    puts "--no-copy Do not copy the JavaScriptCore build product before testing."
    puts " --jsc specifies an already present JavaScriptCore to test."
    puts "--memory-limited Indicate that we are targeting the test for a memory limited device."
    puts " Skip tests tagged with //@skip if $memoryLimited"
    puts "--no-jit Do not run JIT specific tests."
    puts "--force-collectContinuously Enable the collectContinuously mode even if disabled on this"
    puts " platform."
    puts "--output-dir (-o) Path where to put results. Default is #{$outputDir}."
    puts "--verbose (-v) Print more things while running."
    puts "--run-bundle Runs a bundle previously created by run-jsc-stress-tests."
    puts "--tarball [fileName] Creates a tarball of the final bundle. Use name if supplied for tar file."
    puts "--arch Specify architecture instead of determining from JavaScriptCore build."
    puts "--force-architecture Override the architecture to run tests with."
    puts " e.g. x86, x86_64, arm."
    puts "--ldd Use alternate ldd"
    puts "--artifact-exec-wrapper Wrapper for executing a build artifact"
    puts "--os Specify os instead of determining from JavaScriptCore build."
    puts " e.g. darwin, linux & windows."
    puts "--shell-runner Uses the shell-based test runner instead of the default make-based runner."
    puts " In general the shell runner is slower than the make runner."
    puts "--make-runner Uses the faster make-based runner."
    puts "--ruby-runner Uses the ruby runner for machines without unix shell or make."
    puts "--test-writer [writer] Specifies the test script format."
    puts " default is to use shell scripts to run the tests"
    puts " \"ruby\" to use ruby scripts for systems without a unix shell."
    puts "--remote Specify a remote host on which to run tests from command line argument."
    puts "--remote-config-file Specify a remote host on which to run tests from JSON file."
    puts "--report-execution-time Print execution time for each test."
    puts "--child-processes (-c) Specify the number of child processes."
    puts "--filter Only run tests whose name matches the given regular expression."
    puts "--help (-h) Print this message."
    puts "--env-vars Add a list of environment variables to set before running jsc."
    puts " Each environment variable should be separated by a space."
    puts " e.g. \"foo=bar x=y\" (no quotes). Note, if you pass DYLD_FRAMEWORK_PATH"
    puts " it will override the default value."
    puts "--quick (-q) Only run with the default and no-cjit-validate modes."
    puts "--basic Run with default and these additional modes: no-llint,"
    puts " no-cjit-validate-phases, no-cjit-collect-continuously, dfg-eager"
    puts " and for FTL platforms: no-ftl, ftl-eager-no-cjit and"
    puts " ftl-no-cjit-small-pool."
    exit 1
end
|
|
|
# The --jsc argument; when still nil after parsing we try to auto-detect it.
jscArg = nil

# Parse the command line; each recognized flag mutates the globals above.
GetoptLong.new(['--help', '-h', GetoptLong::NO_ARGUMENT],
               ['--jsc', '-j', GetoptLong::REQUIRED_ARGUMENT],
               ['--no-copy', GetoptLong::NO_ARGUMENT],
               ['--memory-limited', GetoptLong::NO_ARGUMENT],
               ['--no-jit', GetoptLong::NO_ARGUMENT],
               ['--force-collectContinuously', GetoptLong::NO_ARGUMENT],
               ['--output-dir', '-o', GetoptLong::REQUIRED_ARGUMENT],
               ['--run-bundle', GetoptLong::REQUIRED_ARGUMENT],
               ['--tarball', GetoptLong::OPTIONAL_ARGUMENT],
               ['--force-vm-copy', GetoptLong::NO_ARGUMENT],
               ['--arch', GetoptLong::REQUIRED_ARGUMENT],
               ['--force-architecture', GetoptLong::REQUIRED_ARGUMENT],
               ['--ldd', GetoptLong::REQUIRED_ARGUMENT],
               ['--artifact-exec-wrapper', GetoptLong::REQUIRED_ARGUMENT],
               ['--os', GetoptLong::REQUIRED_ARGUMENT],
               ['--shell-runner', GetoptLong::NO_ARGUMENT],
               ['--make-runner', GetoptLong::NO_ARGUMENT],
               ['--ruby-runner', GetoptLong::NO_ARGUMENT],
               ['--gnu-parallel-runner', GetoptLong::NO_ARGUMENT],
               ['--test-writer', GetoptLong::REQUIRED_ARGUMENT],
               ['--remote', GetoptLong::REQUIRED_ARGUMENT],
               ['--remote-config-file', GetoptLong::REQUIRED_ARGUMENT],
               ['--report-execution-time', GetoptLong::NO_ARGUMENT],
               ['--model', GetoptLong::REQUIRED_ARGUMENT],
               ['--child-processes', '-c', GetoptLong::REQUIRED_ARGUMENT],
               ['--filter', GetoptLong::REQUIRED_ARGUMENT],
               ['--verbose', '-v', GetoptLong::NO_ARGUMENT],
               ['--env-vars', GetoptLong::REQUIRED_ARGUMENT],
               ['--debug', GetoptLong::NO_ARGUMENT],
               ['--release', GetoptLong::NO_ARGUMENT],
               ['--quick', '-q', GetoptLong::NO_ARGUMENT],
               ['--basic', GetoptLong::NO_ARGUMENT]).each {
    | opt, arg |
    case opt
    when '--help'
        usage
    when '--jsc'
        jscArg = arg
    when '--no-copy'
        $doNotMessWithVMPath = true
    when '--output-dir'
        $outputDir = Pathname.new(arg)
    when '--memory-limited'
        $memoryLimited = true
    when '--no-jit'
        $jitTests = false
    when '--force-collectContinuously'
        $forceCollectContinuously = true;
    when '--verbose'
        $verbosity += 1
    when '--run-bundle'
        $bundle = Pathname.new(arg)
    when '--tarball'
        # Tarballing implies copying the VM into the bundle first.
        $tarball = true
        $copyVM = true
        $tarFileName = arg unless arg == ''
    when '--force-vm-copy'
        $copyVM = true
    when '--shell-runner'
        $testRunnerType = :shell
    when '--make-runner'
        $testRunnerType = :make
    when '--ruby-runner'
        $testRunnerType = :ruby
    when '--gnu-parallel-runner'
        $testRunnerType = :gnuparallel
    when '--test-writer'
        $testWriter = arg
    when '--remote'
        # Running remotely implies copying the VM and building a tarball.
        $copyVM = true
        $tarball = true
        $remote = true
        uri = URI("ssh://" + arg)
        $remoteHosts << RemoteHost.new("default-#{$remoteHosts.length}", uri.user, uri.host, uri.port)
    when '--remote-config-file'
        $remoteConfigFile = arg
    when '--report-execution-time'
        $reportExecutionTime = true
    when '--child-processes'
        $numChildProcesses = arg.to_i
        $numChildProcessesSetByUser = true
    when '--filter'
        $filter = Regexp.new(arg)
    when '--arch'
        $architecture = arg
    when '--force-architecture'
        $architecture = arg unless $architecture
        $forceArchitecture = arg
    when '--ldd'
        $ldd = arg
    when '--artifact-exec-wrapper'
        $artifact_exec_wrapper = arg
    when '--os'
        $hostOS = arg
    when '--model'
        # Strip any surrounding quotes the shell may have left in place.
        $model = arg.gsub(/\A['"]+|['"]+\Z/, '')
    when '--env-vars'
        $envVars = arg.gsub(/\s+/, ' ').split(' ')
    when '--quick'
        $mode = "quick"
    when '--basic'
        $mode = "basic"
    when '--debug'
        $buildType = "debug"
    when '--release'
        $buildType = "release"
    end
}
|
|
|
# Load remote hosts from a JSON config file. Supports both the legacy
# single-host schema ({"remote": ..., "remoteDirectory": ...}) and the new
# multi-host schema ({"remotes": [{"name", "address", "remoteDirectory"}]}).
if $remoteConfigFile
    file = File.read($remoteConfigFile)
    config = JSON.parse(file)

    # old style config allowing for only one remote
    if !$remote and config['remote']
        $copyVM = true
        $tarball = true
        $remote = true
        uri = URI("ssh://" + config['remote'])
        $remoteHosts = [ RemoteHost.new("default", uri.user, uri.host, uri.port) ]
        if config['remoteDirectory']
            $remoteHosts[0].remoteDirectory = config['remoteDirectory']
        end
    end

    # we can combine --remote and a new style config
    if config['remotes']
        $copyVM = true
        $tarball = true
        $remote = true
        $remoteHosts += config['remotes'].map {
            | remote |
            uri = URI("ssh://" + remote['address'])

            host = RemoteHost.new(remote['name'], uri.user, uri.host, uri.port)
            if remote['remoteDirectory']
                host.remoteDirectory = remote['remoteDirectory']
            end
            host
        }
    end
end
|
|
|
unless jscArg
    # If we're not provided a JSC path, try to come up with a sensible JSC path automagically.
    command = SCRIPTS_PATH.join("webkit-build-directory").to_s
    command += ($buildType == "release") ? " --release" : " --debug"
    command += " --executablePath"

    output = `#{command}`.split("\n")
    # `!output.length` was always false (0 is truthy in Ruby, and length is
    # never nil); test for emptiness explicitly.
    if output.empty?
        $stderr.puts "Error: must specify --jsc <path>"
        exit 1
    end

    output = output[0]
    # Probe the known build layouts in order: flat, framework, CMake.
    jscArg = Pathname.new(output).join("jsc")
    jscArg = Pathname.new(output).join("JavaScriptCore.framework", "Helpers", "jsc") if !File.file?(jscArg)
    jscArg = Pathname.new(output).join("bin", "jsc") if !File.file?(jscArg) # Support CMake build.
    if !File.file?(jscArg)
        $stderr.puts "Error: must specify --jsc <path>"
        exit 1
    end

    puts "Using the following jsc path: #{jscArg}"
end
|
|
|
# Resolve the jsc path; with --no-copy we leave it untouched (it may point
# into a build tree deliberately), otherwise resolve symlinks now.
if $doNotMessWithVMPath
    $jscPath = Pathname.new(jscArg)
else
    $jscPath = Pathname.new(jscArg).realpath
end

# Show the interactive progress meter only for quiet, tty, single-host runs.
$progressMeter = ($verbosity == 0 and $stdout.tty? and $remoteHosts.length <= 1)

# When re-running an existing bundle, both the VM and the output live inside it.
if $bundle
    $jscPath = $bundle + ".vm" + "JavaScriptCore.framework" + "Helpers" + "jsc"
    $outputDir = $bundle
end
|
|
|
# Try to determine architecture. Return nil on failure.
|
|
# Try to determine architecture. Return nil on failure.
# Runs `otool -afh` on $jscPath; the line following the header line that
# names "magic ... cputype" carries the corresponding numeric values, with
# cputype in the second column.
def machOArchitectureCode
    begin
        otoolLines = `otool -afh #{Shellwords.shellescape($jscPath.to_s)}`.split("\n")
        otoolLines.each_with_index {
            | value, index |
            if value =~ /magic/ and value =~ /cputype/
                # Second whitespace-separated field of the values line.
                return otoolLines[index + 1].split[1].to_i
            end
        }
    rescue
        $stderr.puts "Warning: unable to execute otool."
    end
    $stderr.puts "Warning: unable to determine architecture."
    nil
end
|
|
|
# Map a Mach-O cputype value (from machOArchitectureCode) onto the
# architecture names used throughout this script; nil when unknown.
def determineArchitectureFromMachOBinary
    cputype = machOArchitectureCode
    return nil unless cputype

    abi64 = 0x01000000
    names = {
        7 => "x86",
        7 | abi64 => "x86-64",
        12 => "arm",
        12 | abi64 => "arm64",
    }
    arch = names[cputype]
    unless arch
        $stderr.puts "Warning: unable to determine architecture from code: #{cputype}"
    end
    arch
end
|
|
|
# Determine the architecture of the ELF binary at $jscPath by reading the
# e_machine field of its header. Returns nil (with a warning) on failure.
def determineArchitectureFromELFBinary
    # Binary-mode read so the header bytes arrive unmangled regardless of the
    # locale; the block form guarantees the handle is closed (the original
    # leaked the File object).
    data = File.open($jscPath.to_s, "rb") { |f| f.read(20) }

    # Guard short/empty files: the original would raise NoMethodError on nil.
    if data.nil? || data.bytesize < 20 || !(data[0, 4] == "\x7F\x45\x4C\x46".b)
        $stderr.puts "Warning: Missing ELF magic in file #{Shellwords.shellescape($jscPath.to_s)}"
        return nil
    end

    # e_machine is the 2-byte field at offset 18. (The original sliced
    # data[18, 20], which only yielded 2 bytes because the read was capped.)
    # MIPS and PowerPC may be either big- or little-endian. S390 (which includes
    # S390x) is big-endian. The rest are little-endian. All comparison
    # literals are forced to ASCII-8BIT (.b) so they compare byte-for-byte
    # against the binary read regardless of source encoding.
    code = data[18, 2]
    case code
    when "\x03\0".b
        "x86"
    when "\x08\0".b, "\0\x08".b
        "mips"
    when "\x14\0".b, "\0\x14".b
        "powerpc"
    when "\x15\0".b, "\0\x15".b
        "powerpc64"
    when "\0\x16".b
        "s390"
    when "\x28\0".b
        "arm"
    when "\x3E\0".b
        "x86-64"
    when "\xB7\0".b
        "arm64"
    when [243, 0].pack("cc")
        "riscv64"
    else
        $stderr.puts "Warning: unable to determine architecture from code: #{code}"
        nil
    end
end
|
|
|
# Determine the architecture of the PE binary at $jscPath by reading the
# COFF machine field. Returns nil (with a warning) on failure.
def determineArchitectureFromPEBinary
    # Binary-mode read: with a text-mode read, String#[] is character-indexed,
    # so non-ASCII bytes in the DOS stub would corrupt the offset math. The
    # block form also closes the handle (the original leaked it).
    data = File.open($jscPath.to_s, "rb") { |f| f.read(1024) }

    # Guard short/empty files before dereferencing fixed offsets.
    if data.nil? || data.bytesize < 0x40 || !(data[0, 2] == "MZ")
        $stderr.puts "Warning: Missing PE magic in file #{Shellwords.shellescape($jscPath.to_s)}"
        return nil
    end

    peHeaderAddr = data[0x3c, 4].unpack('V').first # 32-bit unsigned int little endian

    if !(data[peHeaderAddr, 4] == "PE\0\0")
        $stderr.puts "Warning: Incorrect PE header in file #{Shellwords.shellescape($jscPath.to_s)}"
        return nil
    end

    machine = data[peHeaderAddr + 4, 2].unpack('v').first # 16-bit unsigned short, little endian

    case machine
    when 0x014c
        "x86"
    when 0x8664
        "x86-64"
    else
        $stderr.puts "Warning: unsupported machine type: #{machine}"
        nil
    end
end
|
|
|
# Pick the architecture-detection strategy appropriate to the host OS.
# PlayStation targets are always x86-64; unknown platforms yield nil.
def determineArchitecture
    case $hostOS
    when "darwin" then determineArchitectureFromMachOBinary
    when "linux" then determineArchitectureFromELFBinary
    when "windows" then determineArchitectureFromPEBinary
    when "playstation" then "x86-64"
    else
        $stderr.puts "Warning: unable to determine architecture on this platform."
        nil
    end
end
|
|
|
# Map RbConfig's host_os string onto the coarse OS names this script uses
# ("darwin", "linux", "windows"); nil with a warning when unrecognized.
def determineOS
    hostDescription = RbConfig::CONFIG["host_os"]
    return "darwin" if hostDescription =~ /darwin/i
    return "linux" if hostDescription =~ /linux/i
    return "windows" if hostDescription =~ /mswin|mingw|cygwin/
    $stderr.puts "Warning: unable to determine host operating system"
    nil
end
|
|
|
# Fill in anything not forced via --os / --arch.
$hostOS = determineOS unless $hostOS
$architecture = determineArchitecture unless $architecture
# FTL is unavailable on 32-bit x86/ARM, MIPS, RISC-V 64 and on the Windows
# and PlayStation ports.
$isFTLPlatform = !($architecture == "x86" || $architecture == "arm" || $architecture == "mips" || $architecture == "riscv64" || $hostOS == "windows" || $hostOS == "playstation")

if $architecture == "x86"
    # The JIT is temporarily disabled on this platform since
    # https://trac.webkit.org/changeset/237547
    $jitTests = false
end
|
|
# FTL test variants run only when JIT testing is on and the platform has an
# FTL backend.
def isFTLEnabled
    $jitTests && $isFTLPlatform
end
|
|
|
# Default runner: the shell runner for remote darwin targets, make elsewhere.
if !$testRunnerType
    if $remote and $hostOS == "darwin"
        $testRunnerType = :shell
    else
        $testRunnerType = :make
    end
end

# Only the make and gnu-parallel runners know how to shard across hosts.
if $remoteHosts.length > 1 and ($testRunnerType != :make) and ($testRunnerType != :gnuparallel)
    raise "Multiple remote hosts only supported with the make or gnu-parallel runners"
end

# The PlayStation port has no unix shell; use its dedicated test writer.
if $hostOS == "playstation" && $testWriter == "default"
    $testWriter = "playstation"
end

if $testWriter
    # The writer name is interpolated into a require_relative below; reject
    # anything that could escape the expected file-name shape.
    if /[^-a-zA-Z0-9_]/.match($testWriter)
        raise "Invalid test writer #{$testWriter} given"
    end
end
|
|
|
# We force all tests to use a smaller (1.5M) stack so that stack overflow tests can run faster.
|
|
BASE_OPTIONS = ["--useFTLJIT=false", "--useFunctionDotArguments=true", "--validateExceptionChecks=true", "--useDollarVM=true", "--maxPerThreadStackUsage=1572864"]
|
|
EAGER_OPTIONS = ["--thresholdForJITAfterWarmUp=10", "--thresholdForJITSoon=10", "--thresholdForOptimizeAfterWarmUp=20", "--thresholdForOptimizeAfterLongWarmUp=20", "--thresholdForOptimizeSoon=20", "--thresholdForFTLOptimizeAfterWarmUp=20", "--thresholdForFTLOptimizeSoon=20", "--thresholdForOMGOptimizeAfterWarmUp=20", "--thresholdForOMGOptimizeSoon=20", "--maximumEvalCacheableSourceLength=150000", "--useEagerCodeBlockJettisonTiming=true", "--repatchBufferingCountdown=0"]
|
|
# NOTE: Tests rely on this using scribbleFreeCells.
|
|
NO_CJIT_OPTIONS = ["--useConcurrentJIT=false", "--thresholdForJITAfterWarmUp=100", "--scribbleFreeCells=true"]
|
|
B3O1_OPTIONS = ["--defaultB3OptLevel=1", "--useDataICInOptimizingJIT=1"]
|
|
B3O0_OPTIONS = ["--maxDFGNodesInBasicBlockForPreciseAnalysis=100", "--defaultB3OptLevel=0"]
|
|
FTL_OPTIONS = ["--useFTLJIT=true"]
|
|
FORCE_LLINT_EXIT_OPTIONS = ["--forceOSRExitToLLInt=true"]
|
|
EXECUTABLE_FUZZER_OPTIONS = ["--useExecutableAllocationFuzz=true", "--fireExecutableAllocationFuzzRandomly=true"]
|
|
|
|
require_relative "webkitruby/jsc-stress-test-writer-#{$testWriter}"
|
|
|
|
# Collect-continuously stress variants are enabled on release builds (they
# are too slow on debug builds) unless explicitly forced on.
def shouldCollectContinuously?
    # `||` instead of `or`: `or` binds looser than `=` and is a well-known
    # precedence trap in value-returning expressions.
    $buildType == "release" || $forceCollectContinuously
end
|
|
|
# Extra options for the collect-continuously stress variants (empty when
# such runs are disabled, e.g. on debug builds without --force-collectContinuously).
COLLECT_CONTINUOUSLY_OPTIONS = shouldCollectContinuously? ? ["--collectContinuously=true", "--useGenerationalGC=false", "--verifyGC=true"] : []

# All planned test invocations; $serialRunlist additionally tracks the ones
# that must not run in parallel with anything else.
$serialRunlist = []
$runlist = []
|
|
|
# Given the path to the jsc binary, locate the JavaScriptCore.framework
# directory that should be shipped alongside it. Returns nil when the
# layout is unrecognized, meaning "fall back to a generic VM copy".
def frameworkFromJSCPath(jscPath)
    parent = jscPath.dirname
    parentName = parent.basename.to_s
    if ["Resources", "Helpers"].include?(parentName) && parent.dirname.basename.to_s == "JavaScriptCore.framework"
        # jsc lives inside the framework bundle itself.
        parent.dirname
    elsif $hostOS == "playstation"
        parent
    elsif parentName.start_with?("Debug", "Release")
        # Build-products directory: the framework sits next to the binary.
        parent + "JavaScriptCore.framework"
    else
        $stderr.puts "Warning: cannot identify JSC framework, doing generic VM copy."
        nil
    end
end
|
|
|
# Translate a bundle-relative resource path into one usable from inside
# $benchmarkDirectory by prefixing one ".." per directory component.
def pathToBundleResourceFromBenchmarkDirectory(resourcePath)
    upward = $benchmarkDirectory.each_filename.inject(Pathname.new(".")) {
        | dir, _component |
        dir + ".."
    }
    upward + resourcePath
end
|
|
|
# Benchmark-directory-relative path to the jsc binary under test.
def pathToVM
    pathToBundleResourceFromBenchmarkDirectory($jscPath)
end
|
|
|
# Build the argv used to invoke the VM: optional artifact-exec wrapper,
# optional /usr/bin/arch prefix to force an architecture, then jsc itself.
def vmCommand
    argv = [pathToVM.to_s]
    argv.unshift($artifact_exec_wrapper) unless $artifact_exec_wrapper.nil?
    argv = ["/usr/bin/arch", "-#{$forceArchitecture}"] + argv if $forceArchitecture
    argv
end
|
|
|
# Benchmark-directory-relative path to the bundled helper scripts.
def pathToHelpers
    pathToBundleResourceFromBenchmarkDirectory(".helpers")
end

# Per-test option state, updated by the //@ directives of each test file.
$runCommandOptions = {}
$testSpecificRequiredOptions = []
|
|
|
# Monotonic counter used to generate unique scratch-file names.
$uniqueFilenameCounter = 0

# Return a fresh pathname ("temp-<n><extension>") inside the payload
# scratch directory, which is created on demand next to $outputDir.
def uniqueFilename(extension)
    scratchDir = $outputDir + "_payload"
    Dir.mkdir(scratchDir) unless scratchDir.directory?
    filename = scratchDir.realpath + "temp-#{$uniqueFilenameCounter}#{extension}"
    $uniqueFilenameCounter += 1
    filename
end
|
|
|
# Name under which a test invocation's outputs are filed:
# "<collection>/<benchmark>.<kind>".
def baseOutputName(kind)
    [$collectionName, "#{$benchmark}.#{kind}"].join("/")
end
|
|
|
# Record one test invocation (a Plan) on the runlist, unless filtered out by
# --filter. kind names the variant (e.g. "default"), command is the argv,
# and additionalEnv lists extra VAR=value settings for this invocation.
def addRunCommand(kind, command, outputHandler, errorHandler, *additionalEnv)
    $didAddRunCommand = true
    name = baseOutputName(kind)
    if $filter and name !~ $filter
        return
    end
    plan = Plan.new(
        $benchmarkDirectory, command, "#{$collectionName}/#{$benchmark}", name, outputHandler,
        errorHandler)
    plan.additionalEnv.push(*additionalEnv)
    if $runCommandOptions[:serial]
        # Add this to the list of tests to be run on their own, so
        # that we can treat them specially when scheduling, but keep
        # it in the $runlist for code that doesn't care about
        # scheduling.
        $serialRunlist << plan
    end

    # Front-load slow tests in parallel runs so the overall run finishes sooner.
    if $numChildProcesses > 1 and $runCommandOptions[:isSlow]
        $runlist.unshift plan
    else
        $runlist << plan
    end
end
|
|
|
# Returns true if there were run commands found in the file ($benchmarkDirectory +
# $benchmark), in which case those run commands have already been executed. Otherwise
# returns false, in which case you're supposed to add your own run commands.
def parseRunCommands
    oldDidAddRunCommand = $didAddRunCommand
    $didAddRunCommand = false
    $skipped = false

    Dir.chdir($outputDir) {
        File.open($benchmarkDirectory + $benchmark) {
            | inp |
            inp.each_line {
                | line |
                begin
                    doesMatch = line =~ /^\/\/@/
                rescue Exception => e
                    # Apparently this happens in the case of some UTF8 stuff in some files, where
                    # Ruby tries to be strict and throw exceptions.
                    next
                end
                next unless doesMatch
                # Execute the //@ directive (e.g. runDefault, skip) in this scope.
                eval $~.post_match
                if $skipped
                    break
                end
            }
        }
    }

    result = $didAddRunCommand
    # Use `||`: the previous `result or oldDidAddRunCommand` parsed as
    # `($didAddRunCommand = result) or oldDidAddRunCommand` because `or`
    # binds looser than `=`, silently discarding the saved flag.
    $didAddRunCommand = result || oldDidAddRunCommand
    result
end
|
|
|
|
# //@ directive: mark the test slow; slow tests are skipped in --quick mode.
def slow!
    $runCommandOptions[:isSlow] = true
    skip() if ($mode == "quick")
end

# //@ directive: a crashing exit is acceptable for this test.
def crashOK!
    $testSpecificRequiredOptions += ["-s"]
    $runCommandOptions[:crashOK] = true
end

# //@ directive: run this test serially rather than in parallel.
def serial!
    $runCommandOptions[:serial] = true
end

# //@ directive: append extra jsc options required by this test.
def requireOptions(*options)
    $testSpecificRequiredOptions += options
end
|
|
|
# Queue a run of the current benchmark with the standard base options plus
# the variant-specific and test-required options.
def runWithOutputHandler(kind, outputHandler, *options)
    addRunCommand(kind, vmCommand + BASE_OPTIONS + options + $testSpecificRequiredOptions + [$benchmark.to_s], outputHandler, simpleErrorHandler)
end

# Like runWithOutputHandler, but without BASE_OPTIONS, for tests that need
# full control over their option set.
def runWithOutputHandlerWithoutBaseOption(kind, outputHandler, *options)
    addRunCommand(kind, vmCommand + options + $testSpecificRequiredOptions + [$benchmark.to_s], outputHandler, simpleErrorHandler)
end

# The common entry point used by the run* variants below.
def run(kind, *options)
    runWithOutputHandler(kind, silentOutputHandler, *options)
end

def runWithoutBaseOption(kind, *options)
    runWithOutputHandlerWithoutBaseOption(kind, silentOutputHandler, *options)
end
|
|
|
# Default configuration but with the FTL disabled.
def runNoFTL(*optionalTestSpecificOptions)
    run("no-ftl", *optionalTestSpecificOptions)
end

# Run with a forced RAM-size limit.
def runWithRAMSize(size, *optionalTestSpecificOptions)
    run("ram-size-#{size}", "--forceRAMSize=#{size}", *optionalTestSpecificOptions)
end

# Heap-hungry test: skipped entirely on memory-limited devices.
def runOneLargeHeap(*optionalTestSpecificOptions)
    if $memoryLimited
        $didAddRunCommand = true
        puts "Skipping #{$collectionName}/#{$benchmark}"
    else
        run("default", *optionalTestSpecificOptions)
    end
end

# Interpreter-only run.
def runNoJIT(*optionalTestSpecificOptions)
    run("no-jit", "--useJIT=false", *optionalTestSpecificOptions)
end
|
|
|
# Skip the LLInt and start in the baseline JIT (JIT builds only).
def runNoLLInt(*optionalTestSpecificOptions)
    if $jitTests
        run("no-llint", "--useLLInt=false", *optionalTestSpecificOptions)
    end
end

# NOTE: Tests rely on this using scribbleFreeCells.
def runNoCJITValidate(*optionalTestSpecificOptions)
    run("no-cjit", "--validateBytecode=true", "--validateGraph=true", *(NO_CJIT_OPTIONS + optionalTestSpecificOptions))
end

# No concurrent JIT, with DFG graph validation after every phase.
def runNoCJITValidatePhases(*optionalTestSpecificOptions)
    run("no-cjit-validate-phases", "--validateBytecode=true", "--validateGraphAtEachPhase=true", "--useSourceProviderCache=false", "--useRandomizingExecutableIslandAllocation=true", *(NO_CJIT_OPTIONS + optionalTestSpecificOptions))
end

# No concurrent JIT while the GC collects continuously.
def runNoCJITCollectContinuously(*optionalTestSpecificOptions)
    run("no-cjit-collect-continuously", *(NO_CJIT_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
end
|
|
|
# The standard configuration (with FTL enabled where available).
def runDefault(*optionalTestSpecificOptions)
    run("default", *(FTL_OPTIONS + optionalTestSpecificOptions))
end

# FIXME: need to fix https://bugs.webkit.org/show_bug.cgi?id=218703 to enable this on Linux/MIPS.
# Exercise the bytecode cache by running via the helper shell script;
# skipped on platforms where the cache is unsupported.
def runBytecodeCacheImpl(optionalTestSpecificOptions, *additionalEnv)
    if ($hostOS == "darwin")
        fileTemplate = "bytecode-cache"
    elsif ($hostOS == "linux" && $architecture != "mips")
        # mkstemp-style template required on Linux.
        fileTemplate = "bytecode-cacheXXXXXX"
    else
        skip
        return
    end

    options = BASE_OPTIONS + FTL_OPTIONS + optionalTestSpecificOptions + $testSpecificRequiredOptions
    addRunCommand("bytecode-cache", ["sh", (pathToHelpers + "bytecode-cache-test-helper.sh").to_s, fileTemplate.to_s, *vmCommand, $benchmark.to_s] + options, silentOutputHandler, simpleErrorHandler, *additionalEnv)
end

def runBytecodeCache(*optionalTestSpecificOptions)
    runBytecodeCacheImpl(optionalTestSpecificOptions)
end

# Same, but without the forced-disk-cache assertion environment.
def runBytecodeCacheNoAssertion(*optionalTestSpecificOptions)
    runBytecodeCacheImpl(optionalTestSpecificOptions, "JSC_forceDiskCache=false")
end
|
|
|
# FTL without the concurrent JIT.
def runFTLNoCJIT(*optionalTestSpecificOptions)
    run("misc-ftl-no-cjit", "--useDataIC=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
end

# FTL, no concurrent JIT, B3 at optimization level 0.
def runFTLNoCJITB3O0(*optionalTestSpecificOptions)
    run("ftl-no-cjit-b3o0", "--useArrayAllocationProfiling=false", "--forcePolyProto=true", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + B3O0_OPTIONS + FORCE_LLINT_EXIT_OPTIONS + optionalTestSpecificOptions))
end

# FTL, no concurrent JIT, graph/BCE validation plus the sampling profiler.
def runFTLNoCJITValidate(*optionalTestSpecificOptions)
    run("ftl-no-cjit-validate-sampling-profiler", "--validateGraph=true", "--validateBCE=true", "--useSamplingProfiler=true", "--airForceIRCAllocator=true", "--useDataIC=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
end

# FTL, no concurrent JIT, with PutStack sinking disabled.
def runFTLNoCJITNoPutStackValidate(*optionalTestSpecificOptions)
    run("ftl-no-cjit-no-put-stack-validate", "--validateGraph=true", "--usePutStackSinking=false", "--airForceIRCAllocator=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
end

# FTL, no concurrent JIT, with inlining essentially disabled.
def runFTLNoCJITNoInlineValidate(*optionalTestSpecificOptions)
    run("ftl-no-cjit-no-inline-validate", "--validateGraph=true", "--maximumInliningDepth=1", "--airForceBriggsAllocator=true", "--useB3HoistLoopInvariantValues=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
end

# FTL, no concurrent JIT, validating OSR-exit liveness.
def runFTLNoCJITOSRValidation(*optionalTestSpecificOptions)
    run("ftl-no-cjit-osr-validation", "--validateFTLOSRExitLiveness=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
end
|
|
# Eager DFG tiering with continuous collection.
def runDFGEager(*optionalTestSpecificOptions)
    run("dfg-eager", *(EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + FORCE_LLINT_EXIT_OPTIONS + optionalTestSpecificOptions))
end

# Eager DFG, no concurrent JIT, with graph validation.
def runDFGEagerNoCJITValidate(*optionalTestSpecificOptions)
    run("dfg-eager-no-cjit-validate", "--validateGraph=true", *(NO_CJIT_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
end

# Eager FTL tiering.
def runFTLEager(*optionalTestSpecificOptions)
    run("ftl-eager", "--airForceBriggsAllocator=true", "--useRandomizingExecutableIslandAllocation=true", "--forcePolyProto=true", "--useDataIC=true", *(FTL_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
end

# Eager FTL under a randomized watchdog timeout (0-99 ms).
def runFTLEagerWatchdog(*optionalTestSpecificOptions)
    timeout = rand(100)
    run("ftl-eager-watchdog-#{timeout}", "--watchdog=#{timeout}", "--watchdog-exception-ok", *(FTL_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
end

# Eager FTL, no concurrent JIT, with validation and executable-allocation fuzzing.
def runFTLEagerNoCJITValidate(*optionalTestSpecificOptions)
    run("ftl-eager-no-cjit", "--validateGraph=true", "--validateBCE=true", "--airForceIRCAllocator=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + FORCE_LLINT_EXIT_OPTIONS + EXECUTABLE_FUZZER_OPTIONS + optionalTestSpecificOptions))
end

# Eager FTL, no concurrent JIT, B3 at optimization level 1.
def runFTLEagerNoCJITB3O1(*optionalTestSpecificOptions)
    run("ftl-eager-no-cjit-b3o1", "--validateGraph=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + B3O1_OPTIONS + optionalTestSpecificOptions))
end

# Eager FTL, no concurrent JIT, validating OSR-exit liveness.
def runFTLEagerNoCJITOSRValidation(*optionalTestSpecificOptions)
    run("ftl-eager-no-cjit-osr-validation", "--validateFTLOSRExitLiveness=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
end
|
|
|
def runNoCJITNoASO(*optionalTestSpecificOptions)
|
|
run("no-cjit-no-aso", "--useArchitectureSpecificOptimizations=false", *(NO_CJIT_OPTIONS + optionalTestSpecificOptions))
|
|
end
|
|
|
|
def runNoCJITNoAccessInlining(*optionalTestSpecificOptions)
|
|
run("no-cjit-no-access-inlining", "--useAccessInlining=false", *(NO_CJIT_OPTIONS + optionalTestSpecificOptions))
|
|
end
|
|
|
|
def runFTLNoCJITNoAccessInlining(*optionalTestSpecificOptions)
|
|
run("ftl-no-cjit-no-access-inlining", "--useAccessInlining=false", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
|
|
end
|
|
|
|
def runFTLNoCJITSmallPool(*optionalTestSpecificOptions)
|
|
run("ftl-no-cjit-small-pool", "--jitMemoryReservationSize=102400", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
|
|
end
|
|
|
|
def runNoCJIT(*optionalTestSpecificOptions)
|
|
run("no-cjit", *(NO_CJIT_OPTIONS + optionalTestSpecificOptions))
|
|
end
|
|
|
|
def runEagerJettisonNoCJIT(*optionalTestSpecificOptions)
|
|
run("eager-jettison-no-cjit", "--useRandomizingExecutableIslandAllocation=true", "--forceCodeBlockToJettisonDueToOldAge=true", "--verifyGC=true", *(NO_CJIT_OPTIONS + optionalTestSpecificOptions))
|
|
end
|
|
|
|
def runShadowChicken(*optionalTestSpecificOptions)
|
|
run("shadow-chicken", "--useDFGJIT=false", "--alwaysUseShadowChicken=true", *optionalTestSpecificOptions)
|
|
end
|
|
|
|
# Forces mini-VM mode.
def runMiniMode(*optionalTestSpecificOptions)
    run("mini-mode", "--forceMiniVMMode=true", *optionalTestSpecificOptions)
end
|
|
|
|
# FTL run with logical assignment operators (&&=, ||=, ??=) enabled.
def runLogicalAssignmentOperatorsEnabled(*optionalTestSpecificOptions)
    options = FTL_OPTIONS + optionalTestSpecificOptions
    run("logical-assignment-operators-enabled", "--useLogicalAssignmentOperators=true", *options)
end
|
|
|
|
# The standard test plan. In "quick" mode this delegates to the reduced
# defaultQuickRun matrix; otherwise it schedules the full set of
# configurations, gated on $jitTests, $mode ("basic" trims the expensive
# variants), $isFTLPlatform and $buildType.
def defaultRun
    if $mode == "quick"
        defaultQuickRun
    else
        runDefault
        runBytecodeCache
        runMiniMode
        if $jitTests
            runNoLLInt
            runNoCJITValidatePhases
            runNoCJITCollectContinuously if shouldCollectContinuously?
            runDFGEager
            if $mode != "basic"
                runDFGEagerNoCJITValidate
                runEagerJettisonNoCJIT
            end

            # Everything below requires an FTL-capable platform.
            return if !$isFTLPlatform

            runNoFTL
            runFTLEager
            # The eager/no-cjit validate variant is only run on release builds.
            runFTLEagerNoCJITValidate if $buildType == "release"
            runFTLNoCJITSmallPool

            # "basic" mode skips the remaining, heavier FTL variants.
            return if $mode == "basic"

            runFTLNoCJITValidate
            runFTLNoCJITB3O0
            runFTLNoCJITNoPutStackValidate
            runFTLNoCJITNoInlineValidate
            runFTLEagerNoCJITB3O1
        end
    end
end
|
|
|
|
# Same shape as defaultRun, but omits the no-LLInt, bytecode-cache and
# mini-mode configurations.
def defaultNoNoLLIntRun
    if $mode == "quick"
        defaultQuickRun
    else
        runDefault
        if $jitTests
            runNoCJITValidatePhases
            runNoCJITCollectContinuously if shouldCollectContinuously?
            runDFGEager
            if $mode != "basic"
                runDFGEagerNoCJITValidate
                runEagerJettisonNoCJIT
            end

            # Everything below requires an FTL-capable platform.
            return if !$isFTLPlatform

            runNoFTL
            runFTLNoCJITValidate
            runFTLNoCJITSmallPool

            # "basic" mode skips the remaining, heavier FTL variants.
            return if $mode == "basic"

            runFTLNoCJITB3O0
            runFTLNoCJITNoPutStackValidate
            runFTLNoCJITNoInlineValidate
            runFTLEager
            runFTLEagerNoCJITValidate if $buildType == "release"
        end
    end
end
|
|
|
|
# Reduced matrix used by "quick" mode: default run, plus a couple of
# validated JIT/FTL configurations when JIT testing is enabled.
def defaultQuickRun
    runDefault
    return unless $jitTests

    runNoCJITValidate
    return unless $isFTLPlatform

    runNoFTL
    runFTLNoCJITValidate
end
|
|
|
|
# Spot-check plan without the eager-jettison run (see defaultSpotCheck).
def defaultSpotCheckNoMaximalFlush
    defaultQuickRun
    runNoCJITNoAccessInlining
    return unless $isFTLPlatform

    runFTLNoCJITOSRValidation
    runFTLNoCJITNoAccessInlining
    runFTLNoCJITB3O0
end
|
|
|
|
# Spot-check plan: the no-maximal-flush set plus the eager-jettison run.
def defaultSpotCheck
    defaultSpotCheckNoMaximalFlush
    runEagerJettisonNoCJIT
end
|
|
|
|
# This is expected to not do eager runs because eager runs can have a lot of recompilations
|
|
# for reasons that don't arise in the real world. It's used for tests that assert convergence
|
|
# by counting recompilations.
|
|
# Plan that deliberately omits eager configurations (see the comment above:
# eager runs can recompile for reasons that don't arise in practice, which
# breaks tests that count recompilations).
def defaultNoEagerRun(*optionalTestSpecificOptions)
    runDefault(*optionalTestSpecificOptions)
    if $jitTests
        runNoLLInt(*optionalTestSpecificOptions)
        runNoCJITValidatePhases(*optionalTestSpecificOptions)
        runNoCJITCollectContinuously(*optionalTestSpecificOptions) if shouldCollectContinuously?

        # Everything below requires an FTL-capable platform.
        return if !$isFTLPlatform

        runNoFTL(*optionalTestSpecificOptions)
        runFTLNoCJITValidate(*optionalTestSpecificOptions)

        # "basic" mode skips the heavier FTL variants.
        return if $mode == "basic"

        runFTLNoCJITNoInlineValidate(*optionalTestSpecificOptions)
        runFTLNoCJITB3O0(*optionalTestSpecificOptions)
    end
end
|
|
|
|
# Full matrix variant used for tests incompatible with the sampling profiler.
def defaultNoSamplingProfilerRun
    runDefault
    if $jitTests
        runNoLLInt
        runNoCJITValidatePhases
        runNoCJITCollectContinuously if shouldCollectContinuously?
        runDFGEager
        runDFGEagerNoCJITValidate
        runEagerJettisonNoCJIT

        # Everything below requires an FTL-capable platform.
        return if !$isFTLPlatform

        runNoFTL
        runFTLNoCJITNoPutStackValidate
        runFTLNoCJITNoInlineValidate
        runFTLEager
        runFTLEagerNoCJITValidate if $buildType == "release"
        runFTLNoCJITSmallPool
    end
end
|
|
|
|
# Profiler test. Skipped on remote, memory-limited, Windows and PlayStation
# hosts. When the display-profiler-output helper can run, the VM invocation
# is wrapped in profiler-test-helper; otherwise a plain "-p" profiling run
# is used.
def runProfiler
    if $remote or $memoryLimited or ($hostOS == "windows") or ($hostOS == "playstation")
        skip
        return
    end

    profilerOutput = uniqueFilename(".json")
    if $canRunDisplayProfilerOutput
        addRunCommand("profiler", ["ruby", (pathToHelpers + "profiler-test-helper").to_s, (SCRIPTS_PATH + "display-profiler-output").to_s, profilerOutput.to_s, *vmCommand, "--useConcurrentJIT=false", "-p", profilerOutput.to_s, $benchmark.to_s], silentOutputHandler, simpleErrorHandler)
    else
        puts "Running simple version of #{$collectionName}/#{$benchmark} because some required Ruby features are unavailable."
        run("profiler-simple", "--useConcurrentJIT=false", "-p", profilerOutput.to_s)
    end
end
|
|
|
|
# Wraps the VM invocation in the js-exception-fuzz perl driver.
def runExceptionFuzz
    fuzzedCommand = escapeAll(vmCommand + ["--useDollarVM=true", "--useExceptionFuzz=true", $benchmark.to_s])
    addRunCommand("exception-fuzz", ["perl", (pathToHelpers + "js-exception-fuzz").to_s, fuzzedCommand], silentOutputHandler, simpleErrorHandler)
end
|
|
|
|
# Wraps the VM invocation in the js-executable-allocation-fuzz perl driver.
def runExecutableAllocationFuzz(name, *options)
    fuzzedCommand = escapeAll(vmCommand + ["--useDollarVM=true", $benchmark.to_s] + options)
    addRunCommand("executable-allocation-fuzz-" + name, ["perl", (pathToHelpers + "js-executable-allocation-fuzz").to_s, fuzzedCommand], silentOutputHandler, simpleErrorHandler)
end
|
|
|
|
# Type-profiler configurations; JIT tests only, eager variant on FTL platforms.
def runTypeProfiler
    return unless $jitTests

    run("ftl-type-profiler", "--useTypeProfiler=true", *FTL_OPTIONS)
    run("ftl-no-cjit-type-profiler-force-poly-proto", "--useTypeProfiler=true", "--forcePolyProto=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))

    return unless $isFTLPlatform

    run("ftl-type-profiler-ftl-eager", "--useTypeProfiler=true", *(FTL_OPTIONS + EAGER_OPTIONS))
end
|
|
|
|
# Control-flow-profiler configuration; JIT tests on FTL platforms only.
def runControlFlowProfiler
    return unless $jitTests
    return unless $isFTLPlatform

    run("ftl-no-cjit-type-profiler", "--useControlFlowProfiler=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
end
|
|
|
|
# Schedules a Test262 run.
#   mode         - :normal, :fail, :failDueToOutdatedOrBadTest or :skip.
#   exception    - expected exception name, or "NoException".
#   includeFiles - harness include files, relative to the collection parent.
#   flags        - any of :strict, :module, :async.
def runTest262(mode, exception, includeFiles, flags)
    failsWithException = exception != "NoException"
    isStrict = false
    isModule = false
    isAsync = false

    flags.each {
        | flag |
        case flag
        when :strict
            isStrict = true
        when :module
            isModule = true
        when :async
            isAsync = true
        else
            raise "Invalid flag for runTest262, #{flag}"
        end
    }

    # Include files live next to the collection directory.
    prepareExtraRelativeFiles(includeFiles.map { |f| "../" + f }, $collection)

    args = vmCommand + BASE_OPTIONS
    args << "--exception=" + exception if failsWithException
    args << "--test262-async" if isAsync
    args += $testSpecificRequiredOptions
    args += includeFiles

    # Pick output/error handlers per mode; :skip schedules nothing.
    case mode
    when :normal
        errorHandler = simpleErrorHandler
        outputHandler = silentOutputHandler
    when :fail
        errorHandler = expectedFailErrorHandler
        outputHandler = noisyOutputHandler
    when :failDueToOutdatedOrBadTest
        errorHandler = expectedFailErrorHandler
        outputHandler = noisyOutputHandler
    when :skip
        return
    else
        raise "Invalid mode: #{mode}"
    end

    # Strict mode takes precedence over module mode for how the benchmark
    # file is passed to the VM.
    if isStrict
        kind = "default-strict"
        args << "--strict-file=#{$benchmark}"
    else
        kind = "default"
        if isModule
            args << "--module-file=#{$benchmark}"
        else
            args << $benchmark.to_s
        end
    end

    addRunCommand(kind, args, outputHandler, errorHandler)
end
|
|
|
|
# Copies the files used by Test262 modules tests into the bundle.
def prepareTest262Fixture
    prepareExtraRelativeFiles([""], $collection)
end
|
|
|
|
# Schedules an ES6-suite run; mode picks the error handler (:skip runs nothing).
def runES6(mode)
    return if mode == :skip

    args = vmCommand + BASE_OPTIONS + $testSpecificRequiredOptions + [$benchmark.to_s]
    errorHandler =
        case mode
        when :normal
            simpleErrorHandler
        when :fail, :failDueToOutdatedOrBadTest
            expectedFailErrorHandler
        else
            raise "Invalid mode: #{mode}"
        end
    addRunCommand("default", args, noisyOutputHandler, errorHandler)
end
|
|
|
|
# Module ("-m") variants of the standard matrix. Pass noLLInt: false to
# suppress the no-llint configuration.
def defaultRunModules(noLLInt: true)
    run("default-modules", "-m")

    if !$jitTests
        return
    end

    run("no-llint-modules", "-m", "--useLLInt=false") if noLLInt
    run("no-cjit-validate-phases-modules", "-m", "--validateBytecode=true", "--validateGraphAtEachPhase=true", *NO_CJIT_OPTIONS)
    run("dfg-eager-modules", "-m", *EAGER_OPTIONS)
    run("dfg-eager-no-cjit-validate-modules", "-m", "--validateGraph=true", *(NO_CJIT_OPTIONS + EAGER_OPTIONS))

    # FTL module configurations require an FTL-capable platform.
    return if !$isFTLPlatform

    run("default-ftl-modules", "-m", *FTL_OPTIONS)
    run("ftl-no-cjit-validate-modules", "-m", "--validateGraph=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
    run("ftl-no-cjit-no-inline-validate-modules", "-m", "--validateGraph=true", "--maximumInliningDepth=1", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
    run("ftl-eager-modules", "-m", *(FTL_OPTIONS + EAGER_OPTIONS))
    run("ftl-eager-no-cjit-modules", "-m", "--validateGraph=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS))
    run("ftl-no-cjit-small-pool-modules", "-m", "--jitMemoryReservationSize=102400", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
end
|
|
|
|
# Module matrix without the no-llint configuration.
def noNoLLIntRunModules
    defaultRunModules(noLLInt: false)
end
|
|
|
|
# Standard WebAssembly matrix; JIT tests on FTL platforms only. Quick mode
# runs just the default configuration.
def runWebAssembly
    return if !$jitTests
    return if !$isFTLPlatform
    run("default-wasm", "-m", *FTL_OPTIONS)
    if $mode != "quick"
        run("wasm-no-cjit-yes-tls-context", "-m", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
        run("wasm-eager", "-m", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + EAGER_OPTIONS))
        run("wasm-eager-jettison", "-m", "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *FTL_OPTIONS)
        run("wasm-no-tls-context", "-m", "--useFastTLSForWasmContext=false", *FTL_OPTIONS)
        run("wasm-slow-memory", "-m", "--useWebAssemblyFastMemory=false", *FTL_OPTIONS)
        run("wasm-b3", "-m", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *FTL_OPTIONS)
        run("wasm-air", "-m", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *FTL_OPTIONS)
        run("wasm-collect-continuously", "-m", "--collectContinuously=true", "--verifyGC=true", *FTL_OPTIONS) if shouldCollectContinuously?
    end
end
|
|
|
|
# Runs the JetStream2 wasm workloads. Skipped entirely on memory-limited
# hosts; requires JIT tests and an FTL platform.
def runWebAssemblyJetStream2
    return if !$jitTests
    return if !$isFTLPlatform

    if $memoryLimited
        skip
        return
    end

    # Copy the driver plus every wasm/*.js and wasm/*.wasm fixture into the bundle.
    prepareExtraAbsoluteFiles(JETSTREAM2_PATH, ["JetStreamDriver.js"])
    prepareExtraRelativeFilesWithBaseDirectory(Dir[JETSTREAM2_PATH + "wasm" + "*.js"].map { |f| "wasm/" + File.basename(f) }, $collection.dirname, $extraFilesBaseDir.dirname)
    prepareExtraRelativeFilesWithBaseDirectory(Dir[JETSTREAM2_PATH + "wasm" + "*.wasm"].map { |f| "wasm/" + File.basename(f) }, $collection.dirname, $extraFilesBaseDir.dirname)

    run("default-wasm", *FTL_OPTIONS)

    if $mode != "quick"
        run("wasm-no-cjit-yes-tls-context", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
        run("wasm-eager", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + EAGER_OPTIONS))
        run("wasm-eager-jettison", "--forceCodeBlockToJettisonDueToOldAge=true", "--verifyGC=true", *FTL_OPTIONS)
        run("wasm-no-tls-context", "--useFastTLSForWasmContext=false", *FTL_OPTIONS)
        run("wasm-slow-memory", "--useWebAssemblyFastMemory=false", *FTL_OPTIONS)
        run("wasm-b3", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *FTL_OPTIONS)
        run("wasm-air", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *FTL_OPTIONS)
        run("wasm-collect-continuously", "--collectContinuously=true", "--verifyGC=true", *FTL_OPTIONS) if shouldCollectContinuously?
    end
end
|
|
|
|
# WebAssembly suite matrix: copies wasm.json and the suite's JS modules into
# the bundle, then runs the standard wasm configurations.
def runWebAssemblySuite(*optionalTestSpecificOptions)
    return if !$jitTests
    return if !$isFTLPlatform
    modules = Dir[WASMTESTS_PATH + "*.js"].map { |f| File.basename(f) }
    prepareExtraAbsoluteFiles(WASMTESTS_PATH, ["wasm.json"])
    prepareExtraRelativeFiles(modules.map { |f| "../" + f }, $collection)
    run("default-wasm", "-m", *(FTL_OPTIONS + optionalTestSpecificOptions))
    if $mode != "quick"
        run("wasm-no-cjit-yes-tls-context", "-m", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
        run("wasm-eager", "-m", *(FTL_OPTIONS + EAGER_OPTIONS + optionalTestSpecificOptions))
        run("wasm-eager-jettison", "-m", "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        run("wasm-no-tls-context", "-m", "--useFastTLSForWasmContext=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        run("wasm-slow-memory", "-m", "--useWebAssemblyFastMemory=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        run("wasm-b3", "-m", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        run("wasm-air", "-m", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        run("wasm-collect-continuously", "-m", "--collectContinuously=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions)) if shouldCollectContinuously?
    end
end
|
|
|
|
# Adds one run per .wasm file in the collection, passing the wasm file's
# basename to the harness after "--".
def runHarnessTest(kind, *options)
    allWasmFiles($collection).each do |wasmFile|
        basename = wasmFile.basename.to_s
        addRunCommand("(" + basename + ")-" + kind, vmCommand + options + $testSpecificRequiredOptions + [$benchmark.to_s, "--", basename], silentOutputHandler, simpleErrorHandler)
    end
end
|
|
|
|
# Runs a harness.js / harness.mjs driver over every .wasm file in the
# collection, in the standard wasm configurations.
def runWebAssemblyWithHarness(*optionalTestSpecificOptions)
    raise unless $benchmark.to_s =~ /harness\.m?js/
    return if !$jitTests
    return if !$isFTLPlatform

    wasmFiles = allWasmFiles($collection)
    prepareExtraRelativeFiles(wasmFiles.map { |f| f.basename }, $collection)

    runHarnessTest("default-wasm", *(FTL_OPTIONS + optionalTestSpecificOptions))
    if $mode != "quick"
        runHarnessTest("wasm-no-cjit-yes-tls-context", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-eager", *(FTL_OPTIONS + EAGER_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-eager-jettison", "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-no-tls-context", "--useFastTLSForWasmContext=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-slow-memory", "--useWebAssemblyFastMemory=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-b3", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        # NOTE(review): this kind is labeled "wasm-no-air" although the options
        # match the "wasm-air" configuration used elsewhere in this file —
        # possibly a stale label; confirm before renaming.
        runHarnessTest("wasm-no-air", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runHarnessTest("wasm-collect-continuously", "--collectContinuously=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions)) if shouldCollectContinuously?
    end
end
|
|
|
|
# Runs an Emscripten-produced test: the .js driver plus its sibling .wasm
# file. mode == :skip schedules nothing.
def runWebAssemblyEmscripten(mode)
    case mode
    when :skip
        return
    end
    return if !$jitTests
    return if !$isFTLPlatform
    # Use the non-bang sub: sub! returns nil when ".js" is absent, which would
    # have produced a nil wasm path here.
    wasm = $benchmark.to_s.sub '.js', '.wasm'
    prepareExtraRelativeFiles([Pathname('..') + wasm], $collection)
    run("default-wasm", *FTL_OPTIONS)
    if $mode != "quick"
        run("wasm-no-cjit-yes-tls-context", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
        run("wasm-eager-jettison", "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *FTL_OPTIONS)
        run("wasm-no-tls-context", "--useFastTLSForWasmContext=false", *FTL_OPTIONS)
        run("wasm-b3", "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *FTL_OPTIONS)
        run("wasm-air", "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *FTL_OPTIONS)
        run("wasm-collect-continuously", "--collectContinuously=true", "--verifyGC=true", *FTL_OPTIONS) if shouldCollectContinuously?
    end
end
|
|
|
|
# Shared driver for the wasm spec-test suites: copies wasm.json, the shared
# JS modules and the given harness directory into the bundle, then runs the
# standard wasm configurations through the harness script.
def runWebAssemblySpecTestBase(mode, specHarnessPath, *optionalTestSpecificOptions)
    case mode
    when :skip
        return
    end
    return if !$jitTests
    return if !$isFTLPlatform
    prepareExtraAbsoluteFiles(WASMTESTS_PATH, ["wasm.json"])

    modules = Dir[WASMTESTS_PATH + "*.js"].map { |f| File.basename(f) }
    prepareExtraRelativeFiles(modules.map { |f| "../../" + f }, $collection)

    harness = Dir[WASMTESTS_PATH + (specHarnessPath + "/") + "*.js"].map { |f| File.basename(f) }
    prepareExtraRelativeFiles(harness.map { |f| ("../../" + specHarnessPath + "/") + f }, $collection)

    # The harness entry point lives one level above the collection directory.
    specHarnessJsPath = "../" + specHarnessPath + ".js"
    runWithOutputHandler("default-wasm", noisyOutputHandler, specHarnessJsPath, *(FTL_OPTIONS + optionalTestSpecificOptions))
    if $mode != "quick"
        runWithOutputHandler("wasm-no-cjit-yes-tls-context", noisyOutputHandler, specHarnessJsPath, "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
        runWithOutputHandler("wasm-eager-jettison", noisyOutputHandler, specHarnessJsPath, "--forceCodeBlockToJettisonDueToOldAge=true", "--useRandomizingExecutableIslandAllocation=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runWithOutputHandler("wasm-no-tls-context", noisyOutputHandler, specHarnessJsPath, "--useFastTLSForWasmContext=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runWithOutputHandler("wasm-b3", noisyOutputHandler, specHarnessJsPath, "--useWasmLLInt=false", "--wasmBBQUsesAir=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runWithOutputHandler("wasm-air", noisyOutputHandler, specHarnessJsPath, "--useWasmLLInt=false", "--useRandomizingExecutableIslandAllocation=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
        runWithOutputHandler("wasm-collect-continuously", noisyOutputHandler, specHarnessJsPath, "--collectContinuously=true", "--verifyGC=true", *(FTL_OPTIONS + optionalTestSpecificOptions)) if shouldCollectContinuously?
    end
end
|
|
|
|
# Wasm spec tests using the "spec-harness" directory.
def runWebAssemblySpecTest(mode)
    runWebAssemblySpecTestBase(mode, "spec-harness")
end
|
|
|
|
# Wasm reference-types spec tests using the "ref-spec-harness" directory.
def runWebAssemblyReferenceSpecTest(mode)
    runWebAssemblySpecTestBase(mode, "ref-spec-harness")
end
|
|
|
|
# Runs wasm under a tiny JIT reservation with all JS JIT tiers disabled, so
# only WebAssembly gets executable memory.
def runWebAssemblyLowExecutableMemory(*optionalTestSpecificOptions)
    return unless $jitTests
    return unless $isFTLPlatform

    jsModules = Dir[WASMTESTS_PATH + "*.js"].map { |f| File.basename(f) }
    prepareExtraAbsoluteFiles(WASMTESTS_PATH, ["wasm.json"])
    prepareExtraRelativeFiles(jsModules.map { |f| "../" + f }, $collection)
    run("default-wasm", "--useConcurrentGC=0", "--useConcurrentJIT=0", "--jitMemoryReservationSize=20000", "--useBaselineJIT=0", "--useDFGJIT=0", "--useFTLJIT=0", "-m")
end
|
|
|
|
# Schedules a Chakra-suite run.
#   mode         - :baseline (diff against baselineFile), :pass, or one of the
#                  skip modes.
#   exception    - expected exception name, or "NoException".
#   baselineFile - expected-output file, relative to the collection parent.
#   extraFiles   - support files, relative to the collection parent.
def runChakra(mode, exception, baselineFile, extraFiles)
    raise unless $benchmark.to_s =~ /\.js$/
    failsWithException = exception != "NoException"
    testName = $~.pre_match

    prepareExtraAbsoluteFiles(CHAKRATESTS_PATH, ["jsc-lib.js"])
    prepareExtraRelativeFiles(extraFiles.map { |f| "../" + f }, $collection)

    args = vmCommand + BASE_OPTIONS
    args += FTL_OPTIONS if $isFTLPlatform
    args += EAGER_OPTIONS
    args << "--exception=" + exception if failsWithException
    args << "--dumpException" if failsWithException
    args += $testSpecificRequiredOptions
    # jsc-lib.js is Chakra's shim library; it runs before the benchmark.
    args += ["jsc-lib.js"]

    case mode
    when :baseline
        prepareExtraRelativeFiles([(Pathname("..") + baselineFile).to_s], $collection)
        errorHandler = diffErrorHandler(($benchmarkDirectory + baselineFile).to_s)
        outputHandler = noisyOutputHandler
    when :pass
        errorHandler = chakraPassFailErrorHandler
        outputHandler = noisyOutputHandler
    when :skipDueToOutdatedOrBadTest
        return
    when :skip
        return
    else
        raise "Invalid mode: #{mode}"
    end

    kind = "default"
    args << $benchmark.to_s

    addRunCommand(kind, args, outputHandler, errorHandler)
end
|
|
|
|
# Schedules a layout-style test: standalone-pre.js, the benchmark, then
# standalone-post.js, diffed against <testName>-expected.txt. +kind+ (may be
# nil) names the configuration.
def runLayoutTest(kind, *options)
    raise unless $benchmark.to_s =~ /\.js$/
    testName = $~.pre_match
    if kind
        kind = "layout-" + kind
    else
        kind = "layout"
    end

    prepareExtraRelativeFiles(["../#{testName}-expected.txt"], $benchmarkDirectory)
    prepareExtraAbsoluteFiles(LAYOUTTESTS_PATH, ["resources/standalone-pre.js", "resources/standalone-post.js"])

    args = vmCommand + BASE_OPTIONS + options + $testSpecificRequiredOptions +
        [(Pathname.new("resources") + "standalone-pre.js").to_s,
         $benchmark.to_s,
         (Pathname.new("resources") + "standalone-post.js").to_s]
    addRunCommand(kind, args, noisyOutputHandler, diffErrorHandler(($benchmarkDirectory + "../#{testName}-expected.txt").to_s))
end
|
|
|
|
# Layout test without the FTL.
def runLayoutTestNoFTL
    runLayoutTest("no-ftl")
end
|
|
|
|
# Layout test with the LLInt disabled.
def runLayoutTestNoLLInt
    runLayoutTest("no-llint", "--useLLInt=false")
end
|
|
|
|
# Layout test without concurrent JIT.
def runLayoutTestNoCJIT
    runLayoutTest("no-cjit", *NO_CJIT_OPTIONS)
end
|
|
|
|
# Layout test with eager DFG compilation and no concurrent JIT.
def runLayoutTestDFGEagerNoCJIT
    options = NO_CJIT_OPTIONS + EAGER_OPTIONS
    runLayoutTest("dfg-eager-no-cjit", *options)
end
|
|
|
|
# Default layout-test configuration (FTL on, with --testTheFTL).
def runLayoutTestDefault
    runLayoutTest(nil, "--testTheFTL=true", *FTL_OPTIONS)
end
|
|
|
|
# Layout test with FTL and no concurrent JIT.
def runLayoutTestFTLNoCJIT
    options = FTL_OPTIONS + NO_CJIT_OPTIONS
    runLayoutTest("ftl-no-cjit", "--testTheFTL=true", *options)
end
|
|
|
|
# Layout test with FTL, eager compilation and no concurrent JIT.
def runLayoutTestFTLEagerNoCJIT
    options = FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS
    runLayoutTest("ftl-eager-no-cjit", "--testTheFTL=true", *options)
end
|
|
|
|
# Layout test with FTL, eager compilation, no concurrent JIT, B3 at -O1.
def runLayoutTestFTLEagerNoCJITB3O1
    options = FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + B3O1_OPTIONS
    runLayoutTest("ftl-eager-no-cjit-b3o1", "--testTheFTL=true", *options)
end
|
|
|
|
# JIT layout-test variants that make sense when the FTL is unavailable.
def noFTLRunLayoutTest
    return unless $jitTests

    runLayoutTestNoLLInt
    runLayoutTestNoCJIT
    runLayoutTestDFGEagerNoCJIT
end
|
|
|
|
# Quick-mode layout plan: default run plus an FTL or non-FTL JIT subset.
def defaultQuickRunLayoutTest
    runLayoutTestDefault
    return unless $jitTests

    if $isFTLPlatform
        runLayoutTestNoFTL
        runLayoutTestFTLNoCJIT
        runLayoutTestFTLEagerNoCJIT
    else
        noFTLRunLayoutTest
    end
end
|
|
|
|
# Standard layout-test plan; "quick" mode delegates to the reduced matrix.
def defaultRunLayoutTest
    if $mode == "quick"
        defaultQuickRunLayoutTest
    else
        runLayoutTestDefault
        if $jitTests
            noFTLRunLayoutTest

            # FTL variants require an FTL-capable platform.
            return if !$isFTLPlatform

            runLayoutTestNoFTL
            runLayoutTestFTLNoCJIT
            runLayoutTestFTLEagerNoCJIT
        end
    end
end
|
|
|
|
# Layout plan omitting both the eager and the no-llint variants.
def noEagerNoNoLLIntTestsRunLayoutTest
    runLayoutTestDefault
    return unless $jitTests

    runLayoutTestNoCJIT
    return unless $isFTLPlatform

    runLayoutTestNoFTL
    runLayoutTestFTLNoCJIT
end
|
|
|
|
# Layout plan omitting the no-llint variant.
def noNoLLIntRunLayoutTest
    runLayoutTestDefault
    return unless $jitTests

    runLayoutTestNoCJIT
    runLayoutTestDFGEagerNoCJIT
    return unless $isFTLPlatform

    runLayoutTestNoFTL
    runLayoutTestFTLNoCJIT
    runLayoutTestFTLEagerNoCJIT
end
|
|
|
|
# Copies each relative path in +extraFiles+ from +baseDirectory+ into
# +destination+, creating intermediate directories as needed. Both source
# and target are resolved while chdir'd into $outputDir.
def prepareExtraRelativeFilesWithBaseDirectory(extraFiles, destination, baseDirectory)
    Dir.chdir($outputDir) do
        extraFiles.each do |file|
            target = destination + file
            FileUtils.mkdir_p(target.dirname)
            FileUtils.cp(baseDirectory + file, target)
        end
    end
end
|
|
|
|
# Copies +extraFiles+ from the collection's original location
# ($extraFilesBaseDir) into +destination+.
def prepareExtraRelativeFiles(extraFiles, destination)
    prepareExtraRelativeFilesWithBaseDirectory(extraFiles, destination, $extraFilesBaseDir)
end
|
|
|
|
# Bundle directory for a collection: .tests/<collectionName>.
def baseDirForCollection(collectionName)
    Pathname(".tests").join(collectionName)
end
|
|
|
|
# Copies +extraFiles+ (relative names) from the absolute directory
# +absoluteBase+ into this collection's bundle directory under $outputDir.
def prepareExtraAbsoluteFiles(absoluteBase, extraFiles)
    raise unless absoluteBase.absolute?
    Dir.chdir($outputDir) {
        collectionBaseDir = baseDirForCollection($collectionName)
        extraFiles.each {
            | file |
            destination = collectionBaseDir + file
            FileUtils.mkdir_p destination.dirname unless destination.directory?
            FileUtils.cp absoluteBase + file, destination
        }
    }
end
|
|
|
|
# Schedules a test that needs companion scripts before and/or after the
# benchmark, with optional extra environment variables.
def runComplexTest(before, after, additionalEnv, *options)
    prepareExtraRelativeFiles(before.map{|v| (Pathname("..") + v).to_s}, $collection)
    prepareExtraRelativeFiles(after.map{|v| (Pathname("..") + v).to_s}, $collection)
    args = vmCommand + BASE_OPTIONS + options + $testSpecificRequiredOptions + before.map{|v| v.to_s} + [$benchmark.to_s] + after.map{|v| v.to_s}
    addRunCommand("complex", args, noisyOutputHandler, simpleErrorHandler, *additionalEnv)
end
|
|
|
|
# Schedules a Mozilla-suite run; +extraFiles+ are prepended support scripts.
#   mode - :normal, :negative (expects exit code 3), :fail,
#          :failDueToOutdatedOrBadTest, or :skip.
def runMozillaTest(kind, mode, extraFiles, *options)
    if kind
        kind = "mozilla-" + kind
    else
        kind = "mozilla"
    end
    prepareExtraRelativeFiles(extraFiles.map{|v| (Pathname("..") + v).to_s}, $collection)
    args = vmCommand + BASE_OPTIONS + options + $testSpecificRequiredOptions + extraFiles.map{|v| v.to_s} + [$benchmark.to_s]
    case mode
    when :normal
        errorHandler = mozillaErrorHandler
    when :negative
        errorHandler = mozillaExit3ErrorHandler
    when :fail
        errorHandler = mozillaFailErrorHandler
    when :failDueToOutdatedOrBadTest
        errorHandler = mozillaFailErrorHandler
    when :skip
        return
    else
        raise "Invalid mode: #{mode}"
    end
    addRunCommand(kind, args, noisyOutputHandler, errorHandler)
end
|
|
|
|
# Default Mozilla configuration (FTL on).
def runMozillaTestDefault(mode, *extraFiles)
    runMozillaTest(nil, mode, extraFiles, *FTL_OPTIONS)
end
|
|
|
|
# Mozilla configuration without the FTL.
def runMozillaTestNoFTL(mode, *extraFiles)
    runMozillaTest("no-ftl", mode, extraFiles)
end
|
|
|
|
# Mozilla configuration running on the LLInt only (JIT disabled).
def runMozillaTestLLInt(mode, *extraFiles)
    runMozillaTest("llint", mode, extraFiles, "--useJIT=false")
end
|
|
|
|
# Mozilla configuration forcing the baseline JIT (no LLInt, no DFG).
def runMozillaTestBaselineJIT(mode, *extraFiles)
    runMozillaTest("baseline", mode, extraFiles, "--useLLInt=false", "--useDFGJIT=false")
end
|
|
|
|
# Mozilla configuration: eager DFG, no concurrent JIT, phase validation on.
def runMozillaTestDFGEagerNoCJITValidatePhases(mode, *extraFiles)
    options = NO_CJIT_OPTIONS + EAGER_OPTIONS
    runMozillaTest("dfg-eager-no-cjit-validate-phases", mode, extraFiles, "--validateBytecode=true", "--validateGraphAtEachPhase=true", *options)
end
|
|
|
|
# Mozilla configuration: FTL, eager, no concurrent JIT, phase validation on.
def runMozillaTestFTLEagerNoCJITValidatePhases(mode, *extraFiles)
    options = FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS
    runMozillaTest("ftl-eager-no-cjit-validate-phases", mode, extraFiles, "--validateBytecode=true", "--validateGraphAtEachPhase=true", *options)
end
|
|
|
|
# Quick-mode Mozilla plan: with JIT tests enabled, the default and the eager
# FTL validate-phases configurations; otherwise just the no-FTL run.
def defaultQuickRunMozillaTest(mode, *extraFiles)
    if $jitTests
        runMozillaTestDefault(mode, *extraFiles)
        runMozillaTestFTLEagerNoCJITValidatePhases(mode, *extraFiles)
    else
        runMozillaTestNoFTL(mode, *extraFiles)
        # NOTE(review): the original re-checked $jitTests here and called
        # runMozillaTestDFGEagerNoCJITValidatePhases, but that branch was
        # unreachable inside this !$jitTests arm; it has been removed.
    end
end
|
|
|
|
# Standard Mozilla plan; "quick" mode delegates to the reduced matrix.
def defaultRunMozillaTest(mode, *extraFiles)
    if $mode == "quick"
        defaultQuickRunMozillaTest(mode, *extraFiles)
    else
        runMozillaTestNoFTL(mode, *extraFiles)
        if $jitTests
            runMozillaTestLLInt(mode, *extraFiles)
            runMozillaTestBaselineJIT(mode, *extraFiles)
            runMozillaTestDFGEagerNoCJITValidatePhases(mode, *extraFiles)
            runMozillaTestDefault(mode, *extraFiles)
            runMozillaTestFTLEagerNoCJITValidatePhases(mode, *extraFiles) if $isFTLPlatform
        end
    end
end
|
|
|
|
# Shared implementation for the noisy-test runners: builds the full command
# and registers it with noisy output/error handlers and optional extra env.
def runNoisyTestImpl(kind, options, additionalEnv)
    fullArgs = vmCommand + BASE_OPTIONS + options + $testSpecificRequiredOptions + [$benchmark.to_s]
    addRunCommand(kind, fullArgs, noisyOutputHandler, noisyErrorHandler, *additionalEnv)
end
|
|
|
|
# Noisy test with extra VM options and no extra environment.
def runNoisyTest(kind, *options)
    runNoisyTestImpl(kind, options, [])
end
|
|
|
|
# Noisy test with extra environment variables and no extra VM options.
def runNoisyTestWithEnv(kind, *additionalEnv)
    runNoisyTestImpl(kind, [], additionalEnv)
end
|
|
|
|
# Default noisy-test configuration (FTL on).
def runNoisyTestDefault
    runNoisyTest("default", *FTL_OPTIONS)
end
|
|
|
|
# Noisy-test configuration without the FTL.
def runNoisyTestNoFTL
    runNoisyTest("no-ftl")
end
|
|
|
|
# Noisy-test: FTL, no concurrent JIT, validation and continuous collection.
def runNoisyTestNoCJIT
    options = FTL_OPTIONS + NO_CJIT_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS
    runNoisyTest("ftl-no-cjit", "--validateBytecode=true", "--validateGraphAtEachPhase=true", *options)
end
|
|
|
|
# Noisy-test: FTL, no concurrent JIT, B3 at -O1, validation on.
def runNoisyTestNoCJITB3O1
    options = FTL_OPTIONS + NO_CJIT_OPTIONS + B3O1_OPTIONS
    runNoisyTest("ftl-no-cjit-b3o1", "--validateBytecode=true", "--validateGraphAtEachPhase=true", *options)
end
|
|
|
|
# Noisy-test: FTL, eager compilation, no concurrent JIT, validation on.
def runNoisyTestEagerNoCJIT
    options = FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS
    runNoisyTest("ftl-eager-no-cjit", "--validateBytecode=true", "--validateGraphAtEachPhase=true", *options)
end
|
|
|
|
# Standard noisy-test plan; the extra configurations require both JIT tests
# and an FTL platform.
def defaultRunNoisyTest
    runNoisyTestDefault
    return unless $jitTests and $isFTLPlatform

    runNoisyTestNoFTL
    runNoisyTestNoCJIT
    runNoisyTestNoCJITB3O1
    runNoisyTestEagerNoCJIT
end
|
|
|
|
# Marks the current benchmark as skipped: records that a run command was
# "added" (so the harness doesn't complain), sets the skipped flag, and
# prints a notice.
def skip
    $didAddRunCommand = true
    $skipped = true
    puts "Skipping #{$collectionName}/#{$benchmark}"
end
|
|
|
|
# Returns every .wasm / .mwasm file directly under +path+; if +path+ is
# itself a file, returns it as a one-element array.
def allWasmFiles(path)
    return [path] if path.file?

    files = []
    Dir.foreach(path) do |entry|
        next unless entry =~ /\.m?wasm$/
        candidate = path + entry
        files << candidate if candidate.file?
    end
    files
end
|
|
|
|
# Returns every .js / .mjs file directly under +path+; if +path+ is itself a
# file, returns it as a one-element array.
def allJSFiles(path)
    return [path] if path.file?

    files = []
    Dir.foreach(path) do |entry|
        next unless entry =~ /\.m?js$/
        candidate = path + entry
        files << candidate if candidate.file?
    end
    files
end
|
|
|
|
# Returns a name not yet present in +names+, suffixing "-1", "-2", ... as
# needed, and records the chosen name in +names+.
def uniqueifyName(names, name)
    candidate = name.to_s
    suffix = 0
    while names[candidate]
        suffix += 1
        candidate = "#{name}-#{suffix}"
    end
    names[candidate] = true
    candidate
end
|
|
|
|
# Derives a short, unique display name for a collection path. For directory
# collections whose name matches /test/, parent directory names are
# prepended until a non-matching component is found; the result is made
# unique via $collectionNames.
def simplifyCollectionName(collectionPath)
    outerDir = collectionPath.dirname
    name = collectionPath.basename
    lastName = name
    if collectionPath.directory?
        while lastName.to_s =~ /test/
            lastName = outerDir.basename
            name = lastName + name
            outerDir = outerDir.dirname
        end
    end
    uniqueifyName($collectionNames, name)
end
|
|
|
|
# Copies the collection's files into the output bundle (.tests/<name>) and
# repoints $collection at the copy. $extraFilesBaseDir remembers the original
# location for later prepareExtraRelativeFiles calls.
def prepareCollection(name)
    FileUtils.mkdir_p $outputDir + name

    absoluteCollection = $collection.realpath

    Dir.chdir($outputDir) {
        bundleDir = baseDirForCollection(name)

        # Create the proper directory structures.
        FileUtils.mkdir_p bundleDir
        if bundleDir.basename == $collection.basename
            # Avoid nesting <name>/<name>: copy into the parent instead.
            FileUtils.cp_r absoluteCollection, bundleDir.dirname
            $collection = bundleDir
        else
            FileUtils.cp_r absoluteCollection, bundleDir
            $collection = bundleDir + $collection.basename
        end

        $extraFilesBaseDir = absoluteCollection
    }
end
|
|
|
|
# Registry of collection names already taken; used by uniqueifyName via
# simplifyCollectionName.
$collectionNames = {}
|
|
|
|
# Processes a YAML collection file. Each entry either names a nested
# collection (handled recursively at the end) or a test path whose "cmd"
# string is eval'ed once per JS file found under it. Globals ($collection,
# $collectionName, $benchmark, ...) are set up for each eval.
def handleCollectionFile(collection)
    collectionName = simplifyCollectionName(collection)

    paths = {}
    subCollections = []
    YAML::load(IO::read(collection)).each {
        | entry |
        if entry["collection"]
            subCollections << entry["collection"]
            next
        end

        if Pathname.new(entry["path"]).absolute?
            raise "Absolute path: " + entry["path"] + " in #{collection}"
        end

        if paths[entry["path"]]
            raise "Duplicate path: " + entry["path"] + " in #{collection}"
        end
        # Record the path so the duplicate check above can actually fire;
        # previously the hash was checked but never populated.
        paths[entry["path"]] = true

        subCollection = collection.dirname + entry["path"]

        if subCollection.file?
            subCollectionName = Pathname.new(entry["path"]).dirname
        else
            subCollectionName = entry["path"]
        end

        $collection = subCollection
        $collectionName = Pathname.new(collectionName)
        # Build the display name, skipping dot-components.
        Pathname.new(subCollectionName).each_filename {
            | filename |
            next if filename =~ /^\./
            $collectionName += filename
        }
        $collectionName = $collectionName.to_s

        prepareCollection($collectionName)

        Dir.chdir($outputDir) {
            pathsToSearch = [$collection]
            if entry["tests"]
                # "tests" restricts the search to a sub-path or a list of them.
                if entry["tests"].is_a? Array
                    pathsToSearch = entry["tests"].map {
                        | testName |
                        pathsToSearch[0] + testName
                    }
                else
                    pathsToSearch[0] += entry["tests"]
                end
            end
            pathsToSearch.each {
                | pathToSearch |
                allJSFiles(pathToSearch).each {
                    | path |

                    $benchmark = path.basename
                    $benchmarkDirectory = path.dirname

                    $runCommandOptions = {}
                    $testSpecificRequiredOptions = []
                    # The collection file supplies arbitrary Ruby to schedule runs.
                    eval entry["cmd"]
                }
            }
        }
    }

    subCollections.each {
        | subCollection |
        handleCollection(collection.dirname + subCollection)
    }
end
|
|
|
|
# Treats a directory as a collection: every JS file beneath it becomes a
# benchmark, run via its embedded run commands (parseRunCommands) or, when
# none are present, via defaultRun.
#
# Side effects: sets $collection/$collectionName/$benchmarkDirectory/
# $benchmark and the per-test option globals consumed by the run machinery.
def handleCollectionDirectory(collection)
    collectionName = simplifyCollectionName(collection)

    $collection = collection
    $collectionName = collectionName
    prepareCollection(collectionName)

    Dir.chdir($outputDir) {
        $benchmarkDirectory = $collection
        allJSFiles($collection).each {
            | path |

            $benchmark = path.basename

            # Reset per-test state before parsing this test's directives.
            $runCommandOptions = {}
            $testSpecificRequiredOptions = []
            defaultRun unless parseRunCommands
        }
    }
end
|
|
|
|
# Dispatches one command-line collection argument: a plain file is a YAML
# collection description, anything else is treated as a directory of tests.
def handleCollection(collection)
    asPath = Pathname.new(collection)
    handler = asPath.file? ? method(:handleCollectionFile) : method(:handleCollectionDirectory)
    handler.call(asPath)
end
|
|
|
|
# Records a failed test by appending its name to <outputDir>/failed.
def appendFailure(plan)
    File.open($outputDir + "failed", "a") do |file|
        file.puts(plan.name)
    end
end
|
|
|
|
# Records a passing test by appending its name to <outputDir>/passed.
def appendPass(plan)
    File.open($outputDir + "passed", "a") do |file|
        file.puts(plan.name)
    end
end
|
|
|
|
# Records a test we never got an exit status for by appending its name to
# <outputDir>/noresult.
def appendNoResult(plan)
    File.open($outputDir + "noresult", "a") do |file|
        file.puts(plan.name)
    end
end
|
|
|
|
# Appends "<name>: PASS" or "<name>: FAIL" to <outputDir>/results.
def appendResult(plan, didPass)
    label = didPass ? "PASS" : "FAIL"
    File.open($outputDir + "results", "a") do |file|
        file.puts("#{plan.name}: #{label}")
    end
end
|
|
|
|
# Stages everything needed to run the tests into $outputDir: the VM (either
# referenced in place, or copied/linked into .vm — including the Windows
# DLLs), the helper scripts (.helpers), and the test collections named on
# the command line.
#
# Side effects: may rewrite $jscPath to point into the staged .vm directory
# and sets $testingFrameworkPath.
#
# Fix: File.exists? → File.exist? (the former was deprecated and removed in
# Ruby 3.2).
def prepareBundle
    raise if $bundle

    if $doNotMessWithVMPath
        if !$remote and !$tarball
            # Running locally against the VM where it already lives; just
            # resolve the paths.
            $testingFrameworkPath = (frameworkFromJSCPath($jscPath) || $jscPath.dirname).realpath
            $jscPath = Pathname.new($jscPath).realpath
        else
            $testingFrameworkPath = frameworkFromJSCPath($jscPath)
        end
    else
        originalJSCPath = $jscPath
        vmDir = $outputDir + ".vm"
        FileUtils.mkdir_p vmDir

        frameworkPath = frameworkFromJSCPath($jscPath)
        destinationFrameworkPath = Pathname.new(".vm") + "JavaScriptCore.framework"
        $jscPath = destinationFrameworkPath + "Helpers" + "jsc"
        $testingFrameworkPath = Pathname.new("..") + destinationFrameworkPath

        if frameworkPath
            source = frameworkPath
            destination = Pathname.new(".vm")
        elsif $hostOS == "windows"
            # Make sure to copy dll along with jsc on Windows
            originalJSCDir = File.dirname(originalJSCPath)
            source = [originalJSCPath] + [originalJSCDir + "/jscLib.dll"]

            # Check for and copy JavaScriptCore.dll and WTF.dll for dynamic builds
            javaScriptCoreDLLPath = File.join(originalJSCDir, "JavaScriptCore.dll")
            wtfDLLPath = File.join(originalJSCDir, "WTF.dll")
            if (File.exist?(javaScriptCoreDLLPath))
                source = source + [javaScriptCoreDLLPath]
            end
            if (File.exist?(wtfDLLPath))
                source = source + [wtfDLLPath]
            end

            destination = $jscPath.dirname

            Dir.chdir($outputDir) {
                FileUtils.mkdir_p destination
            }
        else
            source = originalJSCPath
            destination = $jscPath

            Dir.chdir($outputDir) {
                FileUtils.mkdir_p $jscPath.dirname
            }
        end

        Dir.chdir($outputDir) {
            if $copyVM
                FileUtils.cp_r source, destination
            else
                # Prefer a symlink to avoid copying a large VM; fall back to
                # a copy where links are unsupported. Broad rescue is
                # deliberate: ln_s raises NotImplementedError (not a
                # StandardError) on some platforms.
                begin
                    FileUtils.ln_s source, destination
                rescue Exception
                    $stderr.puts "Warning: unable to create soft link, trying to copy."
                    FileUtils.cp_r source, destination
                end
            end

            if $remote and $hostOS == "linux"
                bundle_binary = (Pathname.new(THIS_SCRIPT_PATH).dirname + 'bundle-binary').realpath
                Dir.mktmpdir {
                    | tmpdir |
                    # Generate bundle in a temporary directory so that
                    # we can safely pick it up regardless of its name
                    # (it's the only zip file there).
                    cmdline = [
                        bundle_binary.to_s,
                        "--dest-dir=#{$jscPath.dirname}",
                        "--log-level=debug",
                        $jscPath.to_s
                    ]
                    if not $ldd.nil?
                        cmdline << "--ldd=#{$ldd}"
                    end
                    mysys(cmdline)
                }
            end
        }
    end

    Dir.chdir($outputDir) {
        FileUtils.cp_r HELPERS_PATH, ".helpers"
    }

    # Process every collection named on the command line.
    ARGV.each {
        | collection |
        handleCollection(collection)
    }

    puts
end
|
|
|
|
# Deletes every leftover result file under $outputDir. Only meaningful when
# running from a pre-built bundle.
def cleanOldResults
    raise unless $bundle

    eachResultFile($outputDir) do |resultPath|
        FileUtils.rm_f(resultPath)
    end
end
|
|
|
|
# Removes zero-length .out files so that empty outputs do not show up as
# results. Non-.out files and non-empty outputs are left alone.
def cleanEmptyResultFiles
    eachResultFile($outputDir) do |resultPath|
        next unless resultPath.basename.to_s =~ /\.out$/
        next unless FileTest.size(resultPath) == 0
        FileUtils.rm_f(resultPath)
    end
end
|
|
|
|
# Yields every non-hidden file beneath startingDir (a Pathname), walking
# subdirectories iteratively with an explicit work stack. Entries whose
# names begin with '.' are skipped at every level.
def eachResultFile(startingDir, &block)
    pending = [startingDir]
    while (currentDir = pending.pop)
        Dir.foreach(currentDir) do |name|
            next if name =~ /^\./
            child = currentDir + name
            if child.directory?
                pending.push(child)
            else
                block.call(child)
            end
        end
    end
end
|
|
|
|
# Assigns each plan its index, (re)creates the runner directory with one
# test_script_<index> file per plan, and generates the driver for the
# configured $testRunnerType (:make, :shell, :ruby or :gnuparallel).
def prepareTestRunner(remoteIndex=0)
    raise if $bundle

    $runlist.each_with_index {
        | plan, index |
        plan.index = index
    }

    Dir.mkdir($runnerDir) unless $runnerDir.directory?
    # Collect stale per-test files first so we do not mutate the directory
    # while Dir.foreach is iterating over it.
    toDelete = []
    Dir.foreach($runnerDir) {
        | filename |
        if filename =~ /^test_/
            toDelete << filename
        end
    }

    toDelete.each {
        | filename |
        File.unlink($runnerDir + filename)
    }

    # One self-contained shell script per plan; the runners below just
    # schedule these scripts.
    $runlist.each {
        | plan |
        plan.writeRunScript($runnerDir + "test_script_#{plan.index}")
    }

    case $testRunnerType
    when :make
        prepareMakeTestRunner(remoteIndex)
    when :shell
        prepareShellTestRunner
    when :ruby
        prepareRubyTestRunner
    when :gnuparallel
        prepareGnuParallelTestRunner
    else
        raise "Unknown test runner type: #{$testRunnerType.to_s}"
    end
end
|
|
|
|
# Removes stale test_fail marker files from the runner directory before a
# bundle run. Other test_* files (the run scripts) are left in place.
def cleanRunnerDirectory
    raise unless $bundle
    Dir.foreach($runnerDir) do |name|
        FileUtils.rm_f($runnerDir + name) if name =~ /^test_fail/
    end
end
|
|
|
|
# Runs cmd on remoteHost over ssh and returns its combined stdout as a
# string. Raises (with the child status) unless the command succeeded or
# options[:ignoreFailure] is set.
#
# NOTE(review): cmd is interpolated into a single-quoted shell argument, so
# a cmd containing a single quote would break the quoting — callers are
# expected to pass trusted commands only.
def sshRead(cmd, remoteHost, options={})
    raise unless $remote

    result = ""
    IO.popen("ssh -o NoHostAuthenticationForLocalhost=yes -p #{remoteHost.port} #{remoteHost.user}@#{remoteHost.host} '#{cmd}'", "r") {
        | inp |
        inp.each_line {
            | line |
            result += line
        }
    }
    raise "#{$?}" unless $?.success? or options[:ignoreFailure]
    result
end
|
|
|
|
# Runs cmd on the machine that will execute the tests: the first remote host
# when testing remotely, otherwise locally via the shell. Returns the
# command's stdout.
def runCommandOnTester(cmd)
    if $remote
        sshRead(cmd, $remoteHosts[0])
    else
        `#{cmd}`
    end
end
|
|
|
|
# Determines how many processors the tester machine has, probing in order:
# %NUMBER_OF_PROCESSORS% on Windows, `sysctl hw.activecpu` (macOS/BSD), then
# `nproc` (Linux). Falls back to 1 if nothing produced a positive count.
#
# Fix: the second rescue previously read `numProcessors == 0` — a
# comparison with no effect — where an assignment was intended.
def numberOfProcessors
    if $hostOS == "windows"
        numProcessors = runCommandOnTester("cmd /c echo %NUMBER_OF_PROCESSORS%").to_i
    else
        begin
            numProcessors = runCommandOnTester("sysctl -n hw.activecpu 2>/dev/null").to_i
        rescue
            numProcessors = 0
        end

        if numProcessors == 0
            begin
                numProcessors = runCommandOnTester("nproc --all 2>/dev/null").to_i
            rescue
                numProcessors = 0
            end
        end
    end

    # Never report zero processors; callers use this to size worker pools.
    if numProcessors == 0
        numProcessors = 1
    end
    return numProcessors
end
|
|
|
|
# Runs cmd (an argv array; elements are stringified) and streams its output:
# every line is handed to blk together with the child pid, then echoed to
# stdout immediately.
def runAndMonitorCommandOutput(cmd, &blk)
    argv = cmd.map(&:to_s)
    IO.popen(argv, "r") do |child|
        child.each_line do |line|
            blk.call(child.pid, line)
            puts(line)
            $stdout.flush
        end
    end
end
|
|
|
|
# Serializes access to $runnerDir when counting test scripts; needed because
# remote runs drive one thread per host (see forEachRemote /
# runAndMonitorTestRunnerCommand).
$runnerDirMutex = Mutex.new
|
|
# Runs the test-runner command. Without $progressMeter the command simply
# runs via mysys. With it, the runner's "Running "/"PASS: "/"FAIL: " output
# lines are parsed to keep counts of started/completed/failed tests, and a
# single status line is redrawn in place; any other output is printed
# verbatim above the meter.
#
# options[:ignoreFailure] suppresses the exception on a non-zero exit.
#
# Fixes: the lpad helper was previously a `def` inside the per-line block,
# redefining the method on every line of output — it is now created once
# before the loop; also dropped the accidental `mode="r"` local-variable
# assignment in the IO.popen call.
def runAndMonitorTestRunnerCommand(cmd, options={})
    numberOfTests = 0
    $runnerDirMutex.synchronize {
        Dir.chdir($runnerDir) {
            # -1 for the runscript, and -2 for '..' and '.'
            numberOfTests = Dir.entries(".").count - 3
        }
    }
    unless $progressMeter
        mysys(cmd.join(' '), options)
    else
        running = {}
        didRun = {}
        didFail = {}
        blankLine = true
        prevStringLength = 0

        # Left-pads str to chars characters (unless it is already longer).
        lpad = proc {
            | str, chars |
            str = str.to_s
            if str.length > chars
                str
            else
                "%#{chars}s"%(str)
            end
        }

        IO.popen(cmd.join(' '), "r") {
            | inp |
            inp.each_line {
                | line |
                line = line.scrub.chomp
                if line =~ /^Running /
                    running[$~.post_match] = true
                elsif line =~ /^PASS: /
                    didRun[$~.post_match] = true
                elsif line =~ /^FAIL: /
                    didRun[$~.post_match] = true
                    didFail[$~.post_match] = true
                else
                    # Unrecognized output: erase the meter, print the line,
                    # and let the meter be redrawn below it.
                    unless blankLine
                        print("\r" + " " * prevStringLength + "\r")
                    end
                    puts line
                    blankLine = true
                end

                string = ""
                string += "\r#{lpad.call(didRun.size, numberOfTests.to_s.size)}/#{numberOfTests}"
                unless didFail.empty?
                    string += " (failed #{didFail.size})"
                end
                string += " "
                # One dot per test that has started but not yet finished.
                (running.size - didRun.size).times {
                    string += "."
                }
                # If the new meter is shorter than the old one, blank out the
                # leftover columns before drawing it.
                if string.length < prevStringLength
                    print string
                    print(" " * (prevStringLength - string.length))
                end
                print string
                prevStringLength = string.length
                blankLine = false
                $stdout.flush
            }
        }
        puts
        if !$?.success? && !options[:ignoreFailure]
            raise "Failed to run #{cmd}: #{$?.inspect}"
        end
    end
end
|
|
|
|
# Lazily resolves remoteHost.remoteDirectory by reading the "tempPath" entry
# from ~/.bencher on the remote machine. Does nothing if the directory is
# already known.
def getRemoteDirectoryIfNeeded(remoteHost)
    return if remoteHost.remoteDirectory
    bencherConfig = JSON::parse(sshRead("cat ~/.bencher", remoteHost))
    remoteHost.remoteDirectory = bencherConfig["tempPath"]
end
|
|
|
|
# Creates the staging directory on the remote host and scp's the compressed
# bundle into it.
def copyBundleToRemote(remoteHost)
    noAuthOption = ["-o", "NoHostAuthenticationForLocalhost=yes"]
    target = "#{remoteHost.user}@#{remoteHost.host}"
    mysys(["ssh"] + noAuthOption + ["-p", remoteHost.port.to_s, target, "mkdir -p #{remoteHost.remoteDirectory}"])
    mysys(["scp"] + noAuthOption + ["-P", remoteHost.port.to_s, ($outputDir.dirname + $tarFileName).to_s, "#{target}:#{remoteHost.remoteDirectory}"])
end
|
|
|
|
# Builds a shell fragment exporting the environment the staged jsc needs:
# DYLD_FRAMEWORK_PATH / LD_LIBRARY_PATH pointing at the VM relative to the
# runner directory, the JSC test timeouts and memory limit, and TZ. When
# escape is true the embedded $(...) substitutions are backslash-escaped so
# they survive one extra level of shell quoting (the ssh command line).
def exportBaseEnvironmentVariables(escape)
    dollar = escape ? "\\$" : "$"
    dyldFrameworkPath = "#{dollar}(cd #{$testingFrameworkPath.dirname}; pwd)"
    ldLibraryPath = "#{dollar}(cd #{$testingFrameworkPath.dirname}/..; pwd)/#{$jscPath.dirname}"
    exports = [
        "export DYLD_FRAMEWORK_PATH=#{dyldFrameworkPath} && ",
        "export LD_LIBRARY_PATH=#{ldLibraryPath} &&",
        "export JSCTEST_timeout=#{Shellwords.shellescape(ENV['JSCTEST_timeout'])} && ",
        "export JSCTEST_hardTimeout=#{Shellwords.shellescape(ENV['JSCTEST_hardTimeout'])} && ",
        "export JSCTEST_memoryLimit=#{Shellwords.shellescape(ENV['JSCTEST_memoryLimit'])} && ",
        "export TZ=#{Shellwords.shellescape(ENV['TZ'])} && ",
    ]
    exports.join("")
end
|
|
|
|
# Executes the prepared test runner: locally from $runnerDir, or — when
# $remote — on the host at remoteIndex, in which case the bundle is copied
# over, unpacked fresh, the environment exported, and the runner command run
# inside a single quoted ssh command.
def runTestRunner(remoteIndex=0)
    if $remote
        remoteHost = $remoteHosts[remoteIndex]
        getRemoteDirectoryIfNeeded(remoteHost)
        copyBundleToRemote(remoteHost)
        # Build the double-quoted remote command: unpack a fresh copy of the
        # bundle, cd into its runner directory, export the environment, and
        # run the driver.
        remoteScript = "\""
        remoteScript += "cd #{remoteHost.remoteDirectory} && "
        remoteScript += "rm -rf #{$outputDir.basename} && "
        remoteScript += "tar xzf #{$tarFileName} && "
        remoteScript += "cd #{$outputDir.basename}/.runner && "
        remoteScript += exportBaseEnvironmentVariables(true)
        $envVars.each { |var| remoteScript += "export " << var << "\n" }
        remoteScript += "#{testRunnerCommand(remoteIndex)}\""
        runAndMonitorTestRunnerCommand(["ssh", "-o", "NoHostAuthenticationForLocalhost=yes", "-p", remoteHost.port.to_s, "#{remoteHost.user}@#{remoteHost.host}", remoteScript])
    else
        Dir.chdir($runnerDir) {
            runAndMonitorTestRunnerCommand(Shellwords.shellsplit(testRunnerCommand))
        }
    end
end
|
|
|
|
# Matches one line of `find` output describing a status file:
# "./<prefix><index> <runId-hex> <exitCode> <PASS|FAIL>". The named captures
# are consumed by processStatusLine.
STATUS_RE = /^[.]\/#{STATUS_FILE_PREFIX}(?<index>\d+)\s(?<runId>\h+)\s(?<exitCode>\d+)\s(?<result>#{STATUS_FILE_PASS}|#{STATUS_FILE_FAIL})$/
|
|
|
|
# Parses one status-file line (see STATUS_RE) and records the test's result
# in map, keyed by test index. Lines that fail to parse abort the run;
# results tagged with a stale run ID are ignored. When a test somehow
# reports more than once, diverging results are kept as an array so the
# divergence is visible later, and the duplicate is reported on stderr.
def processStatusLine(map, line)
    md = STATUS_RE.match(line)
    if md.nil?
        $stderr.puts("Could not parse line `#{line}`")
        exit(1)
    end
    index = md[:index].to_i
    runId = md[:runId]
    result = md[:result]

    unless runId == $runUniqueId
        # This may conceivably happen if a remote goes away in the middle of
        # a run and comes back online in the middle of a different run.
        $stderr.puts("Ignoring stale status file for #{index} (ID #{runId} but current ID is #{$runUniqueId})")
        return
    end

    unless map.has_key?(index)
        map[index] = result
        return
    end

    # Duplicate report for this index. One scenario in which this could
    # happen: test T runs on remote host A, the status file reaches A's
    # disk, but the GNU parallel runner never learns of the completion;
    # A crashes; the test is re-scheduled onto host B and completes; A comes
    # back online before the end of the run; collecting status files from
    # all remotes then yields two results for T.
    #
    # map[index] holds a single result string while all observed results
    # agree, and an array once they diverge (kept purely so the divergence
    # can be reported later).
    prev = map[index]
    if prev.is_a?(Array)
        prev.push(result)
    elsif prev != result
        map[index] = [prev, result]
    end
    $stderr.puts("Duplicate state file for #{index}: #{map[index]}")
end
|
|
|
|
# Collects test status files into map (test index -> result) and returns it.
# The find command prints "<path> <contents>" for every non-empty status
# file in the runner directory; each such line is fed to processStatusLine.
def getStatusMap(map={})
    find_cmd = "find . -maxdepth 1 -name \"#{STATUS_FILE_PREFIX}*\" -a -size +0c -exec sh -c \"printf \\\"%s \\\" {}; cat {}\" \\;"
    if $remote
        # Note: here we're using $remoteHosts (instead of getting the
        # list of live remoteHosts from the caller, because there may
        # well be test results on a remoteHost that got rebooted
        # (note, the test results are tagged with a run ID, so we'll
        # ignore any stale results from a previous run).
        $remoteHosts.each_with_index {
            | host, remoteIndex |
            runnerDir = "#{host.remoteDirectory}/#{$outputDir.basename}/.runner"
            # ignoreFailure: the runner dir may legitimately be gone (e.g.
            # the host rebooted and wiped its temp storage).
            output = sshRead("if test -d #{runnerDir}; then cd #{runnerDir}; else false; fi && " + find_cmd, host, :ignoreFailure => true)
            output.split(/\n/).each {
                | line |
                processStatusLine(map, line)
            }
        }
    else
        Dir.chdir($runnerDir) {
            IO.popen(find_cmd).each_line {
                | line |
                processStatusLine(map, line)
            }
        }
    end
    map
end
|
|
|
|
# Returns a hash (used as a set; all values nil) of the plans from $runlist
# that already have a recorded status.
def getCompletedTestsFromStatusMap(statusMap)
    completed = {}
    $runlist.each_with_index do |plan, index|
        completed[plan] = nil if statusMap.has_key?(index)
    end
    completed
end
|
|
|
|
# Consumes the status map and produces the on-disk reports: appends each
# plan to passed/failed/noresult/results, then writes a per-family summary
# to resultsByFamily. A test counts as passed if ANY of its (possibly
# duplicated) results was a pass. Missing results are tallied and reported
# on stderr but do not change the exit code.
def detectFailures(statusMap={})
    raise if $bundle
    noresult = 0
    if statusMap.size == 0
        statusMap = getStatusMap
    end
    familyMap = {}

    $runlist.each_with_index {
        | plan, index |
        unless familyMap[plan.family]
            familyMap[plan.family] = []
        end
        if not statusMap.has_key?(index)
            appendNoResult(plan)
            noresult += 1
            next
        end
        result = nil
        # If any of the results we got was a pass, consider the test
        # successful. The assumption here is that failures are because
        # of infrastructure issues (which is why the test was run
        # multiple times in the first place).
        # (include? works whether the entry is a single result string or an
        # array of diverging results — String#include? vs Array#include?.)
        testWasSuccessful = statusMap[index].include?(STATUS_FILE_PASS)
        if testWasSuccessful
            appendPass(plan)
            result = "PASS"
        else
            appendFailure(plan)
            result = "FAIL"
        end
        appendResult(plan, testWasSuccessful)
        familyMap[plan.family] << { :result => result, :plan => plan }
    }

    if noresult > 0
        $stderr.puts("Could not get the exit status for #{noresult} tests")
        # We can't change our exit code, as run-javascriptcore-tests
        # expects 0 even when there are failures.
    end

    File.open($outputDir + "resultsByFamily", "w") {
        | outp |
        first = true
        familyMap.keys.sort.each {
            | familyName |
            if first
                first = false
            else
                outp.puts
            end

            outp.print "#{familyName}:"

            numPassed = 0
            familyMap[familyName].each {
                | entry |
                if entry[:result] == "PASS"
                    numPassed += 1
                end
            }

            # Families that uniformly passed or failed get a one-line
            # summary; mixed families list every test individually.
            if numPassed == familyMap[familyName].size
                outp.puts " PASSED"
            elsif numPassed == 0
                outp.puts " FAILED"
            else
                outp.puts
                familyMap[familyName].each {
                    | entry |
                    outp.puts "    #{entry[:plan].name}: #{entry[:result]}"
                }
            end
        }
    }
end
|
|
|
|
# Produces $tarFileName in $outputDir's parent, containing the output
# directory by its basename.
def compressBundle
    parent = "#{$outputDir}/.."
    cmd = "cd #{parent} && tar -czf #{$tarFileName} #{$outputDir.basename}"
    $stderr.puts ">> #{cmd}" if $verbosity >= 2
    raise unless system(cmd)
end
|
|
|
|
# Removes file (recursively, ignoring absence) unless we are running from a
# pre-built bundle, whose contents must be left intact.
def clean(file)
    return if $bundle
    FileUtils.rm_rf file
end
|
|
|
|
# Start from a clean slate: remove result files and staging directories left
# over from a previous run (no-ops when running from a bundle).
clean($outputDir + "failed")
clean($outputDir + "passed")
clean($outputDir + "noresult")
clean($outputDir + "results")
clean($outputDir + "resultsByFamily")
clean($outputDir + ".vm")
clean($outputDir + ".helpers")
clean($outputDir + ".runner")
clean($outputDir + ".tests")
clean($outputDir + "_payload")

Dir.mkdir($outputDir) unless $outputDir.directory?

$outputDir = $outputDir.realpath
$runnerDir = $outputDir + ".runner"

# Decide the degree of parallelism: an explicit option wins, then the
# WEBKIT_TEST_CHILD_PROCESSES environment variable, then one child per
# processor.
if !$numChildProcesses
    if ENV["WEBKIT_TEST_CHILD_PROCESSES"]
        $numChildProcesses = ENV["WEBKIT_TEST_CHILD_PROCESSES"].to_i
        $numChildProcessesSetByUser = true
    else
        $numChildProcesses = numberOfProcessors
    end
end

if ENV["JSCTEST_timeout"]
    # In the worst case, the processors just interfere with each other.
    # Increase the timeout proportionally to the number of processors.
    ENV["JSCTEST_timeout"] = (ENV["JSCTEST_timeout"].to_i.to_f * Math.sqrt($numChildProcesses)).to_i.to_s
end

# We do not adjust hardTimeout. If we are not producing any results during 1200 seconds, buildbot terminates the tests. So we should terminate hung tests.

if !ENV["JSCTEST_memoryLimit"] && $memoryLimited
    ENV["JSCTEST_memoryLimit"] = (600 * 1024 * 1024).to_s
end

# Some tests fail if the time zone is not set to US/Pacific
# https://webkit.org/b/136363
# Set as done in run-javascript-tests
ENV["TZ"] = "US/Pacific";
|
|
|
|
# Entry point when invoked from inside a pre-built bundle on the test
# device: clear stale failure markers and results, run the tests in place,
# and drop empty outputs. (Order matters; no restyle is meaningful here.)
def runBundle
    raise unless $bundle

    cleanRunnerDirectory
    cleanOldResults
    runTestRunner
    cleanEmptyResultFiles
end
|
|
|
|
# Entry point for an ordinary local run: stage everything, generate the
# runner, execute it, and evaluate the results.
def runNormal
    raise if $bundle or $tarball

    prepareBundle
    prepareTestRunner
    runTestRunner
    cleanEmptyResultFiles
    detectFailures
end
|
|
|
|
# Entry point for --tarball mode: stage everything and compress it into a
# self-contained bundle instead of running the tests.
def runTarball
    raise unless $tarball

    prepareBundle
    prepareTestRunner
    compressBundle
end
|
|
|
|
# Invokes blk(index, remoteHost) concurrently — one thread per host — and
# returns the list of hosts whose block completed without error. With
# options[:dropOnFailure], a host whose thread raised CommandExecutionFailed
# is dropped from the returned list (logged when $verbosity > 0); otherwise
# the exception propagates from the join.
def forEachRemote(remoteHosts, options={}, &blk)
    workers = remoteHosts.each_index.map do |index|
        Thread.new { blk.call(index, remoteHosts[index]) }
    end

    liveRemotes = []
    workers.each_with_index do |worker, index|
        begin
            worker.join
            liveRemotes << remoteHosts[index]
        rescue CommandExecutionFailed
            raise unless options[:dropOnFailure]
            if $verbosity > 0
                $stderr.puts("Dropping failed remote #{remoteHosts[index]}")
            end
        end
    end
    liveRemotes
end
|
|
|
|
# Entry point for a (non-gnuparallel) remote run: stage the bundle, prepare
# a runner per remote host, compress once, then run it on every host
# concurrently and evaluate results.
def runRemote
    raise unless $remote

    prepareBundle
    $remoteHosts.each_index {
        | index |
        prepareTestRunner(index)
    }
    compressBundle
    forEachRemote($remoteHosts) {
        | index |
        runTestRunner(index)
    }
    detectFailures
end
|
|
|
|
# Writes a GNU parallel job file at $runnerDir/name containing one
# "./test_script_<index>" line per plan in runlist, omitting plans present
# as keys of exclude.
def prepareGnuParallelRunnerJobs(name, runlist, exclude)
    FileUtils.mkdir_p($runnerDir)
    File.open($runnerDir + name, "w") do |jobFile|
        runlist.each do |plan|
            next if exclude.has_key?(plan)
            jobFile.puts("./test_script_#{plan.index}")
        end
    end
end
|
|
|
|
# Prepares the two GNU parallel job lists: "parallel-tests" (everything
# except serial tests and exclusions) and "serial-tests".
def prepareGnuParallelTestRunner(exclude={})
    serialTests = $serialRunlist.each_with_object({}) { |plan, set| set[plan] = nil }
    prepareGnuParallelRunnerJobs("parallel-tests", $runlist, exclude.merge(serialTests))
    prepareGnuParallelRunnerJobs("serial-tests", $serialRunlist, exclude)
end
|
|
|
|
# Creates an executable ssh wrapper script (for GNU parallel's --slf
# mechanism) and yields its path. The wrapper takes
# (remotedir, port, user, host, --, command...) and pipes the command into a
# persistent ssh connection whose RemoteCommand first cds into remotedir;
# if remotedir is missing on the remote (e.g. a reboot wiped it) it echoes
# a host-tagged marker that runGnuParallelRunner watches for.
def withGnuParallelSshWrapper(&blk)
    Tempfile.open('ssh-wrapper', $runnerDir) {
        | wrapper |
        head =
<<'EOF'
#!/bin/sh

remotedir="$1"
shift

remoteport="$1"
shift

remoteuser="$1"
shift

remotehost="$1"
shift

if test "x$1" != "x--"; then
    echo "Expected '--' at this position, instead got $1" 1>&2
    exit 3
fi
shift
EOF
        wrapper.puts(head +
                     "echo \"$@\" | ssh -o ControlPath=./%C -o ControlMaster=auto -o ControlPersist=10m -o NoHostAuthenticationForLocalhost=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p \"$remoteport\" -l \"$remoteuser\" -o RemoteCommand=\"if test -d '$remotedir'; then cd '$remotedir'; else echo '#{PARALLEL_REMOTE_WRAPPER_MARK_BEGIN}${remotehost}#{PARALLEL_REMOTE_WRAPPER_MARK_END}'; false; fi && sh -s\" \"$remotehost\""
        )
        FileUtils.chmod("ugo=rx", wrapper.path)
        wrapper.close # Avoid ETXTBUSY
        blk.call(wrapper.path)
    }
end
|
|
|
|
# Creates a GNU parallel --slf (ssh login file) listing every remote host
# via the generated wrapper script, and yields the login file's path.
def withGnuParallelSshLoginFile(remoteHosts, &blk)
    withGnuParallelSshWrapper do |wrapper|
        Tempfile.open('slf', $runnerDir) do |loginFile|
            remoteHosts.each do |remoteHost|
                loginFile.puts("#{wrapper} #{remoteHost.remoteDirectory} #{remoteHost.port} #{remoteHost.user} #{remoteHost.host}")
            end
            loginFile.flush
            blk.call(loginFile.path)
        end
    end
end
|
|
|
|
# Unpacks the copied bundle on every remote host, removing any previously
# unpacked copy first. Hosts where the unpack fails are dropped from the
# returned list.
def unpackBundleGnuParallel(remoteHosts)
    forEachRemote(remoteHosts, :dropOnFailure => true) do |_, remoteHost|
        remoteCmd = "cd #{Shellwords.shellescape(remoteHost.remoteDirectory)} && rm -rf #{$outputDir.basename} && tar xzf #{$tarFileName}"
        mysys(["ssh", "-o", "NoHostAuthenticationForLocalhost=yes",
               "-p", remoteHost.port.to_s,
               "#{remoteHost.user}@#{remoteHost.host}",
               remoteCmd])
    end
end
|
|
|
|
# Drives one GNU parallel invocation over the job file `inputs` across
# remoteHosts (via the --slf login file). The combined output is watched
# for the "state lost" markers emitted by the ssh wrapper / remote command;
# when seen, the parallel process is killed so the caller's high-level
# retry loop can re-provision and re-run.
#
# options[:parallelJobsOnEachHost] overrides the per-host job count
# (default "+1", i.e. ncpus + 1, unless the user fixed $numChildProcesses).
def runGnuParallelRunner(remoteHosts, inputs, options={})
    timeout = 300
    if ENV["JSCTEST_timeout"]
        timeout = ENV["JSCTEST_timeout"].to_f.ceil.to_i
    end
    # Keep ncpus + 1 jobs running by default to avoid any stalls due
    # to ssh latency.
    parallelJobsOnEachHost = "+1"
    if $numChildProcessesSetByUser
        parallelJobsOnEachHost = $numChildProcesses
    end
    if options[:parallelJobsOnEachHost]
        parallelJobsOnEachHost = options[:parallelJobsOnEachHost]
    end
    # Marker regexes: the wrapper's marker carries the host name between the
    # BEGIN/END marks; the remote command's marker does not.
    markerWithHost = Regexp.new(".*#{PARALLEL_REMOTE_WRAPPER_MARK_BEGIN}(.*)#{PARALLEL_REMOTE_WRAPPER_MARK_END}.*")
    markerWithoutHost = Regexp.new(".*#{PARALLEL_REMOTE_STATE_LOST_MARKER}.*")
    withGnuParallelSshLoginFile(remoteHosts) {
        | slf |
        cmd = [
            "parallel",
            "-j", "#{parallelJobsOnEachHost}",
            # NB: the tests exit with 0 regardless of whether they
            # passed or failed, so this will only retry tests that we
            # weren't able to get a result for, likely because the
            # connection went down or the remote OOM'd/crashed).
            "--retries", "5",
            "--line-buffer", # we know our output is line-oriented
            "--slf", slf,
            "--timeout", timeout.to_s,
            "-a", inputs,
            "if test -e #{$outputDir.basename}/.runner; then cd #{$outputDir.basename}/.runner; else echo #{PARALLEL_REMOTE_STATE_LOST_MARKER}; false; fi && " +
            exportBaseEnvironmentVariables(false) +
            $envVars.collect { |var | "export #{var} &&"}.join("") +
            "sh "
        ]
        runAndMonitorCommandOutput(cmd) {
            | pid, line |
            host = "<unknown host>"
            md = markerWithoutHost.match(line)
            if md.nil?
                md = markerWithHost.match(line)
                host = md[1] unless md.nil?
            end
            if not md.nil?
                if $verbosity > 0
                    $stderr.puts("Remote host lost state, triggering high-level retry: #{host}")
                end
                # We could try to reprovision this specific remote
                # host, but that seems needlessly complicated (we
                # don't expect the remotes to go down every
                # minute...). Simply kill the GNU parallel process.
                Process.kill("TERM", pid)
            end
        }
    }
end
|
|
|
|
# Entry point for GNU-parallel remote runs: stage, prepare, compress, then
# repeatedly provision the remotes and run serial + parallel job lists until
# every test has a result (or the retry budget runs out), finally reporting
# via detectFailures.
def runGnuParallel
    raise unless $remote
    prepareBundle
    prepareTestRunner
    compressBundle
    statusMap = {}
    # This is a high-level retry loop. If a remote host goes away in
    # the middle of a run and either doesn't come back online or comes
    # back having removed the remote directory (*), getStatusMap will be
    # missing results for these tests, allowing us to retry them in
    # the next iteration.
    #
    # (*) This may well happen when running the tests on embedded
    # boards that regularly corrupt their SD card. Definitely an issue
    # for the ci20 boards used to test MIPS.
    #
    # The number of tries scales with the number of remotes in use,
    # with a reasonable floor.
    tries = [3, $remoteHosts.size / 2].max
    tries.times {
        | iteration |
        remoteHosts = $remoteHosts
        # If the preparatory steps fail, drop the remote host from our
        # list. Otherwise, if it comes back online in the middle of an
        # iteration, we'll try to run test jobs on it, possibly using
        # an unrelated bundle from a previous run.
        remoteHosts = forEachRemote(remoteHosts, {:dropOnFailure => true}) {
            | _, remoteHost |
            getRemoteDirectoryIfNeeded(remoteHost)
            copyBundleToRemote(remoteHost)
        }
        remoteHosts = unpackBundleGnuParallel(remoteHosts)
        if remoteHosts.size == 0
            # Either everything is down, or everything got
            # rebooted. In the latter case, allow enough time for the
            # remote boards to boot up, then retry.
            waitInterval = 60
            $stderr.puts("All remote hosts failed, retrying after #{waitInterval}s")
            sleep(waitInterval)
            next
        end

        # Serial tests must not share a host with anything else, hence one
        # job per host; the rest runs at full parallelism.
        runGnuParallelRunner(remoteHosts, $runnerDir + "serial-tests",
                             { :parallelJobsOnEachHost => 1})
        runGnuParallelRunner(remoteHosts, $runnerDir + "parallel-tests")
        statusMap = getStatusMap(statusMap)
        completedTests = getCompletedTestsFromStatusMap(statusMap)
        break unless completedTests.size != $runlist.size
        if completedTests.size > $runlist.size
            raise "Test count mismatch: #{completedTests.size} > #{$runlist.size}"
        end
        # Regenerate the lists of tests to run
        prepareGnuParallelTestRunner(completedTests)
        $stderr.puts("After try #{iteration + 1}/#{tries}: got results for #{completedTests.size}/#{$runlist.size} tests, #{remoteHosts.size}/#{$remoteHosts.size} hosts live")
    }
    detectFailures(statusMap)
end
|
|
|
|
puts

# The gnuparallel runner only makes sense against remote hosts.
if $testRunnerType == :gnuparallel
    raise unless $remote
end

# Top-level dispatch on the run mode established by the option parsing
# earlier in the file: in-bundle run, remote run (gnuparallel or classic),
# tarball creation, or a normal local run.
if $bundle
    runBundle
elsif $remote
    if $testRunnerType == :gnuparallel
        runGnuParallel
    else
        runRemote
    end
elsif $tarball
    runTarball
else
    runNormal
end
|