FTL should use LLVM intrinsics for OSR exit, watchpoints, inline caches, and stack layout
https://bugs.webkit.org/show_bug.cgi?id=122318
Reviewed by Geoffrey Garen.
All of this now works against LLVM trunk. This patch updates our implementation to match,
and removes all of the old code that tried to do OSR exits and heap accesses without
the benefit of those intrinsics.
In particular:
- StackMaps parsing now uses the new, less compact but more future-proof format; see the
location-decoding sketch after this list.
- Remove the ftlUsesStackmaps() option and hard-code ftlUsesStackmaps = true. Remove
all code for the ftlUsesStackmaps = false case, since it was only needed back when we
didn't have the intrinsics.
- Remove the other experimental OSR options (useLLVMOSRExitIntrinsic,
ftlTrapsOnOSRExit, and FTLOSRExitOmitsMarshalling).
- Remove LowerDFGToLLVM's use of the ExitThunkGenerator, since we don't need to generate
the exit thunks until after we parse the stackmaps; see the intrinsic-emission sketch
after this list.
- Remove all of the exit thunk and compiler code for the no-stackmaps case.
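To illustrate the shape of the intrinsic-based approach, here is a minimal, hedged sketch
(not FTL's actual lowering code, which goes through its Output/IntrinsicRepository
abstractions) of emitting an OSR exit as a call to llvm.experimental.stackmap through the
LLVM-C API. The function name emitOSRExitStackmap and the exitID/liveState parameters are
illustrative stand-ins, not identifiers from the WebKit tree.

```cpp
// Hedged sketch (not FTL's actual code): emit an OSR exit as a call to the
// llvm.experimental.stackmap intrinsic through the LLVM-C API. The intrinsic
// takes a 64-bit ID, a 32-bit shadow-byte count, and then any number of live
// values; LLVM records where each live value ends up in the stackmaps section
// it emits alongside the generated code.
#include <llvm-c/Core.h>
#include <cstdint>
#include <vector>

static LLVMValueRef emitOSRExitStackmap(
    LLVMModuleRef module, LLVMBuilderRef builder,
    uint64_t exitID, const std::vector<LLVMValueRef>& liveState)
{
    LLVMContextRef context = LLVMGetModuleContext(module);
    LLVMTypeRef int64 = LLVMInt64TypeInContext(context);
    LLVMTypeRef int32 = LLVMInt32TypeInContext(context);

    // declare void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>, ...)
    LLVMValueRef intrinsic = LLVMGetNamedFunction(module, "llvm.experimental.stackmap");
    if (!intrinsic) {
        LLVMTypeRef params[] = { int64, int32 };
        LLVMTypeRef type = LLVMFunctionType(
            LLVMVoidTypeInContext(context), params, 2, /* isVarArg = */ true);
        intrinsic = LLVMAddFunction(module, "llvm.experimental.stackmap", type);
    }

    std::vector<LLVMValueRef> args;
    args.push_back(LLVMConstInt(int64, exitID, false));
    args.push_back(LLVMConstInt(int32, 0, false)); // no shadow bytes in this sketch
    args.insert(args.end(), liveState.begin(), liveState.end());

    // A void-returning call must have an empty name.
    return LLVMBuildCall(builder, intrinsic, args.data(),
        static_cast<unsigned>(args.size()), "");
}
```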
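And here is a hedged sketch of the other half: recovering one live value from a parsed
stackmap location when compiling an exit stub. The struct and helper names (ParsedLocation,
recoverValue, readRegister) are hypothetical; the five location kinds and their meanings
follow LLVM's StackMaps documentation, and the real code lives in FTLStackMaps.cpp and
FTLLocation.cpp.

```cpp
// Hedged sketch of decoding one stackmap location at OSR-exit time. Layout and
// names are hypothetical; the location kinds match LLVM's StackMaps docs.
#include <cstdint>
#include <cstdlib>

struct ParsedLocation {
    enum Kind { Register = 1, Direct = 2, Indirect = 3, Constant = 4, ConstantIndex = 5 };
    Kind kind;
    uint16_t dwarfRegNum;     // meaningful for Register, Direct, Indirect
    int32_t offsetOrConstant; // offset, small constant, or constants-table index
};

// "readRegister" is a hypothetical callback returning the value a DWARF-numbered
// register held at the exit; "constants" is the stackmap's large-constants table.
static uint64_t recoverValue(
    const ParsedLocation& location,
    uint64_t (*readRegister)(uint16_t dwarfRegNum),
    const uint64_t* constants)
{
    switch (location.kind) {
    case ParsedLocation::Register:
        return readRegister(location.dwarfRegNum);
    case ParsedLocation::Direct:
        // The value *is* the address register + offset (e.g. a stack slot's address).
        return readRegister(location.dwarfRegNum) + location.offsetOrConstant;
    case ParsedLocation::Indirect:
        // The value lives in memory at register + offset; load it.
        return *reinterpret_cast<uint64_t*>(
            readRegister(location.dwarfRegNum) + location.offsetOrConstant);
    case ParsedLocation::Constant:
        // Small constants are stored inline in the record.
        return static_cast<uint64_t>(static_cast<int64_t>(location.offsetOrConstant));
    case ParsedLocation::ConstantIndex:
        // Large constants live in the stackmap's constants table.
        return constants[location.offsetOrConstant];
    }
    abort();
}
```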
* dfg/DFGDriver.cpp:
(JSC::DFG::compileImpl):
* ftl/FTLCompile.cpp:
(JSC::FTL::mmAllocateDataSection):
* ftl/FTLExitThunkGenerator.cpp:
(JSC::FTL::ExitThunkGenerator::emitThunk):
* ftl/FTLIntrinsicRepository.h:
* ftl/FTLLocation.cpp:
(JSC::FTL::Location::forStackmaps):
* ftl/FTLLowerDFGToLLVM.cpp:
(JSC::FTL::LowerDFGToLLVM::LowerDFGToLLVM):
(JSC::FTL::LowerDFGToLLVM::lower):
(JSC::FTL::LowerDFGToLLVM::compileGetById):
(JSC::FTL::LowerDFGToLLVM::compileInvalidationPoint):
(JSC::FTL::LowerDFGToLLVM::appendOSRExit):
(JSC::FTL::LowerDFGToLLVM::emitOSRExitCall):
(JSC::FTL::LowerDFGToLLVM::callStackmap):
(JSC::FTL::LowerDFGToLLVM::addExitArgumentForNode):
* ftl/FTLOSRExitCompilationInfo.h:
(JSC::FTL::OSRExitCompilationInfo::OSRExitCompilationInfo):
* ftl/FTLOSRExitCompiler.cpp:
(JSC::FTL::compileStub):
(JSC::FTL::compileFTLOSRExit):
* ftl/FTLStackMaps.cpp:
(JSC::FTL::StackMaps::Location::parse):
(JSC::FTL::StackMaps::parse):
(WTF::printInternal):
* ftl/FTLStackMaps.h:
* ftl/FTLThunks.cpp:
(JSC::FTL::osrExitGenerationThunkGenerator):
* ftl/FTLThunks.h:
(JSC::FTL::Thunks::getOSRExitGenerationThunk):
* runtime/Options.h:
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@158535 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
index 36fba88..4952a41 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
@@ -42,11 +42,7 @@
using namespace DFG;
-// This implements two flavors of OSR exit: one that involves having LLVM intrinsics to help
-// OSR exit, and one that doesn't. The one that doesn't will get killed off, so we don't attempt
-// to share code between the two.
-
-static void compileStubWithOSRExitStackmap(
+static void compileStub(
unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
{
StackMaps::Record* record;
@@ -178,169 +174,6 @@
toCString(*record).data()));
}
-static void compileStubWithoutOSRExitStackmap(
- unsigned exitID, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
-{
- CCallHelpers jit(vm, codeBlock);
-
- // Make ourselves look like a real C function.
- jit.push(MacroAssembler::framePointerRegister);
- jit.move(MacroAssembler::stackPointerRegister, MacroAssembler::framePointerRegister);
-
- // This is actually fairly easy, even though it is horribly gross. We know that
- // LLVM would have passed us all of the state via arguments. We know how to get
- // the arguments. And, we know how to pop stack back to the JIT stack frame, sort
- // of: we know that it's two frames beneath us. This is terrible and I feel
- // ashamed of it, but it will work for now.
-
- CArgumentGetter arguments(jit, 2);
-
- // First recover our call frame and tag thingies.
- arguments.loadNextPtr(GPRInfo::callFrameRegister);
- jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
- jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);
-
- // Do some value profiling.
- if (exit.m_profileValueFormat != InvalidValueFormat) {
- arguments.loadNextAndBox(exit.m_profileValueFormat, GPRInfo::nonArgGPR0);
-
- if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
- CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
- if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
- jit.loadPtr(MacroAssembler::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR1);
- jit.storePtr(GPRInfo::nonArgGPR1, arrayProfile->addressOfLastSeenStructure());
- jit.load8(MacroAssembler::Address(GPRInfo::nonArgGPR1, Structure::indexingTypeOffset()), GPRInfo::nonArgGPR1);
- jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::nonArgGPR2);
- jit.lshift32(GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
- jit.or32(GPRInfo::nonArgGPR2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
- }
- }
-
- if (!!exit.m_valueProfile)
- jit.store64(GPRInfo::nonArgGPR0, exit.m_valueProfile.getSpecFailBucket(0));
- }
-
- // Use a scratch buffer to transfer all values.
- ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(sizeof(EncodedJSValue) * exit.m_values.size());
- EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
-
- // Start by dumping all argument exit values to the stack.
-
- Vector<ExitArgumentForOperand, 16> sortedArguments;
- for (unsigned i = exit.m_values.size(); i--;) {
- ExitValue value = exit.m_values[i];
- int operand = exit.m_values.operandForIndex(i);
-
- if (!value.isArgument())
- continue;
-
- sortedArguments.append(ExitArgumentForOperand(value.exitArgument(), VirtualRegister(operand)));
- }
- std::sort(sortedArguments.begin(), sortedArguments.end(), lesserArgumentIndex);
-
- for (unsigned i = 0; i < sortedArguments.size(); ++i) {
- ExitArgumentForOperand argument = sortedArguments[i];
-
- arguments.loadNextAndBox(argument.exitArgument().format(), GPRInfo::nonArgGPR0);
- jit.store64(
- GPRInfo::nonArgGPR0, scratch + exit.m_values.indexForOperand(argument.operand()));
- }
-
- // All temp registers are free at this point.
-
- // Move anything on the stack into the appropriate place in the scratch buffer.
-
- for (unsigned i = exit.m_values.size(); i--;) {
- ExitValue value = exit.m_values[i];
-
- switch (value.kind()) {
- case ExitValueInJSStack:
- jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
- break;
- case ExitValueInJSStackAsInt32:
- jit.load32(
- AssemblyHelpers::addressFor(value.virtualRegister()).withOffset(
- OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
- GPRInfo::regT0);
- jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- break;
- case ExitValueInJSStackAsInt52:
- jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
- jit.rshift64(
- AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
- jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
- break;
- case ExitValueInJSStackAsDouble:
- jit.loadDouble(AssemblyHelpers::addressFor(value.virtualRegister()), FPRInfo::fpRegT0);
- jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
- break;
- case ExitValueDead:
- case ExitValueConstant:
- case ExitValueArgument:
- // Don't do anything for these.
- continue;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- jit.store64(GPRInfo::regT0, scratch + i);
- }
-
- // Move everything from the scratch buffer to the stack; this also reifies constants.
-
- for (unsigned i = exit.m_values.size(); i--;) {
- ExitValue value = exit.m_values[i];
- int operand = exit.m_values.operandForIndex(i);
-
- MacroAssembler::Address address = AssemblyHelpers::addressFor(operand);
-
- switch (value.kind()) {
- case ExitValueDead:
- jit.store64(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), address);
- break;
- case ExitValueConstant:
- jit.store64(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), address);
- break;
- case ExitValueInJSStack:
- case ExitValueInJSStackAsInt32:
- case ExitValueInJSStackAsInt52:
- case ExitValueInJSStackAsDouble:
- case ExitValueArgument:
- jit.load64(scratch + i, GPRInfo::regT0);
- jit.store64(GPRInfo::regT0, address);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- handleExitCounts(jit, exit);
- reifyInlinedCallFrames(jit, exit);
-
- jit.pop(MacroAssembler::framePointerRegister);
- jit.move(MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
- jit.pop(MacroAssembler::framePointerRegister);
- jit.pop(GPRInfo::nonArgGPR0); // ignore the result.
-
- if (exit.m_lastSetOperand.isValid()) {
- jit.load64(
- AssemblyHelpers::addressFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
- }
-
- adjustAndJumpToTarget(jit, exit);
-
- LinkBuffer patchBuffer(*vm, &jit, codeBlock);
- exit.m_code = FINALIZE_CODE_IF(
- shouldShowDisassembly(),
- patchBuffer,
- ("FTL OSR exit #%u (bc#%u, %s) from %s, with operands = %s",
- exitID, exit.m_codeOrigin.bytecodeIndex,
- exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
- toCString(ignoringContext<DumpContext>(exit.m_values)).data()));
-}
-
extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID)
{
SamplingRegion samplingRegion("FTL OSR Exit Compilation");
@@ -361,10 +194,7 @@
prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
- if (Options::ftlUsesStackmaps())
- compileStubWithOSRExitStackmap(exitID, jitCode, exit, vm, codeBlock);
- else
- compileStubWithoutOSRExitStackmap(exitID, exit, vm, codeBlock);
+ compileStub(exitID, jitCode, exit, vm, codeBlock);
RepatchBuffer repatchBuffer(codeBlock);
repatchBuffer.relink(