Enhance the MacroAssembler and LinkBuffer to support pointer profiling.
https://bugs.webkit.org/show_bug.cgi?id=183623
<rdar://problem/38443314>
Reviewed by Michael Saboff.
Source/JavaScriptCore:
1. Added a PtrTag argument to indirect call() and indirect jump() MacroAssembler
emitters to support pointer profiling.
2. Also added tagPtr(), untagPtr(), and removePtrTag() placeholder methods.
3. Added a PtrTag to LinkBuffer finalizeCodeWithoutDisassembly() and clients.
4. Updated clients to pass a PtrTag. For the most part, I just apply NoPtrTag as
a placeholder until we have time to analyze what pointer profile each client
site needs.
5. Apply PtrTags to the YarrJIT.
* assembler/ARM64Assembler.h:
(JSC::ARM64Assembler::linkJumpOrCall):
* assembler/AbstractMacroAssembler.h:
(JSC::AbstractMacroAssembler::getLinkerAddress):
(JSC::AbstractMacroAssembler::tagPtr):
(JSC::AbstractMacroAssembler::untagPtr):
(JSC::AbstractMacroAssembler::removePtrTag):
* assembler/LinkBuffer.cpp:
(JSC::LinkBuffer::finalizeCodeWithoutDisassembly):
(JSC::LinkBuffer::finalizeCodeWithDisassembly):
* assembler/LinkBuffer.h:
(JSC::LinkBuffer::link):
(JSC::LinkBuffer::locationOfNearCall):
(JSC::LinkBuffer::locationOf):
* assembler/MacroAssemblerARM.h:
(JSC::MacroAssemblerARM::jump):
(JSC::MacroAssemblerARM::call):
(JSC::MacroAssemblerARM::readCallTarget):
* assembler/MacroAssemblerARM64.h:
(JSC::MacroAssemblerARM64::call):
(JSC::MacroAssemblerARM64::jump):
(JSC::MacroAssemblerARM64::readCallTarget):
(JSC::MacroAssemblerARM64::linkCall):
* assembler/MacroAssemblerARMv7.h:
(JSC::MacroAssemblerARMv7::jump):
(JSC::MacroAssemblerARMv7::relativeTableJump):
(JSC::MacroAssemblerARMv7::call):
(JSC::MacroAssemblerARMv7::readCallTarget):
* assembler/MacroAssemblerCodeRef.cpp:
(JSC::MacroAssemblerCodePtr::createLLIntCodePtr):
(JSC::MacroAssemblerCodeRef::createLLIntCodeRef):
* assembler/MacroAssemblerCodeRef.h:
(JSC::FunctionPtr::FunctionPtr):
(JSC::FunctionPtr::value const):
(JSC::MacroAssemblerCodePtr:: const):
(JSC::MacroAssemblerCodeRef::MacroAssemblerCodeRef):
(JSC::MacroAssemblerCodeRef::retaggedCode const):
* assembler/MacroAssemblerMIPS.h:
(JSC::MacroAssemblerMIPS::jump):
(JSC::MacroAssemblerMIPS::call):
(JSC::MacroAssemblerMIPS::readCallTarget):
* assembler/MacroAssemblerX86.h:
(JSC::MacroAssemblerX86::call):
(JSC::MacroAssemblerX86::jump):
(JSC::MacroAssemblerX86::readCallTarget):
* assembler/MacroAssemblerX86Common.cpp:
(JSC::MacroAssembler::probe):
* assembler/MacroAssemblerX86Common.h:
(JSC::MacroAssemblerX86Common::jump):
(JSC::MacroAssemblerX86Common::call):
* assembler/MacroAssemblerX86_64.h:
(JSC::MacroAssemblerX86_64::call):
(JSC::MacroAssemblerX86_64::jump):
(JSC::MacroAssemblerX86_64::readCallTarget):
* assembler/testmasm.cpp:
(JSC::compile):
(JSC::invoke):
* b3/B3Compile.cpp:
(JSC::B3::compile):
* b3/B3LowerMacros.cpp:
* b3/air/AirCCallSpecial.cpp:
(JSC::B3::Air::CCallSpecial::generate):
* b3/air/testair.cpp:
* b3/testb3.cpp:
(JSC::B3::invoke):
(JSC::B3::testInterpreter):
(JSC::B3::testEntrySwitchSimple):
(JSC::B3::testEntrySwitchNoEntrySwitch):
(JSC::B3::testEntrySwitchWithCommonPaths):
(JSC::B3::testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint):
(JSC::B3::testEntrySwitchLoop):
* bytecode/AccessCase.cpp:
(JSC::AccessCase::generateImpl):
* bytecode/AccessCaseSnippetParams.cpp:
(JSC::SlowPathCallGeneratorWithArguments::generateImpl):
* bytecode/InlineAccess.cpp:
(JSC::linkCodeInline):
(JSC::InlineAccess::rewireStubAsJump):
* bytecode/PolymorphicAccess.cpp:
(JSC::AccessGenerationState::emitExplicitExceptionHandler):
(JSC::PolymorphicAccess::regenerate):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compileExceptionHandlers):
(JSC::DFG::JITCompiler::link):
(JSC::DFG::JITCompiler::compileFunction):
(JSC::DFG::JITCompiler::noticeCatchEntrypoint):
* dfg/DFGJITCompiler.h:
(JSC::DFG::JITCompiler::appendCall):
* dfg/DFGJITFinalizer.cpp:
(JSC::DFG::JITFinalizer::finalize):
(JSC::DFG::JITFinalizer::finalizeFunction):
* dfg/DFGOSRExit.cpp:
(JSC::DFG::OSRExit::emitRestoreArguments):
(JSC::DFG::OSRExit::compileOSRExit):
* dfg/DFGOSRExitCompilerCommon.cpp:
(JSC::DFG::handleExitCounts):
(JSC::DFG::osrWriteBarrier):
(JSC::DFG::adjustAndJumpToTarget):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::emitSwitchIntJump):
(JSC::DFG::SpeculativeJIT::emitSwitchImm):
(JSC::DFG::SpeculativeJIT::emitSwitchStringOnString):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGThunks.cpp:
(JSC::DFG::osrExitThunkGenerator):
(JSC::DFG::osrExitGenerationThunkGenerator):
(JSC::DFG::osrEntryThunkGenerator):
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* ftl/FTLJITFinalizer.cpp:
(JSC::FTL::JITFinalizer::finalizeCommon):
* ftl/FTLLazySlowPath.cpp:
(JSC::FTL::LazySlowPath::generate):
* ftl/FTLLink.cpp:
(JSC::FTL::link):
* ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::lower):
(JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargsSpread):
(JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargs):
(JSC::FTL::DFG::LowerDFGToB3::compileCallEval):
* ftl/FTLOSRExitCompiler.cpp:
(JSC::FTL::compileStub):
(JSC::FTL::compileFTLOSRExit):
* ftl/FTLSlowPathCall.cpp:
(JSC::FTL::SlowPathCallContext::makeCall):
* ftl/FTLThunks.cpp:
(JSC::FTL::genericGenerationThunkGenerator):
(JSC::FTL::osrExitGenerationThunkGenerator):
(JSC::FTL::lazySlowPathGenerationThunkGenerator):
(JSC::FTL::slowPathCallThunkGenerator):
* jit/AssemblyHelpers.cpp:
(JSC::AssemblyHelpers::callExceptionFuzz):
(JSC::AssemblyHelpers::debugCall):
* jit/CCallHelpers.cpp:
(JSC::CCallHelpers::ensureShadowChickenPacket):
* jit/CCallHelpers.h:
(JSC::CCallHelpers::jumpToExceptionHandler):
* jit/ExecutableAllocator.cpp:
(JSC::FixedVMPoolExecutableAllocator::jitWriteThunkGenerator):
* jit/JIT.cpp:
(JSC::JIT::emitEnterOptimizationCheck):
(JSC::JIT::link):
(JSC::JIT::privateCompileExceptionHandlers):
* jit/JIT.h:
(JSC::JIT::appendCall):
* jit/JITMathIC.h:
(JSC::isProfileEmpty):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_switch_imm):
(JSC::JIT::emit_op_switch_char):
(JSC::JIT::emit_op_switch_string):
(JSC::JIT::emitSlow_op_loop_hint):
(JSC::JIT::privateCompileHasIndexedProperty):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_switch_imm):
(JSC::JIT::emit_op_switch_char):
(JSC::JIT::emit_op_switch_string):
(JSC::JIT::privateCompileHasIndexedProperty):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::stringGetByValStubGenerator):
(JSC::JIT::privateCompileGetByVal):
(JSC::JIT::privateCompileGetByValWithCachedId):
(JSC::JIT::privateCompilePutByVal):
(JSC::JIT::privateCompilePutByValWithCachedId):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::stringGetByValStubGenerator):
* jit/JITStubRoutine.h:
* jit/Repatch.cpp:
(JSC::readCallTarget):
(JSC::appropriateOptimizingPutByIdFunction):
(JSC::linkPolymorphicCall):
(JSC::resetPutByID):
* jit/SlowPathCall.h:
(JSC::JITSlowPathCall::call):
* jit/SpecializedThunkJIT.h:
(JSC::SpecializedThunkJIT::finalize):
(JSC::SpecializedThunkJIT::callDoubleToDouble):
* jit/ThunkGenerators.cpp:
(JSC::throwExceptionFromCallSlowPathGenerator):
(JSC::slowPathFor):
(JSC::linkCallThunkGenerator):
(JSC::linkPolymorphicCallThunkGenerator):
(JSC::virtualThunkFor):
(JSC::nativeForGenerator):
(JSC::arityFixupGenerator):
(JSC::unreachableGenerator):
(JSC::boundThisNoArgsFunctionCallGenerator):
* llint/LLIntThunks.cpp:
(JSC::LLInt::generateThunkWithJumpTo):
(JSC::LLInt::functionForCallEntryThunkGenerator):
(JSC::LLInt::functionForConstructEntryThunkGenerator):
(JSC::LLInt::functionForCallArityCheckThunkGenerator):
(JSC::LLInt::functionForConstructArityCheckThunkGenerator):
(JSC::LLInt::evalEntryThunkGenerator):
(JSC::LLInt::programEntryThunkGenerator):
(JSC::LLInt::moduleProgramEntryThunkGenerator):
* runtime/PtrTag.h:
* wasm/WasmB3IRGenerator.cpp:
(JSC::Wasm::B3IRGenerator::addCall):
(JSC::Wasm::B3IRGenerator::addCallIndirect):
* wasm/WasmBBQPlan.cpp:
(JSC::Wasm::BBQPlan::complete):
* wasm/WasmBinding.cpp:
(JSC::Wasm::wasmToWasm):
* wasm/WasmOMGPlan.cpp:
(JSC::Wasm::OMGPlan::work):
* wasm/WasmThunks.cpp:
(JSC::Wasm::throwExceptionFromWasmThunkGenerator):
(JSC::Wasm::throwStackOverflowFromWasmThunkGenerator):
(JSC::Wasm::triggerOMGTierUpThunkGenerator):
* wasm/js/WasmToJS.cpp:
(JSC::Wasm::handleBadI64Use):
(JSC::Wasm::wasmToJS):
* yarr/YarrJIT.cpp:
(JSC::Yarr::YarrGenerator::loadFromFrameAndJump):
(JSC::Yarr::YarrGenerator::BacktrackingState::linkDataLabels):
(JSC::Yarr::YarrGenerator::generateTryReadUnicodeCharacterHelper):
(JSC::Yarr::YarrGenerator::generateEnter):
(JSC::Yarr::YarrGenerator::YarrGenerator):
(JSC::Yarr::YarrGenerator::compile):
(JSC::Yarr::jitCompile):
* yarr/YarrJIT.h:
(JSC::Yarr::YarrCodeBlock::execute):
Source/WebCore:
No new tests. Just adding PtrTags required by new MacroAssembler API.
* cssjit/FunctionCall.h:
(WebCore::FunctionCall::prepareAndCall):
* cssjit/SelectorCompiler.cpp:
(WebCore::SelectorCompiler::SelectorCodeGenerator::compile):
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@229609 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index a919a77..506dd88 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,3 +1,251 @@
+2018-03-14 Mark Lam <mark.lam@apple.com>
+
+ Enhance the MacroAssembler and LinkBuffer to support pointer profiling.
+ https://bugs.webkit.org/show_bug.cgi?id=183623
+ <rdar://problem/38443314>
+
+ Reviewed by Michael Saboff.
+
+ 1. Added a PtrTag argument to indirect call() and indirect jump() MacroAssembler
+ emitters to support pointer profiling.
+
+ 2. Also added tagPtr(), untagPtr(), and removePtrTag() placeholder methods.
+
+ 3. Added a PtrTag to LinkBuffer finalizeCodeWithoutDisassembly() and clients.
+
+ 4. Updated clients to pass a PtrTag. For the most part, I just apply NoPtrTag as
+ a placeholder until we have time to analyze what pointer profile each client
+ site has later.
+
+ 5. Apply PtrTags to the YarrJIT.
+
+ * assembler/ARM64Assembler.h:
+ (JSC::ARM64Assembler::linkJumpOrCall):
+ * assembler/AbstractMacroAssembler.h:
+ (JSC::AbstractMacroAssembler::getLinkerAddress):
+ (JSC::AbstractMacroAssembler::tagPtr):
+ (JSC::AbstractMacroAssembler::untagPtr):
+ (JSC::AbstractMacroAssembler::removePtrTag):
+ * assembler/LinkBuffer.cpp:
+ (JSC::LinkBuffer::finalizeCodeWithoutDisassembly):
+ (JSC::LinkBuffer::finalizeCodeWithDisassembly):
+ * assembler/LinkBuffer.h:
+ (JSC::LinkBuffer::link):
+ (JSC::LinkBuffer::locationOfNearCall):
+ (JSC::LinkBuffer::locationOf):
+ * assembler/MacroAssemblerARM.h:
+ (JSC::MacroAssemblerARM::jump):
+ (JSC::MacroAssemblerARM::call):
+ (JSC::MacroAssemblerARM::readCallTarget):
+ * assembler/MacroAssemblerARM64.h:
+ (JSC::MacroAssemblerARM64::call):
+ (JSC::MacroAssemblerARM64::jump):
+ (JSC::MacroAssemblerARM64::readCallTarget):
+ (JSC::MacroAssemblerARM64::linkCall):
+ * assembler/MacroAssemblerARMv7.h:
+ (JSC::MacroAssemblerARMv7::jump):
+ (JSC::MacroAssemblerARMv7::relativeTableJump):
+ (JSC::MacroAssemblerARMv7::call):
+ (JSC::MacroAssemblerARMv7::readCallTarget):
+ * assembler/MacroAssemblerCodeRef.cpp:
+ (JSC::MacroAssemblerCodePtr::createLLIntCodePtr):
+ (JSC::MacroAssemblerCodeRef::createLLIntCodeRef):
+ * assembler/MacroAssemblerCodeRef.h:
+ (JSC::FunctionPtr::FunctionPtr):
+ (JSC::FunctionPtr::value const):
+ (JSC::MacroAssemblerCodePtr:: const):
+ (JSC::MacroAssemblerCodeRef::MacroAssemblerCodeRef):
+ (JSC::MacroAssemblerCodeRef::retaggedCode const):
+ * assembler/MacroAssemblerMIPS.h:
+ (JSC::MacroAssemblerMIPS::jump):
+ (JSC::MacroAssemblerMIPS::call):
+ (JSC::MacroAssemblerMIPS::readCallTarget):
+ * assembler/MacroAssemblerX86.h:
+ (JSC::MacroAssemblerX86::call):
+ (JSC::MacroAssemblerX86::jump):
+ (JSC::MacroAssemblerX86::readCallTarget):
+ * assembler/MacroAssemblerX86Common.cpp:
+ (JSC::MacroAssembler::probe):
+ * assembler/MacroAssemblerX86Common.h:
+ (JSC::MacroAssemblerX86Common::jump):
+ (JSC::MacroAssemblerX86Common::call):
+ * assembler/MacroAssemblerX86_64.h:
+ (JSC::MacroAssemblerX86_64::call):
+ (JSC::MacroAssemblerX86_64::jump):
+ (JSC::MacroAssemblerX86_64::readCallTarget):
+ * assembler/testmasm.cpp:
+ (JSC::compile):
+ (JSC::invoke):
+ * b3/B3Compile.cpp:
+ (JSC::B3::compile):
+ * b3/B3LowerMacros.cpp:
+ * b3/air/AirCCallSpecial.cpp:
+ (JSC::B3::Air::CCallSpecial::generate):
+ * b3/air/testair.cpp:
+ * b3/testb3.cpp:
+ (JSC::B3::invoke):
+ (JSC::B3::testInterpreter):
+ (JSC::B3::testEntrySwitchSimple):
+ (JSC::B3::testEntrySwitchNoEntrySwitch):
+ (JSC::B3::testEntrySwitchWithCommonPaths):
+ (JSC::B3::testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint):
+ (JSC::B3::testEntrySwitchLoop):
+ * bytecode/AccessCase.cpp:
+ (JSC::AccessCase::generateImpl):
+ * bytecode/AccessCaseSnippetParams.cpp:
+ (JSC::SlowPathCallGeneratorWithArguments::generateImpl):
+ * bytecode/InlineAccess.cpp:
+ (JSC::linkCodeInline):
+ (JSC::InlineAccess::rewireStubAsJump):
+ * bytecode/PolymorphicAccess.cpp:
+ (JSC::AccessGenerationState::emitExplicitExceptionHandler):
+ (JSC::PolymorphicAccess::regenerate):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::compileExceptionHandlers):
+ (JSC::DFG::JITCompiler::link):
+ (JSC::DFG::JITCompiler::compileFunction):
+ (JSC::DFG::JITCompiler::noticeCatchEntrypoint):
+ * dfg/DFGJITCompiler.h:
+ (JSC::DFG::JITCompiler::appendCall):
+ * dfg/DFGJITFinalizer.cpp:
+ (JSC::DFG::JITFinalizer::finalize):
+ (JSC::DFG::JITFinalizer::finalizeFunction):
+ * dfg/DFGOSRExit.cpp:
+ (JSC::DFG::OSRExit::emitRestoreArguments):
+ (JSC::DFG::OSRExit::compileOSRExit):
+ * dfg/DFGOSRExitCompilerCommon.cpp:
+ (JSC::DFG::handleExitCounts):
+ (JSC::DFG::osrWriteBarrier):
+ (JSC::DFG::adjustAndJumpToTarget):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::emitSwitchIntJump):
+ (JSC::DFG::SpeculativeJIT::emitSwitchImm):
+ (JSC::DFG::SpeculativeJIT::emitSwitchStringOnString):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGThunks.cpp:
+ (JSC::DFG::osrExitThunkGenerator):
+ (JSC::DFG::osrExitGenerationThunkGenerator):
+ (JSC::DFG::osrEntryThunkGenerator):
+ * ftl/FTLCompile.cpp:
+ (JSC::FTL::compile):
+ * ftl/FTLJITFinalizer.cpp:
+ (JSC::FTL::JITFinalizer::finalizeCommon):
+ * ftl/FTLLazySlowPath.cpp:
+ (JSC::FTL::LazySlowPath::generate):
+ * ftl/FTLLink.cpp:
+ (JSC::FTL::link):
+ * ftl/FTLLowerDFGToB3.cpp:
+ (JSC::FTL::DFG::LowerDFGToB3::lower):
+ (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargsSpread):
+ (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargs):
+ (JSC::FTL::DFG::LowerDFGToB3::compileCallEval):
+ * ftl/FTLOSRExitCompiler.cpp:
+ (JSC::FTL::compileStub):
+ (JSC::FTL::compileFTLOSRExit):
+ * ftl/FTLSlowPathCall.cpp:
+ (JSC::FTL::SlowPathCallContext::makeCall):
+ * ftl/FTLThunks.cpp:
+ (JSC::FTL::genericGenerationThunkGenerator):
+ (JSC::FTL::osrExitGenerationThunkGenerator):
+ (JSC::FTL::lazySlowPathGenerationThunkGenerator):
+ (JSC::FTL::slowPathCallThunkGenerator):
+ * jit/AssemblyHelpers.cpp:
+ (JSC::AssemblyHelpers::callExceptionFuzz):
+ (JSC::AssemblyHelpers::debugCall):
+ * jit/CCallHelpers.cpp:
+ (JSC::CCallHelpers::ensureShadowChickenPacket):
+ * jit/CCallHelpers.h:
+ (JSC::CCallHelpers::jumpToExceptionHandler):
+ * jit/ExecutableAllocator.cpp:
+ (JSC::FixedVMPoolExecutableAllocator::jitWriteThunkGenerator):
+ * jit/JIT.cpp:
+ (JSC::JIT::emitEnterOptimizationCheck):
+ (JSC::JIT::link):
+ (JSC::JIT::privateCompileExceptionHandlers):
+ * jit/JIT.h:
+ (JSC::JIT::appendCall):
+ * jit/JITMathIC.h:
+ (JSC::isProfileEmpty):
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::emit_op_catch):
+ (JSC::JIT::emit_op_switch_imm):
+ (JSC::JIT::emit_op_switch_char):
+ (JSC::JIT::emit_op_switch_string):
+ (JSC::JIT::emitSlow_op_loop_hint):
+ (JSC::JIT::privateCompileHasIndexedProperty):
+ * jit/JITOpcodes32_64.cpp:
+ (JSC::JIT::emit_op_catch):
+ (JSC::JIT::emit_op_switch_imm):
+ (JSC::JIT::emit_op_switch_char):
+ (JSC::JIT::emit_op_switch_string):
+ (JSC::JIT::privateCompileHasIndexedProperty):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::stringGetByValStubGenerator):
+ (JSC::JIT::privateCompileGetByVal):
+ (JSC::JIT::privateCompileGetByValWithCachedId):
+ (JSC::JIT::privateCompilePutByVal):
+ (JSC::JIT::privateCompilePutByValWithCachedId):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::stringGetByValStubGenerator):
+ * jit/JITStubRoutine.h:
+ * jit/Repatch.cpp:
+ (JSC::readCallTarget):
+ (JSC::appropriateOptimizingPutByIdFunction):
+ (JSC::linkPolymorphicCall):
+ (JSC::resetPutByID):
+ * jit/SlowPathCall.h:
+ (JSC::JITSlowPathCall::call):
+ * jit/SpecializedThunkJIT.h:
+ (JSC::SpecializedThunkJIT::finalize):
+ (JSC::SpecializedThunkJIT::callDoubleToDouble):
+ * jit/ThunkGenerators.cpp:
+ (JSC::throwExceptionFromCallSlowPathGenerator):
+ (JSC::slowPathFor):
+ (JSC::linkCallThunkGenerator):
+ (JSC::linkPolymorphicCallThunkGenerator):
+ (JSC::virtualThunkFor):
+ (JSC::nativeForGenerator):
+ (JSC::arityFixupGenerator):
+ (JSC::unreachableGenerator):
+ (JSC::boundThisNoArgsFunctionCallGenerator):
+ * llint/LLIntThunks.cpp:
+ (JSC::LLInt::generateThunkWithJumpTo):
+ (JSC::LLInt::functionForCallEntryThunkGenerator):
+ (JSC::LLInt::functionForConstructEntryThunkGenerator):
+ (JSC::LLInt::functionForCallArityCheckThunkGenerator):
+ (JSC::LLInt::functionForConstructArityCheckThunkGenerator):
+ (JSC::LLInt::evalEntryThunkGenerator):
+ (JSC::LLInt::programEntryThunkGenerator):
+ (JSC::LLInt::moduleProgramEntryThunkGenerator):
+ * runtime/PtrTag.h:
+ * wasm/WasmB3IRGenerator.cpp:
+ (JSC::Wasm::B3IRGenerator::addCall):
+ (JSC::Wasm::B3IRGenerator::addCallIndirect):
+ * wasm/WasmBBQPlan.cpp:
+ (JSC::Wasm::BBQPlan::complete):
+ * wasm/WasmBinding.cpp:
+ (JSC::Wasm::wasmToWasm):
+ * wasm/WasmOMGPlan.cpp:
+ (JSC::Wasm::OMGPlan::work):
+ * wasm/WasmThunks.cpp:
+ (JSC::Wasm::throwExceptionFromWasmThunkGenerator):
+ (JSC::Wasm::throwStackOverflowFromWasmThunkGenerator):
+ (JSC::Wasm::triggerOMGTierUpThunkGenerator):
+ * wasm/js/WasmToJS.cpp:
+ (JSC::Wasm::handleBadI64Use):
+ (JSC::Wasm::wasmToJS):
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::YarrGenerator::loadFromFrameAndJump):
+ (JSC::Yarr::YarrGenerator::BacktrackingState::linkDataLabels):
+ (JSC::Yarr::YarrGenerator::generateTryReadUnicodeCharacterHelper):
+ (JSC::Yarr::YarrGenerator::generateEnter):
+ (JSC::Yarr::YarrGenerator::YarrGenerator):
+ (JSC::Yarr::YarrGenerator::compile):
+ (JSC::Yarr::jitCompile):
+ * yarr/YarrJIT.h:
+ (JSC::Yarr::YarrCodeBlock::execute):
+
2018-03-14 Caitlin Potter <caitp@igalia.com>
[JSC] fix order of evaluation for ClassDefinitionEvaluation
diff --git a/Source/JavaScriptCore/assembler/ARM64Assembler.h b/Source/JavaScriptCore/assembler/ARM64Assembler.h
index 74a0f9b..4bd0168 100644
--- a/Source/JavaScriptCore/assembler/ARM64Assembler.h
+++ b/Source/JavaScriptCore/assembler/ARM64Assembler.h
@@ -29,6 +29,7 @@
#include "AssemblerBuffer.h"
#include "AssemblerCommon.h"
+#include "PtrTag.h"
#include <limits.h>
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
@@ -3033,7 +3034,7 @@
ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
- intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
+ intptr_t offset = (removeCodePtrTag<intptr_t>(to) - removeCodePtrTag<intptr_t>(fromInstruction)) >> 2;
ASSERT(static_cast<int>(offset) == offset);
int insn = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
diff --git a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 84e3d4f..bd544cf 100644
--- a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -34,6 +34,7 @@
#include "MacroAssemblerCodeRef.h"
#include "MacroAssemblerHelpers.h"
#include "Options.h"
+#include "PtrTag.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>
#include <wtf/SharedTask.h>
@@ -871,9 +872,10 @@
AssemblerType::linkPointer(code, label, value.executableAddress());
}
- static void* getLinkerAddress(void* code, AssemblerLabel label)
+ // FIXME: remove the default PtrTag value once we've tagged all the clients.
+ static void* getLinkerAddress(void* code, AssemblerLabel label, PtrTag tag = NoPtrTag)
{
- return AssemblerType::getRelocatedAddress(code, label);
+ return tagCodePtr(AssemblerType::getRelocatedAddress(code, label), tag);
}
static unsigned getLinkerCallReturnOffset(Call call)
@@ -951,6 +953,12 @@
buffer.setCodeSize(targetCodeSize);
}
+ ALWAYS_INLINE void tagPtr(RegisterID, PtrTag) { }
+ ALWAYS_INLINE void tagPtr(RegisterID, RegisterID) { }
+ ALWAYS_INLINE void untagPtr(RegisterID, PtrTag) { }
+ ALWAYS_INLINE void untagPtr(RegisterID, RegisterID) { }
+ ALWAYS_INLINE void removePtrTag(RegisterID) { }
+
protected:
AbstractMacroAssembler()
: m_randomSource(0)
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
index 1d19d72..b982c28 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.cpp
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -44,20 +44,20 @@
return Options::dumpDisassembly();
}
-LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly(PtrTag tag)
{
performFinalization();
ASSERT(m_didAllocate);
if (m_executableMemory)
- return CodeRef(*m_executableMemory);
+ return CodeRef(*m_executableMemory, tag);
- return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code));
+ return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(tagCodePtr(m_code, tag)));
}
-LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(PtrTag tag, const char* format, ...)
{
- CodeRef result = finalizeCodeWithoutDisassembly();
+ CodeRef result = finalizeCodeWithoutDisassembly(tag);
if (m_alreadyDisassembled)
return result;
@@ -70,7 +70,8 @@
va_end(argList);
out.printf(":\n");
- out.printf(" Code at [%p, %p):\n", result.code().executableAddress(), result.code().executableAddress<char*>() + result.size());
+ uint8_t* executableAddress = removeCodePtrTag<uint8_t*>(result.code().executableAddress());
+ out.printf(" Code at [%p, %p):\n", executableAddress, executableAddress + result.size());
CString header = out.toCString();
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h
index 7b7a838..2248fa9 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.h
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.h
@@ -121,6 +121,13 @@
// These methods are used to link or set values at code generation time.
+ template<typename Func, typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func>::type>::value>>
+ void link(Call call, Func funcName, PtrTag tag)
+ {
+ FunctionPtr function(funcName, tag);
+ link(call, function);
+ }
+
void link(Call call, FunctionPtr function)
{
ASSERT(call.isFlagSet(Call::Linkable));
@@ -175,7 +182,7 @@
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(call.isFlagSet(Call::Near));
- return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)),
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label), NearCallPtrTag),
call.isFlagSet(Call::Tail) ? NearCallMode::Tail : NearCallMode::Regular);
}
@@ -184,9 +191,10 @@
return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(jump.m_jump.m_label)));
}
- CodeLocationLabel locationOf(Label label)
+ // FIXME: remove the default PtrTag value once we've tagged all the clients.
+ CodeLocationLabel locationOf(Label label, PtrTag tag = NoPtrTag)
{
- return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label), tag));
}
CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
@@ -232,8 +240,8 @@
// finalizeCodeWithoutDisassembly() directly if you have your own way of
// displaying disassembly.
- JS_EXPORT_PRIVATE CodeRef finalizeCodeWithoutDisassembly();
- JS_EXPORT_PRIVATE CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+ JS_EXPORT_PRIVATE CodeRef finalizeCodeWithoutDisassembly(PtrTag);
+ JS_EXPORT_PRIVATE CodeRef finalizeCodeWithDisassembly(PtrTag, const char* format, ...) WTF_ATTRIBUTE_PRINTF(3, 4);
CodePtr trampolineAt(Label label)
{
@@ -307,19 +315,19 @@
Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;
};
-#define FINALIZE_CODE_IF(condition, linkBufferReference, ...) \
+#define FINALIZE_CODE_IF(condition, linkBufferReference, resultPtrTag, ...) \
(UNLIKELY((condition)) \
- ? (linkBufferReference).finalizeCodeWithDisassembly(__VA_ARGS__) \
- : (linkBufferReference).finalizeCodeWithoutDisassembly())
+ ? (linkBufferReference).finalizeCodeWithDisassembly(resultPtrTag, __VA_ARGS__) \
+ : (linkBufferReference).finalizeCodeWithoutDisassembly(resultPtrTag))
bool shouldDumpDisassemblyFor(CodeBlock*);
-#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, ...) \
- FINALIZE_CODE_IF((shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly()), linkBufferReference, __VA_ARGS__)
+#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, resultPtrTag, ...) \
+ FINALIZE_CODE_IF((shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly()), linkBufferReference, resultPtrTag, __VA_ARGS__)
// Use this to finalize code, like so:
//
-// CodeRef code = FINALIZE_CODE(linkBuffer, "my super thingy number %d", number);
+// CodeRef code = FINALIZE_CODE(linkBuffer, tag, "my super thingy number %d", number);
//
// Which, in disassembly mode, will print:
//
@@ -333,11 +341,11 @@
// Note that the format string and print arguments are only evaluated when dumpDisassembly
// is true, so you can hide expensive disassembly-only computations inside there.
-#define FINALIZE_CODE(linkBufferReference, ...) \
- FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly()), linkBufferReference, __VA_ARGS__)
+#define FINALIZE_CODE(linkBufferReference, resultPtrTag, ...) \
+ FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly()), linkBufferReference, resultPtrTag, __VA_ARGS__)
-#define FINALIZE_DFG_CODE(linkBufferReference, ...) \
- FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly()), linkBufferReference, __VA_ARGS__)
+#define FINALIZE_DFG_CODE(linkBufferReference, resultPtrTag, ...) \
+ FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly()), linkBufferReference, resultPtrTag, __VA_ARGS__)
} // namespace JSC
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
index 3adddd8..b766698 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -789,17 +789,17 @@
return Jump(m_assembler.jmp());
}
- void jump(RegisterID target)
+ void jump(RegisterID target, PtrTag)
{
m_assembler.bx(target);
}
- void jump(Address address)
+ void jump(Address address, PtrTag)
{
load32(address, ARMRegisters::pc);
}
- void jump(AbsoluteAddress address)
+ void jump(AbsoluteAddress address, PtrTag)
{
move(TrustedImmPtr(address.m_ptr), ARMRegisters::S0);
load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc);
@@ -988,12 +988,12 @@
return Call(m_assembler.jmp(), Call::LinkableNearTail);
}
- Call call(RegisterID target)
+ Call call(RegisterID target, PtrTag)
{
return Call(m_assembler.blx(target), Call::None);
}
- void call(Address address)
+ void call(Address address, PtrTag)
{
call32(address.base, address.offset);
}
@@ -1118,7 +1118,7 @@
m_assembler.mov(ARMRegisters::r0, ARMRegisters::r0);
}
- Call call()
+ Call call(PtrTag)
{
ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
@@ -1499,7 +1499,7 @@
static FunctionPtr readCallTarget(CodeLocationCall call)
{
- return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())), CodeEntryPtrTag);
}
static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
index 031f7c8..bb9b49bf 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
@@ -3084,7 +3084,7 @@
// Jumps, calls, returns
- ALWAYS_INLINE Call call()
+ ALWAYS_INLINE Call call(PtrTag)
{
AssemblerLabel pointerLabel = m_assembler.label();
moveWithFixedWidth(TrustedImmPtr(nullptr), getCachedDataTempRegisterIDAndInvalidate());
@@ -3095,17 +3095,17 @@
return Call(callLabel, Call::Linkable);
}
- ALWAYS_INLINE Call call(RegisterID target)
+ ALWAYS_INLINE Call call(RegisterID target, PtrTag)
{
invalidateAllTempRegisters();
m_assembler.blr(target);
return Call(m_assembler.label(), Call::None);
}
- ALWAYS_INLINE Call call(Address address)
+ ALWAYS_INLINE Call call(Address address, PtrTag tag)
{
load64(address, getCachedDataTempRegisterIDAndInvalidate());
- return call(dataTempRegister);
+ return call(dataTempRegister, tag);
}
ALWAYS_INLINE Jump jump()
@@ -3115,24 +3115,24 @@
return Jump(label, m_makeJumpPatchable ? Assembler::JumpNoConditionFixedSize : Assembler::JumpNoCondition);
}
- void jump(RegisterID target)
+ void jump(RegisterID target, PtrTag)
{
m_assembler.br(target);
}
- void jump(Address address)
+ void jump(Address address, PtrTag)
{
load64(address, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.br(dataTempRegister);
}
- void jump(BaseIndex address)
+ void jump(BaseIndex address, PtrTag)
{
load64(address, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.br(dataTempRegister);
}
- void jump(AbsoluteAddress address)
+ void jump(AbsoluteAddress address, PtrTag)
{
move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
load64(Address(dataTempRegister), dataTempRegister);
@@ -3764,7 +3764,7 @@
static FunctionPtr readCallTarget(CodeLocationCall call)
{
- return FunctionPtr(reinterpret_cast<void(*)()>(Assembler::readCallTarget(call.dataLocation())));
+ return FunctionPtr(reinterpret_cast<void(*)()>(Assembler::readCallTarget(call.dataLocation())), CodeEntryPtrTag);
}
static void replaceWithVMHalt(CodeLocationLabel instructionStart)
@@ -4431,11 +4431,11 @@
static void linkCall(void* code, Call call, FunctionPtr function)
{
if (!call.isFlagSet(Call::Near))
- Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+ Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.executableAddress());
else if (call.isFlagSet(Call::Tail))
- Assembler::linkJump(code, call.m_label, function.value());
+ Assembler::linkJump(code, call.m_label, function.executableAddress());
else
- Assembler::linkCall(code, call.m_label, function.value());
+ Assembler::linkCall(code, call.m_label, function.executableAddress());
}
CachedTempRegister m_dataMemoryTempRegister;
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index fdaedb9..1babc4d 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -1599,19 +1599,19 @@
return branchTest32(cond, addressTempRegister, mask8);
}
- void jump(RegisterID target)
+ void jump(RegisterID target, PtrTag)
{
m_assembler.bx(target);
}
// Address is a memory location containing the address to jump to
- void jump(Address address)
+ void jump(Address address, PtrTag)
{
load32(address, dataTempRegister);
m_assembler.bx(dataTempRegister);
}
- void jump(AbsoluteAddress address)
+ void jump(AbsoluteAddress address, PtrTag)
{
move(TrustedImmPtr(address.m_ptr), dataTempRegister);
load32(Address(dataTempRegister), dataTempRegister);
@@ -1763,7 +1763,7 @@
ShiftTypeAndAmount shift(SRType_LSL, scale);
m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
- jump(dataTempRegister);
+ jump(dataTempRegister, NoPtrTag);
}
// Miscellaneous operations:
@@ -1787,18 +1787,18 @@
return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail);
}
- ALWAYS_INLINE Call call()
+ ALWAYS_INLINE Call call(PtrTag)
{
moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
}
- ALWAYS_INLINE Call call(RegisterID target)
+ ALWAYS_INLINE Call call(RegisterID target, PtrTag)
{
return Call(m_assembler.blx(target), Call::None);
}
- ALWAYS_INLINE Call call(Address address)
+ ALWAYS_INLINE Call call(Address address, PtrTag)
{
load32(address, dataTempRegister);
return Call(m_assembler.blx(dataTempRegister), Call::None);
@@ -1977,7 +1977,7 @@
static FunctionPtr readCallTarget(CodeLocationCall call)
{
- return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())), CodeEntryPtrTag);
}
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp
index 21d1834..bb86961 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,9 +33,10 @@
namespace JSC {
-MacroAssemblerCodePtr MacroAssemblerCodePtr::createLLIntCodePtr(OpcodeID codeId)
+MacroAssemblerCodePtr MacroAssemblerCodePtr::createLLIntCodePtr(OpcodeID opcodeID)
{
- return createFromExecutableAddress(LLInt::getCodePtr(codeId));
+ ASSERT(opcodeID >= NUMBER_OF_BYTECODE_IDS);
+ return createFromExecutableAddress(LLInt::getCodePtr(opcodeID));
}
void MacroAssemblerCodePtr::dumpWithName(const char* name, PrintStream& out) const
@@ -56,9 +57,9 @@
dumpWithName("CodePtr", out);
}
-MacroAssemblerCodeRef MacroAssemblerCodeRef::createLLIntCodeRef(OpcodeID codeId)
+MacroAssemblerCodeRef MacroAssemblerCodeRef::createLLIntCodeRef(OpcodeID opcodeID)
{
- return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
+ return createSelfManagedCodeRef(MacroAssemblerCodePtr::createLLIntCodePtr(opcodeID));
}
bool MacroAssemblerCodeRef::tryToDisassemble(PrintStream& out, const char* prefix) const
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 03564b5..b29b2c0 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -27,6 +27,7 @@
#include "ExecutableAllocator.h"
#include "JSCPoison.h"
+#include "PtrTag.h"
#include <wtf/DataLog.h>
#include <wtf/PrintStream.h>
#include <wtf/RefPtr.h>
@@ -63,9 +64,9 @@
public:
FunctionPtr() { }
- template<typename returnType, typename... Arguments>
- FunctionPtr(returnType(*value)(Arguments...))
- : m_value(reinterpret_cast<void*>(value))
+ template<typename ReturnType, typename... Arguments>
+ FunctionPtr(ReturnType(*value)(Arguments...), PtrTag tag = SlowPathPtrTag)
+ : m_value(tagCFunctionPtr<void*>(value, tag))
{
PoisonedMasmPtr::assertIsNotPoisoned(m_value);
ASSERT_VALID_CODE_POINTER(m_value);
@@ -75,9 +76,9 @@
// different types; these methods already defined for fastcall, below.
#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
- template<typename returnType, typename... Arguments>
- FunctionPtr(returnType(CDECL *value)(Arguments...))
- : m_value(reinterpret_cast<void*>(value))
+ template<typename ReturnType, typename... Arguments>
+ FunctionPtr(ReturnType(CDECL *value)(Arguments...), PtrTag tag = SlowPathPtrTag)
+ : m_value(tagCFunctionPtr<void*>(value, tag))
{
PoisonedMasmPtr::assertIsNotPoisoned(m_value);
ASSERT_VALID_CODE_POINTER(m_value);
@@ -87,9 +88,9 @@
#if COMPILER_SUPPORTS(FASTCALL_CALLING_CONVENTION)
- template<typename returnType, typename... Arguments>
- FunctionPtr(returnType(FASTCALL *value)(Arguments...))
- : m_value(reinterpret_cast<void*>(value))
+ template<typename ReturnType, typename... Arguments>
+ FunctionPtr(ReturnType(FASTCALL *value)(Arguments...), PtrTag tag = SlowPathPtrTag)
+ : m_value(tagCFunctionPtr<void*>(value, tag))
{
PoisonedMasmPtr::assertIsNotPoisoned(m_value);
ASSERT_VALID_CODE_POINTER(m_value);
@@ -98,11 +99,11 @@
#endif // COMPILER_SUPPORTS(FASTCALL_CALLING_CONVENTION)
template<typename FunctionType>
- explicit FunctionPtr(FunctionType* value)
+ explicit FunctionPtr(FunctionType* value, PtrTag tag = SlowPathPtrTag)
// Using a C-ctyle cast here to avoid compiler error on RVTC:
// Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
// (I guess on RVTC function pointers have a different constness to GCC/MSVC?)
- : m_value((void*)value)
+ : m_value(tagCodePtr<void*>(value, tag))
{
PoisonedMasmPtr::assertIsNotPoisoned(m_value);
ASSERT_VALID_CODE_POINTER(m_value);
@@ -113,7 +114,7 @@
void* value() const
{
PoisonedMasmPtr::assertIsNotPoisoned(m_value);
- return m_value;
+ return removeCodePtrTag(m_value);
}
void* executableAddress() const
{
@@ -183,7 +184,7 @@
ASSERT(value);
ASSERT_VALID_CODE_POINTER(m_value.unpoisoned());
}
-
+
static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
{
ASSERT(value);
@@ -227,7 +228,7 @@
{
m_value.assertIsPoisoned();
ASSERT_VALID_CODE_POINTER(m_value);
- return m_value.unpoisoned<T>();
+ return bitwise_cast<T>(m_value ? removeCodePtrTag(m_value.unpoisoned()) : nullptr);
}
#endif
@@ -315,8 +316,8 @@
{
}
- MacroAssemblerCodeRef(Ref<ExecutableMemoryHandle>&& executableMemory)
- : m_codePtr(executableMemory->start())
+ MacroAssemblerCodeRef(Ref<ExecutableMemoryHandle>&& executableMemory, PtrTag tag)
+ : m_codePtr(tagCodePtr(executableMemory->start(), tag))
, m_executableMemory(WTFMove(executableMemory))
{
ASSERT(m_executableMemory->isManaged());
@@ -344,7 +345,12 @@
{
return m_codePtr;
}
-
+
+ MacroAssemblerCodePtr retaggedCode(PtrTag oldTag, PtrTag newTag) const
+ {
+ return MacroAssemblerCodePtr(retagCodePtr(m_codePtr.executableAddress(), oldTag, newTag));
+ }
+
size_t size() const
{
if (!m_executableMemory)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
index 3f270fe..3d49deb 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
@@ -1756,14 +1756,14 @@
return branchEqual(MIPSRegisters::zero, MIPSRegisters::zero);
}
- void jump(RegisterID target)
+ void jump(RegisterID target, PtrTag)
{
move(target, MIPSRegisters::t9);
m_assembler.jr(MIPSRegisters::t9);
m_assembler.nop();
}
- void jump(Address address)
+ void jump(Address address, PtrTag)
{
m_fixedWidth = true;
load32(address, MIPSRegisters::t9);
@@ -1772,7 +1772,7 @@
m_fixedWidth = false;
}
- void jump(AbsoluteAddress address)
+ void jump(AbsoluteAddress address, PtrTag)
{
m_fixedWidth = true;
load32(address.m_ptr, MIPSRegisters::t9);
@@ -2250,7 +2250,7 @@
return Call(m_assembler.label(), Call::LinkableNearTail);
}
- Call call()
+ Call call(PtrTag)
{
m_assembler.lui(MIPSRegisters::t9, 0);
m_assembler.ori(MIPSRegisters::t9, MIPSRegisters::t9, 0);
@@ -2259,7 +2259,7 @@
return Call(m_assembler.label(), Call::Linkable);
}
- Call call(RegisterID target)
+ Call call(RegisterID target, PtrTag)
{
move(target, MIPSRegisters::t9);
m_assembler.jalr(MIPSRegisters::t9);
@@ -2267,7 +2267,7 @@
return Call(m_assembler.label(), Call::None);
}
- Call call(Address address)
+ Call call(Address address, PtrTag)
{
m_fixedWidth = true;
load32(address, MIPSRegisters::t9);
@@ -3078,7 +3078,7 @@
static FunctionPtr readCallTarget(CodeLocationCall call)
{
- return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation())));
+ return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation())), CodeEntryPtrTag);
}
static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
index b927100..896db24 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -213,13 +213,13 @@
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Call call()
+ Call call(PtrTag)
{
return Call(m_assembler.call(), Call::Linkable);
}
// Address is a memory location containing the address to jump to
- void jump(AbsoluteAddress address)
+ void jump(AbsoluteAddress address, PtrTag)
{
m_assembler.jmp_m(address.m_ptr);
}
@@ -298,7 +298,7 @@
static FunctionPtr readCallTarget(CodeLocationCall call)
{
intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
- return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset));
+ return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset), CodeEntryPtrTag);
}
static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
index ff9bc64..c90b4a7 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -753,7 +753,7 @@
move(TrustedImmPtr(reinterpret_cast<void*>(function)), RegisterID::edx);
push(RegisterID::ebx);
move(TrustedImmPtr(arg), RegisterID::ebx);
- call(RegisterID::eax);
+ call(RegisterID::eax, CFunctionPtrTag);
}
#endif // ENABLE(MASM_PROBE)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index 34b5158..073cce7 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -2696,19 +2696,19 @@
return Jump(m_assembler.jmp());
}
- void jump(RegisterID target)
+ void jump(RegisterID target, PtrTag)
{
m_assembler.jmp_r(target);
}
// Address is a memory location containing the address to jump to
- void jump(Address address)
+ void jump(Address address, PtrTag)
{
m_assembler.jmp_m(address.offset, address.base);
}
// Address is a memory location containing the address to jump to
- void jump(BaseIndex address)
+ void jump(BaseIndex address, PtrTag)
{
m_assembler.jmp_m(address.offset, address.base, address.index, address.scale);
}
@@ -2894,12 +2894,12 @@
return Call(m_assembler.call(), Call::LinkableNear);
}
- Call call(RegisterID target)
+ Call call(RegisterID target, PtrTag)
{
return Call(m_assembler.call(target), Call::None);
}
- void call(Address address)
+ void call(Address address, PtrTag)
{
m_assembler.call_m(address.offset, address.base);
}
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 8835d9b..693660e 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -190,7 +190,7 @@
}
#endif
- Call call()
+ Call call(PtrTag)
{
#if OS(WINDOWS)
// JIT relies on the CallerFrame (frame pointer) being put on the stack,
@@ -224,10 +224,10 @@
}
// Address is a memory location containing the address to jump to
- void jump(AbsoluteAddress address)
+ void jump(AbsoluteAddress address, PtrTag tag)
{
move(TrustedImmPtr(address.m_ptr), scratchRegister());
- jump(Address(scratchRegister()));
+ jump(Address(scratchRegister()), tag);
}
Call tailRecursiveCall()
@@ -1870,7 +1870,7 @@
static FunctionPtr readCallTarget(CodeLocationCall call)
{
- return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
+ return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()), CodeEntryPtrTag);
}
bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; }
diff --git a/Source/JavaScriptCore/assembler/testmasm.cpp b/Source/JavaScriptCore/assembler/testmasm.cpp
index 07e8817..7fe8214 100644
--- a/Source/JavaScriptCore/assembler/testmasm.cpp
+++ b/Source/JavaScriptCore/assembler/testmasm.cpp
@@ -149,13 +149,14 @@
CCallHelpers jit;
generate(jit);
LinkBuffer linkBuffer(jit, nullptr);
- return FINALIZE_CODE(linkBuffer, "testmasm compilation");
+ return FINALIZE_CODE(linkBuffer, JITCodePtrTag, "testmasm compilation");
}
template<typename T, typename... Arguments>
T invoke(MacroAssemblerCodeRef code, Arguments... arguments)
{
- T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(code.code().executableAddress());
+ void* executableAddress = untagCFunctionPtr(code.code().executableAddress(), JITCodePtrTag);
+ T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(executableAddress);
return function(arguments...);
}
diff --git a/Source/JavaScriptCore/b3/B3Compile.cpp b/Source/JavaScriptCore/b3/B3Compile.cpp
index c4317fd..75c6547 100644
--- a/Source/JavaScriptCore/b3/B3Compile.cpp
+++ b/Source/JavaScriptCore/b3/B3Compile.cpp
@@ -48,7 +48,7 @@
generate(proc, jit);
LinkBuffer linkBuffer(jit, nullptr);
- return Compilation(FINALIZE_CODE(linkBuffer, "B3::Compilation"), proc.releaseByproducts());
+ return Compilation(FINALIZE_CODE(linkBuffer, JITCodePtrTag, "B3::Compilation"), proc.releaseByproducts());
}
} } // namespace JSC::B3
diff --git a/Source/JavaScriptCore/b3/B3LowerMacros.cpp b/Source/JavaScriptCore/b3/B3LowerMacros.cpp
index 6159467..9cc5cd2 100644
--- a/Source/JavaScriptCore/b3/B3LowerMacros.cpp
+++ b/Source/JavaScriptCore/b3/B3LowerMacros.cpp
@@ -511,7 +511,7 @@
jit.move(CCallHelpers::TrustedImmPtr(jumpTable), scratch);
jit.load64(CCallHelpers::BaseIndex(scratch, index, CCallHelpers::timesPtr()), scratch);
jit.xor64(poisonScratch, scratch);
- jit.jump(scratch);
+ jit.jump(scratch, NoPtrTag);
// These labels are guaranteed to be populated before either late paths or
// link tasks run.
diff --git a/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp b/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
index def0f4c..f9478bb 100644
--- a/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
+++ b/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -134,14 +134,14 @@
case Arg::Imm:
case Arg::BigImm:
jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
- jit.call(scratchRegister);
+ jit.call(scratchRegister, NoPtrTag);
break;
case Arg::Tmp:
- jit.call(inst.args[calleeArgOffset].gpr());
+ jit.call(inst.args[calleeArgOffset].gpr(), NoPtrTag);
break;
case Arg::Addr:
case Arg::ExtendedOffsetAddr:
- jit.call(inst.args[calleeArgOffset].asAddress());
+ jit.call(inst.args[calleeArgOffset].asAddress(), NoPtrTag);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
diff --git a/Source/JavaScriptCore/b3/air/testair.cpp b/Source/JavaScriptCore/b3/air/testair.cpp
index 43b923a..901f46b 100644
--- a/Source/JavaScriptCore/b3/air/testair.cpp
+++ b/Source/JavaScriptCore/b3/air/testair.cpp
@@ -90,13 +90,14 @@
LinkBuffer linkBuffer(jit, nullptr);
return std::make_unique<B3::Compilation>(
- FINALIZE_CODE(linkBuffer, "testair compilation"), proc.releaseByproducts());
+ FINALIZE_CODE(linkBuffer, JITCodePtrTag, "testair compilation"), proc.releaseByproducts());
}
template<typename T, typename... Arguments>
T invoke(const B3::Compilation& code, Arguments... arguments)
{
- T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(code.code().executableAddress());
+ void* executableAddress = untagCFunctionPtr(code.code().executableAddress(), JITCodePtrTag);
+ T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(executableAddress);
return function(arguments...);
}
diff --git a/Source/JavaScriptCore/b3/testb3.cpp b/Source/JavaScriptCore/b3/testb3.cpp
index e9451e4..622d223 100644
--- a/Source/JavaScriptCore/b3/testb3.cpp
+++ b/Source/JavaScriptCore/b3/testb3.cpp
@@ -127,7 +127,8 @@
template<typename T, typename... Arguments>
T invoke(MacroAssemblerCodePtr ptr, Arguments... arguments)
{
- T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(ptr.executableAddress());
+ void* executableAddress = untagCFunctionPtr(ptr.executableAddress(), JITCodePtrTag);
+ T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(executableAddress);
return function(arguments...);
}
@@ -13042,12 +13043,12 @@
jit.move(CCallHelpers::TrustedImm64(JITCodePoison::key()), poisonScratch);
jit.load64(CCallHelpers::BaseIndex(scratch, params[0].gpr(), CCallHelpers::timesPtr()), scratch);
jit.xor64(poisonScratch, scratch);
- jit.jump(scratch);
+ jit.jump(scratch, NoPtrTag);
jit.addLinkTask(
[&, jumpTable, labels] (LinkBuffer& linkBuffer) {
for (unsigned i = labels.size(); i--;)
- jumpTable[i] = linkBuffer.locationOf(*labels[i]);
+ jumpTable[i] = linkBuffer.locationOf(*labels[i], NoPtrTag);
});
});
@@ -13296,12 +13297,12 @@
CCallHelpers jit;
generate(proc, jit);
LinkBuffer linkBuffer(jit, nullptr);
- CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
- CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
- CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
-
- MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, "testb3 compilation");
-
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0), JITCodePtrTag);
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1), JITCodePtrTag);
+ CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2), JITCodePtrTag);
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCodePtrTag, "testb3 compilation");
+
CHECK(invoke<int>(labelOne, 1, 2) == 3);
CHECK(invoke<int>(labelTwo, 1, 2) == -1);
CHECK(invoke<int>(labelThree, 1, 2) == 2);
@@ -13329,12 +13330,12 @@
CCallHelpers jit;
generate(proc, jit);
LinkBuffer linkBuffer(jit, nullptr);
- CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
- CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
- CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
-
- MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, "testb3 compilation");
-
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0), JITCodePtrTag);
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1), JITCodePtrTag);
+ CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2), JITCodePtrTag);
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCodePtrTag, "testb3 compilation");
+
CHECK_EQ(invoke<int>(labelOne, 1, 2), 3);
CHECK_EQ(invoke<int>(labelTwo, 1, 2), 3);
CHECK_EQ(invoke<int>(labelThree, 1, 2), 3);
@@ -13416,12 +13417,12 @@
CCallHelpers jit;
generate(proc, jit);
LinkBuffer linkBuffer(jit, nullptr);
- CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
- CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
- CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
-
- MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, "testb3 compilation");
-
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0), JITCodePtrTag);
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1), JITCodePtrTag);
+ CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2), JITCodePtrTag);
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCodePtrTag, "testb3 compilation");
+
CHECK_EQ(invoke<int>(labelOne, 1, 2, 10), 3);
CHECK_EQ(invoke<int>(labelTwo, 1, 2, 10), -1);
CHECK_EQ(invoke<int>(labelThree, 1, 2, 10), 2);
@@ -13533,12 +13534,12 @@
CCallHelpers jit;
generate(proc, jit);
LinkBuffer linkBuffer(jit, nullptr);
- CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
- CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
- CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
-
- MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, "testb3 compilation");
-
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0), JITCodePtrTag);
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1), JITCodePtrTag);
+ CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2), JITCodePtrTag);
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCodePtrTag, "testb3 compilation");
+
CHECK_EQ(invoke<int>(labelOne, 1, 2, 10, false), 3);
CHECK_EQ(invoke<int>(labelTwo, 1, 2, 10, false), -1);
CHECK_EQ(invoke<int>(labelThree, 1, 2, 10, false), 2);
@@ -13611,10 +13612,10 @@
CCallHelpers jit;
generate(proc, jit);
LinkBuffer linkBuffer(jit, nullptr);
- CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
- CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
-
- MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, "testb3 compilation");
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0), JITCodePtrTag);
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1), JITCodePtrTag);
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCodePtrTag, "testb3 compilation");
CHECK(invoke<int>(labelOne, 0) == 1);
CHECK(invoke<int>(labelOne, 42) == 43);
diff --git a/Source/JavaScriptCore/bytecode/AccessCase.cpp b/Source/JavaScriptCore/bytecode/AccessCase.cpp
index 7a1f02c..86821ed 100644
--- a/Source/JavaScriptCore/bytecode/AccessCase.cpp
+++ b/Source/JavaScriptCore/bytecode/AccessCase.cpp
@@ -863,7 +863,7 @@
#endif
jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
- operationCall = jit.call();
+ operationCall = jit.call(NoPtrTag);
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
linkBuffer.link(operationCall, FunctionPtr(this->as<GetterSetterAccessCase>().m_customAccessor.opaque));
});
@@ -1007,7 +1007,7 @@
if (!reallocating) {
jit.setupArguments<decltype(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity)>(baseGPR);
- CCallHelpers::Call operationCall = jit.call();
+ CCallHelpers::Call operationCall = jit.call(NoPtrTag);
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
linkBuffer.link(
operationCall,
@@ -1019,7 +1019,7 @@
jit.setupArguments<decltype(operationReallocateButterflyToGrowPropertyStorage)>(
baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
- CCallHelpers::Call operationCall = jit.call();
+ CCallHelpers::Call operationCall = jit.call(NoPtrTag);
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
linkBuffer.link(
operationCall,
diff --git a/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp b/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp
index fd7139c..c98d2af 100644
--- a/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp
+++ b/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp
@@ -61,7 +61,7 @@
jit.setupArguments<FunctionType>(std::get<ArgumentsIndex>(m_arguments)...);
- CCallHelpers::Call operationCall = jit.call();
+ CCallHelpers::Call operationCall = jit.call(NoPtrTag);
auto function = m_function;
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
linkBuffer.link(operationCall, FunctionPtr(function));
diff --git a/Source/JavaScriptCore/bytecode/InlineAccess.cpp b/Source/JavaScriptCore/bytecode/InlineAccess.cpp
index 5747a59..a488de3 100644
--- a/Source/JavaScriptCore/bytecode/InlineAccess.cpp
+++ b/Source/JavaScriptCore/bytecode/InlineAccess.cpp
@@ -135,7 +135,7 @@
LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
ASSERT(linkBuffer.isValid());
function(linkBuffer);
- FINALIZE_CODE(linkBuffer, "InlineAccessType: '%s'", name);
+ FINALIZE_CODE(linkBuffer, JITCodePtrTag, "InlineAccessType: '%s'", name);
return true;
}
@@ -290,7 +290,7 @@
RELEASE_ASSERT(linkBuffer.isValid());
linkBuffer.link(jump, target);
- FINALIZE_CODE(linkBuffer, "InlineAccess: linking constant jump");
+ FINALIZE_CODE(linkBuffer, JITCodePtrTag, "InlineAccess: linking constant jump");
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
index c2193d6..7a8247c 100644
--- a/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
+++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
@@ -199,10 +199,10 @@
});
} else {
jit->setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&m_vm), GPRInfo::callFrameRegister);
- CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
+ CCallHelpers::Call lookupExceptionHandlerCall = jit->call(NoPtrTag);
jit->addLinkTask(
[=] (LinkBuffer& linkBuffer) {
- linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
+ linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler, NoPtrTag);
});
jit->jumpToExceptionHandler(m_vm);
}
@@ -538,7 +538,7 @@
linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
HandlerInfo handlerToRegister = oldHandler;
- handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
+ handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler, NoPtrTag);
handlerToRegister.start = newExceptionHandlingCallSite.bits();
handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
codeBlock->appendExceptionHandler(handlerToRegister);
@@ -568,7 +568,7 @@
dataLog(FullCodeOrigin(codeBlock, stubInfo.codeOrigin), ": Generating polymorphic access stub for ", listDump(cases), "\n");
MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
- codeBlock, linkBuffer,
+ codeBlock, linkBuffer, NoPtrTag,
"%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data());
bool doesCalls = false;
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 6fea38d..02ead29 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -161,7 +161,7 @@
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
- m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
+ m_calls.append(CallLinkRecord(call(NoPtrTag), lookupExceptionHandlerFromCallerFrame));
jumpToExceptionHandler(*vm());
}
@@ -180,7 +180,7 @@
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
- m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
+ m_calls.append(CallLinkRecord(call(NoPtrTag), lookupExceptionHandler));
jumpToExceptionHandler(*vm());
}
@@ -214,14 +214,14 @@
usedJumpTables.set(data.switchTableIndex);
SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
- table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index], NoPtrTag);
table.ctiOffsets.grow(table.branchOffsets.size());
for (unsigned j = table.ctiOffsets.size(); j--;)
table.ctiOffsets[j] = table.ctiDefault;
for (unsigned j = data.cases.size(); j--;) {
SwitchCase& myCase = data.cases[j];
table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
- linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ linkBuffer.locationOf(m_blockHeads[myCase.target.block->index], NoPtrTag);
}
}
@@ -244,7 +244,7 @@
continue;
StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
- table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index], NoPtrTag);
StringJumpTable::StringOffsetTable::iterator iter;
StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
for (iter = table.offsetTable.begin(); iter != end; ++iter)
@@ -253,7 +253,7 @@
SwitchCase& myCase = data.cases[j];
iter = table.offsetTable.find(myCase.value.stringImpl());
RELEASE_ASSERT(iter != end);
- iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index], NoPtrTag);
}
}
@@ -275,7 +275,7 @@
info.patch.start = start;
ptrdiff_t inlineSize = MacroAssembler::differenceBetweenCodePtr(
- start, linkBuffer.locationOf(m_ins[i].m_done));
+ start, linkBuffer.locationOf(m_ins[i].m_done, NoPtrTag));
RELEASE_ASSERT(inlineSize >= 0);
info.patch.inlineSize = inlineSize;
@@ -283,7 +283,7 @@
start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call()));
info.patch.deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
- start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
+ start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label(), NoPtrTag));
}
for (auto& record : m_jsCalls) {
@@ -297,10 +297,10 @@
for (JSDirectCallRecord& record : m_jsDirectCalls) {
CallLinkInfo& info = *record.info;
- linkBuffer.link(record.call, linkBuffer.locationOf(record.slowPath));
+ linkBuffer.link(record.call, linkBuffer.locationOf(record.slowPath, NoPtrTag));
info.setCallLocations(
CodeLocationLabel(),
- linkBuffer.locationOf(record.slowPath),
+ linkBuffer.locationOf(record.slowPath, NoPtrTag),
linkBuffer.locationOfNearCall(record.call));
}
@@ -308,7 +308,7 @@
CallLinkInfo& info = *record.info;
info.setCallLocations(
linkBuffer.locationOf(record.patchableJump),
- linkBuffer.locationOf(record.slowPath),
+ linkBuffer.locationOf(record.slowPath, NoPtrTag),
linkBuffer.locationOfNearCall(record.call));
}
@@ -323,8 +323,8 @@
}
if (info.m_replacementSource.isSet()) {
m_jitCode->common.jumpReplacements.append(JumpReplacement(
- linkBuffer.locationOf(info.m_replacementSource),
- linkBuffer.locationOf(info.m_replacementDestination)));
+ linkBuffer.locationOf(info.m_replacementSource, NoPtrTag),
+ linkBuffer.locationOf(info.m_replacementDestination, NoPtrTag)));
}
}
@@ -334,7 +334,7 @@
Vector<Label>& labels = m_exitSiteLabels[i];
Vector<const void*> addresses;
for (unsigned j = 0; j < labels.size(); ++j)
- addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
+ addresses.append(linkBuffer.locationOf(labels[j], NoPtrTag).executableAddress());
m_graph.compilation()->addOSRExitSite(addresses);
}
} else
@@ -351,7 +351,7 @@
// i.e, we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
// If this *is set*, it means we will be landing at this code location from genericUnwind from an
// exception thrown in a child call frame.
- CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
+ CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination, NoPtrTag);
HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
newExceptionHandler.start = callSite.bits();
@@ -504,7 +504,7 @@
branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
emitStoreCodeOrigin(CodeOrigin(0));
move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
- m_callArityFixup = call();
+ m_callArityFixup = call(NoPtrTag);
jump(fromArityCheck);
// Generate slow path code.
@@ -534,7 +534,7 @@
disassemble(*linkBuffer);
- MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
+ MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck, NoPtrTag);
m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck);
@@ -574,7 +574,7 @@
{
RELEASE_ASSERT(basicBlock.isCatchEntrypoint);
RELEASE_ASSERT(basicBlock.intersectionOfCFAHasVisited); // An entrypoint is reachable by definition.
- m_jitCode->common.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf(blockHead).executableAddress(), WTFMove(argumentFormats));
+ m_jitCode->common.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf(blockHead, NoPtrTag).executableAddress(), WTFMove(argumentFormats));
}
void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index a3a22d6..c2e715f 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -158,7 +158,7 @@
// Add a call out from JIT code, without an exception check.
Call appendCall(const FunctionPtr& function)
{
- Call functionCall = call();
+ Call functionCall = call(NoPtrTag);
m_calls.append(CallLinkRecord(functionCall, function));
return functionCall;
}
diff --git a/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp b/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
index 93072cc..2dd754f 100644
--- a/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,7 +57,7 @@
bool JITFinalizer::finalize()
{
m_jitCode->initializeCodeRef(
- FINALIZE_DFG_CODE(*m_linkBuffer, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data()),
+ FINALIZE_DFG_CODE(*m_linkBuffer, NoPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data()),
MacroAssemblerCodePtr());
m_plan.codeBlock->setJITCode(m_jitCode.copyRef());
@@ -71,7 +71,7 @@
{
RELEASE_ASSERT(!m_withArityCheck.isEmptyValue());
m_jitCode->initializeCodeRef(
- FINALIZE_DFG_CODE(*m_linkBuffer, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data()),
+ FINALIZE_DFG_CODE(*m_linkBuffer, NoPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data()),
m_withArityCheck);
m_plan.codeBlock->setJITCode(m_jitCode.copyRef());
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index 42a898e..31ea024 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -1000,7 +1000,7 @@
RELEASE_ASSERT_NOT_REACHED();
break;
}
- jit.call(GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
alreadyAllocatedArguments.add(id, operand);
@@ -1069,7 +1069,7 @@
LinkBuffer patchBuffer(jit, codeBlock);
exit.m_code = FINALIZE_CODE_IF(
shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseDFGOSRExit(),
- patchBuffer,
+ patchBuffer, NoPtrTag,
"DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
exitIndex, toCString(exit.m_codeOrigin).data(),
exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
index 41b3f74..0ab02c1 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
@@ -110,7 +110,7 @@
jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
AssemblyHelpers::Jump doneAdjusting = jit.jump();
tooFewFails.link(&jit);
@@ -263,7 +263,7 @@
jit.setupArguments<decltype(operationOSRWriteBarrier)>(owner);
jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
- jit.call(scratch);
+ jit.call(scratch, NoPtrTag);
#if CPU(X86)
jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
@@ -318,7 +318,7 @@
}
jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
- jit.jump(GPRInfo::regT2);
+ jit.jump(GPRInfo::regT2, NoPtrTag);
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index b8c046c..8544ceb 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -9889,7 +9889,7 @@
#if USE(JSVALUE64)
m_jit.xor64(poisonScratch, scratch);
#endif
- m_jit.jump(scratch);
+ m_jit.jump(scratch, NoPtrTag);
data->didUseJumpTable = true;
}
@@ -9927,7 +9927,7 @@
silentSpillAllRegisters(scratch);
callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs.gpr(), data->switchTableIndex);
silentFillAllRegisters();
- m_jit.jump(scratch);
+ m_jit.jump(scratch, NoPtrTag);
#else
JITCompiler::Jump notInt = m_jit.branch32(
JITCompiler::NotEqual, valueRegs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
@@ -9942,7 +9942,7 @@
callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs, data->switchTableIndex);
silentFillAllRegisters();
- m_jit.jump(scratch);
+ m_jit.jump(scratch, NoPtrTag);
#endif
noResult(node, UseChildrenCalledExplicitly);
break;
@@ -10206,7 +10206,7 @@
callOperation(
operationSwitchString, string, static_cast<size_t>(data->switchTableIndex), string);
m_jit.exceptionCheck();
- m_jit.jump(string);
+ m_jit.jump(string, NoPtrTag);
return;
}
@@ -10244,7 +10244,7 @@
callOperation(operationSwitchString, string, static_cast<size_t>(data->switchTableIndex), string);
silentFillAllRegisters();
m_jit.exceptionCheck();
- m_jit.jump(string);
+ m_jit.jump(string, NoPtrTag);
}
void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data)
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index b6bd192..c96d41f 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -5682,7 +5682,7 @@
osrEnter.link(&m_jit);
}
m_jit.emitRestoreCalleeSaves();
- m_jit.jump(tempGPR);
+ m_jit.jump(tempGPR, NoPtrTag);
});
break;
}
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp
index d259cc8..2868f49 100644
--- a/Source/JavaScriptCore/dfg/DFGThunks.cpp
+++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp
@@ -45,7 +45,7 @@
MacroAssembler jit;
jit.probe(OSRExit::executeOSRExit, vm);
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "DFG OSR exit thunk");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "DFG OSR exit thunk");
}
MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
@@ -82,7 +82,7 @@
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif
- MacroAssembler::Call functionCall = jit.call();
+ MacroAssembler::Call functionCall = jit.call(NoPtrTag);
jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->addressOfActiveLength()), GPRInfo::regT0);
jit.storePtr(MacroAssembler::TrustedImmPtr(nullptr), MacroAssembler::Address(GPRInfo::regT0));
@@ -98,14 +98,14 @@
jit.load32(buffer + i, GPRInfo::toRegister(i));
#endif
}
-
- jit.jump(MacroAssembler::AbsoluteAddress(&vm->osrExitJumpDestination));
-
+
+ jit.jump(MacroAssembler::AbsoluteAddress(&vm->osrExitJumpDestination), NoPtrTag);
+
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- patchBuffer.link(functionCall, OSRExit::compileOSRExit);
+ patchBuffer.link(functionCall, OSRExit::compileOSRExit, NoPtrTag);
- return FINALIZE_CODE(patchBuffer, "DFG OSR exit generation thunk");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "DFG OSR exit generation thunk");
}
MacroAssemblerCodeRef osrEntryThunkGenerator(VM* vm)
@@ -143,10 +143,10 @@
jit.restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
jit.emitMaterializeTagCheckRegisters();
- jit.jump(GPRInfo::regT1);
+ jit.jump(GPRInfo::regT1, NoPtrTag);
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "DFG OSR entry thunk");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "DFG OSR entry thunk");
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/ftl/FTLCompile.cpp b/Source/JavaScriptCore/ftl/FTLCompile.cpp
index e22b9ad..e23a13f 100644
--- a/Source/JavaScriptCore/ftl/FTLCompile.cpp
+++ b/Source/JavaScriptCore/ftl/FTLCompile.cpp
@@ -134,7 +134,7 @@
jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
jit.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- CCallHelpers::Call call = jit.call();
+ CCallHelpers::Call call = jit.call(NoPtrTag);
jit.jumpToExceptionHandler(vm);
jit.addLinkTask(
[=] (LinkBuffer& linkBuffer) {
diff --git a/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp b/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
index 88f25e3..5176def 100644
--- a/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
+++ b/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
@@ -75,13 +75,13 @@
jitCode->initializeB3Code(
FINALIZE_CODE_IF(
- dumpDisassembly, *b3CodeLinkBuffer,
+ dumpDisassembly, *b3CodeLinkBuffer, CodeEntryPtrTag,
"FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data()));
if (entrypointLinkBuffer) {
jitCode->initializeArityCheckEntrypoint(
FINALIZE_CODE_IF(
- dumpDisassembly, *entrypointLinkBuffer,
+ dumpDisassembly, *entrypointLinkBuffer, CodeEntryWithArityCheckPtrTag,
"FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data(), function));
}
diff --git a/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp b/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp
index bd66e76..c65a63d 100644
--- a/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp
+++ b/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp
@@ -67,7 +67,7 @@
linkBuffer.link(params.doneJumps, m_done);
if (m_exceptionTarget)
linkBuffer.link(exceptionJumps, m_exceptionTarget);
- m_stub = FINALIZE_CODE_FOR(codeBlock, linkBuffer, "Lazy slow path call stub");
+ m_stub = FINALIZE_CODE_FOR(codeBlock, linkBuffer, NoPtrTag, "Lazy slow path call stub");
MacroAssembler::repatchJump(m_patchableJump, CodeLocationLabel(m_stub.code()));
}
diff --git a/Source/JavaScriptCore/ftl/FTLLink.cpp b/Source/JavaScriptCore/ftl/FTLLink.cpp
index 71cfbd2..db5e7cf 100644
--- a/Source/JavaScriptCore/ftl/FTLLink.cpp
+++ b/Source/JavaScriptCore/ftl/FTLLink.cpp
@@ -140,13 +140,13 @@
jit.emitFunctionPrologue();
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
- CCallHelpers::Call callArityCheck = jit.call();
+ CCallHelpers::Call callArityCheck = jit.call(NoPtrTag);
auto noException = jit.branch32(CCallHelpers::GreaterThanOrEqual, GPRInfo::returnValueGPR, CCallHelpers::TrustedImm32(0));
jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
jit.move(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- CCallHelpers::Call callLookupExceptionHandlerFromCallerFrame = jit.call();
+ CCallHelpers::Call callLookupExceptionHandlerFromCallerFrame = jit.call(NoPtrTag);
jit.jumpToExceptionHandler(vm);
noException.link(&jit);
@@ -159,7 +159,7 @@
jit.emitFunctionEpilogue();
mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::argumentGPR0));
jit.emitFunctionPrologue();
- CCallHelpers::Call callArityFixup = jit.call();
+ CCallHelpers::Call callArityFixup = jit.call(NoPtrTag);
jit.emitFunctionEpilogue();
mainPathJumps.append(jit.jump());
diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
index bb36563..4a94e67 100644
--- a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
+++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
@@ -286,11 +286,11 @@
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
- CCallHelpers::Call throwCall = jit.call();
+ CCallHelpers::Call throwCall = jit.call(NoPtrTag);
jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
+ CCallHelpers::Call lookupExceptionHandlerCall = jit.call(NoPtrTag);
jit.jumpToExceptionHandler(*vm);
jit.addLinkTask(
@@ -7636,7 +7636,7 @@
auto callWithExceptionCheck = [&] (void* callee) {
jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
- jit.call(GPRInfo::nonPreservedNonArgumentGPR);
+ jit.call(GPRInfo::nonPreservedNonArgumentGPR, NoPtrTag);
exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
};
@@ -7975,7 +7975,7 @@
auto callWithExceptionCheck = [&] (void* callee) {
jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
- jit.call(GPRInfo::nonPreservedNonArgumentGPR);
+ jit.call(GPRInfo::nonPreservedNonArgumentGPR, NoPtrTag);
exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
};
@@ -8163,7 +8163,7 @@
jit.subPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
jit.setupArguments<decltype(operationCallEval)>(GPRInfo::regT1);
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR);
- jit.call(GPRInfo::nonPreservedNonArgumentGPR);
+ jit.call(GPRInfo::nonPreservedNonArgumentGPR, NoPtrTag);
exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
index 1c2449f..6b58648 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
@@ -339,7 +339,7 @@
CCallHelpers::TrustedImmPtr(materialization),
CCallHelpers::TrustedImmPtr(materializationArguments));
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));
// Let everyone know that we're done.
@@ -367,7 +367,7 @@
CCallHelpers::TrustedImmPtr(materializationToPointer.get(materialization)),
CCallHelpers::TrustedImmPtr(materializationArguments));
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationPopulateObjectInOSR)), GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
}
// Save all state from wherever the exit data tells us it was, into the appropriate place in
@@ -494,7 +494,7 @@
LinkBuffer patchBuffer(jit, codeBlock);
exit.m_code = FINALIZE_CODE_IF(
shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
- patchBuffer,
+ patchBuffer, NoPtrTag,
"FTL OSR exit #%u (%s, %s) from %s, with operands = %s",
exitID, toCString(exit.m_codeOrigin).data(),
exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
@@ -541,7 +541,7 @@
}
prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
-
+
compileStub(exitID, jitCode, exit, &vm, codeBlock);
MacroAssembler::repatchJump(
diff --git a/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp b/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp
index 5fa5ecc..c87d67d 100644
--- a/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp
+++ b/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp
@@ -120,7 +120,7 @@
SlowPathCall SlowPathCallContext::makeCall(VM& vm, void* callTarget)
{
- SlowPathCall result = SlowPathCall(m_jit.call(), keyWithTarget(callTarget));
+ SlowPathCall result = SlowPathCall(m_jit.call(NoPtrTag), keyWithTarget(callTarget));
m_jit.addLinkTask(
[result, &vm] (LinkBuffer& linkBuffer) {
diff --git a/Source/JavaScriptCore/ftl/FTLThunks.cpp b/Source/JavaScriptCore/ftl/FTLThunks.cpp
index e3c7054..5f2d3d0 100644
--- a/Source/JavaScriptCore/ftl/FTLThunks.cpp
+++ b/Source/JavaScriptCore/ftl/FTLThunks.cpp
@@ -86,7 +86,7 @@
jit.peek(
GPRInfo::argumentGPR1,
(stackMisalignment - MacroAssembler::pushToSaveByteOffset()) / sizeof(void*));
- MacroAssembler::Call functionCall = jit.call();
+ MacroAssembler::Call functionCall = jit.call(NoPtrTag);
// At this point we want to make a tail call to what was returned to us in the
// returnValueGPR. But at the same time as we do this, we must restore all registers.
@@ -119,21 +119,21 @@
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
patchBuffer.link(functionCall, generationFunction);
- return FINALIZE_CODE(patchBuffer, "%s", name);
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "%s", name);
}
MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
{
unsigned extraPopsToRestore = 0;
return genericGenerationThunkGenerator(
- vm, compileFTLOSRExit, "FTL OSR exit generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::Needed);
+ vm, FunctionPtr(compileFTLOSRExit, NoPtrTag), "FTL OSR exit generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::Needed);
}
MacroAssemblerCodeRef lazySlowPathGenerationThunkGenerator(VM* vm)
{
unsigned extraPopsToRestore = 1;
return genericGenerationThunkGenerator(
- vm, compileFTLLazySlowPath, "FTL lazy slow path generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::NotNeeded);
+ vm, FunctionPtr(compileFTLLazySlowPath, NoPtrTag), "FTL lazy slow path generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::NotNeeded);
}
static void registerClobberCheck(AssemblyHelpers& jit, RegisterSet dontClobber)
@@ -197,7 +197,7 @@
registerClobberCheck(jit, key.argumentRegisters());
- AssemblyHelpers::Call call = jit.call();
+ AssemblyHelpers::Call call = jit.call(NoPtrTag);
jit.loadPtr(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset()), GPRInfo::nonPreservedNonReturnGPR);
jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
@@ -224,7 +224,7 @@
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
patchBuffer.link(call, FunctionPtr(key.callTarget()));
- return FINALIZE_CODE(patchBuffer, "FTL slow path call thunk for %s", toCString(key).data());
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "FTL slow path call thunk for %s", toCString(key).data());
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
index 8079ac9..cd5c4b2 100644
--- a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
@@ -346,8 +346,8 @@
#else
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif
- move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
- call(GPRInfo::nonPreservedNonReturnGPR);
+ move(TrustedImmPtr(tagCFunctionPtr(operationExceptionFuzz, SlowPathPtrTag)), GPRInfo::nonPreservedNonReturnGPR);
+ call(GPRInfo::nonPreservedNonReturnGPR, SlowPathPtrTag);
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
@@ -940,8 +940,8 @@
#else
#error "JIT not supported on this platform."
#endif
- move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
- call(scratch);
+ move(TrustedImmPtr(tagCFunctionPtr(function, SlowPathPtrTag)), scratch);
+ call(scratch, SlowPathPtrTag);
move(TrustedImmPtr(scratchBuffer->addressOfActiveLength()), GPRInfo::regT0);
storePtr(TrustedImmPtr(nullptr), GPRInfo::regT0);
diff --git a/Source/JavaScriptCore/jit/CCallHelpers.cpp b/Source/JavaScriptCore/jit/CCallHelpers.cpp
index 0c2af4d..f515c3e 100644
--- a/Source/JavaScriptCore/jit/CCallHelpers.cpp
+++ b/Source/JavaScriptCore/jit/CCallHelpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -59,8 +59,8 @@
loadPtr(Address(scratch1NonArgGPR), shadowPacket);
Jump ok = branchPtr(Below, shadowPacket, TrustedImmPtr(vm.shadowChicken().logEnd()));
setupArguments<decltype(operationProcessShadowChickenLog)>();
- move(TrustedImmPtr(bitwise_cast<void*>(operationProcessShadowChickenLog)), scratch1NonArgGPR);
- call(scratch1NonArgGPR);
+ move(TrustedImmPtr(tagCFunctionPtr(operationProcessShadowChickenLog, SlowPathPtrTag)), scratch1NonArgGPR);
+ call(scratch1NonArgGPR, SlowPathPtrTag);
move(TrustedImmPtr(vm.shadowChicken().addressOfLogCursor()), scratch1NonArgGPR);
loadPtr(Address(scratch1NonArgGPR), shadowPacket);
ok.link(this);
diff --git a/Source/JavaScriptCore/jit/CCallHelpers.h b/Source/JavaScriptCore/jit/CCallHelpers.h
index 352214b..c3061b9 100644
--- a/Source/JavaScriptCore/jit/CCallHelpers.h
+++ b/Source/JavaScriptCore/jit/CCallHelpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2015-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -518,7 +518,7 @@
// genericUnwind() leaves the handler CallFrame* in vm->callFrameForCatch,
// and the address of the handler in vm->targetMachinePCForThrow.
loadPtr(&vm.targetMachinePCForThrow, GPRInfo::regT1);
- jump(GPRInfo::regT1);
+ jump(GPRInfo::regT1, ExceptionHandlerPtrTag);
}
void prepareForTailCallSlow(GPRReg calleeGPR = InvalidGPRReg)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index de7b545..d4aad60 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -298,7 +298,7 @@
// to appear in the console or anywhere in memory, via the PrintStream buffer.
// The second is we can't guarantee that the code is readable when using the
// asyncDisassembly option as our caller will set our pages execute only.
- return linkBuffer.finalizeCodeWithoutDisassembly();
+ return linkBuffer.finalizeCodeWithoutDisassembly(NoPtrTag);
}
#else // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 0897c5d..95d4bee 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -109,7 +109,7 @@
callOperation(operationOptimize, m_bytecodeOffset);
skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
move(returnValueGPR2, stackPointerRegister);
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
skipOptimize.link(this);
}
#endif
@@ -776,32 +776,36 @@
ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
- record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset], NoPtrTag);
for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
- record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset], NoPtrTag) : record.jumpTable.simpleJumpTable->ctiDefault;
}
} else {
ASSERT(record.type == SwitchRecord::String);
- record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
+ auto* stringJumpTable = record.jumpTable.stringJumpTable;
+ stringJumpTable->ctiDefault =
+ patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset], NoPtrTag);
- for (auto& location : record.jumpTable.stringJumpTable->offsetTable.values()) {
+ for (auto& location : stringJumpTable->offsetTable.values()) {
unsigned offset = location.branchOffset;
- location.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ location.ctiOffset = offset
+ ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset], NoPtrTag)
+ : stringJumpTable->ctiDefault;
}
}
}
for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
- handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
+ handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target], ExceptionHandlerPtrTag);
}
for (auto& record : m_calls) {
if (record.to)
- patchBuffer.link(record.from, FunctionPtr(record.to));
+ patchBuffer.link(record.from, FunctionPtr(record.to, SlowPathPtrTag));
}
for (unsigned i = m_getByIds.size(); i--;)
@@ -812,7 +816,7 @@
m_putByIds[i].finalize(patchBuffer);
if (m_byValCompilationInfo.size()) {
- CodeLocationLabel exceptionHandler = patchBuffer.locationOf(m_exceptionHandler);
+ CodeLocationLabel exceptionHandler = patchBuffer.locationOf(m_exceptionHandler, ExceptionHandlerPtrTag);
for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
@@ -820,9 +824,9 @@
if (Jump(patchableNotIndexJump).isSet())
notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
- CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
- CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
- CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
+ CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget, NoPtrTag);
+ CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget, NoPtrTag);
+ CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget, NoPtrTag);
CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
*byValCompilationInfo.byValInfo = ByValInfo(
@@ -855,7 +859,7 @@
MacroAssemblerCodePtr withArityCheck;
if (m_codeBlock->codeType() == FunctionCode)
- withArityCheck = patchBuffer.locationOf(m_arityCheck);
+ withArityCheck = patchBuffer.locationOf(m_arityCheck, CodeEntryWithArityCheckPtrTag);
if (Options::dumpDisassembly()) {
m_disassembler->dump(patchBuffer);
@@ -871,7 +875,7 @@
m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));
CodeRef result = FINALIZE_CODE(
- patchBuffer,
+ patchBuffer, CodeEntryPtrTag,
"Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data());
m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
@@ -913,7 +917,7 @@
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
- m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
+ m_calls.append(CallRecord(call(SlowPathPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame, SlowPathPtrTag).value()));
jumpToExceptionHandler(*vm());
}
@@ -932,7 +936,7 @@
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
- m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
+ m_calls.append(CallRecord(call(SlowPathPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler, SlowPathPtrTag).value()));
jumpToExceptionHandler(*vm());
}
}
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 6ea3a5b..5b72052 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -269,7 +269,7 @@
// Add a call out from JIT code, without an exception check.
Call appendCall(const FunctionPtr& function)
{
- Call functionCall = call();
+ Call functionCall = call(NoPtrTag);
m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value()));
return functionCall;
}
diff --git a/Source/JavaScriptCore/jit/JITMathIC.h b/Source/JavaScriptCore/jit/JITMathIC.h
index cb24c1b..1525fcf 100644
--- a/Source/JavaScriptCore/jit/JITMathIC.h
+++ b/Source/JavaScriptCore/jit/JITMathIC.h
@@ -139,7 +139,7 @@
LinkBuffer linkBuffer(jit, m_inlineStart.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
RELEASE_ASSERT(linkBuffer.isValid());
linkBuffer.link(jump, CodeLocationLabel(m_code.code()));
- FINALIZE_CODE(linkBuffer, "JITMathIC: linking constant jump to out of line stub");
+ FINALIZE_CODE(linkBuffer, NoPtrTag, "JITMathIC: linking constant jump to out of line stub");
};
auto replaceCall = [&] () {
@@ -166,7 +166,7 @@
linkBuffer.link(jumpToDone, doneLocation());
m_code = FINALIZE_CODE_FOR(
- codeBlock, linkBuffer, "JITMathIC: generating out of line fast IC snippet");
+ codeBlock, linkBuffer, NoPtrTag, "JITMathIC: generating out of line fast IC snippet");
if (!generationState.shouldSlowPathRepatch) {
// We won't need to regenerate, so we can wire the slow path call
@@ -208,7 +208,7 @@
linkBuffer.link(slowPathJumpList, slowPathStartLocation());
m_code = FINALIZE_CODE_FOR(
- codeBlock, linkBuffer, "JITMathIC: generating out of line IC snippet");
+ codeBlock, linkBuffer, NoPtrTag, "JITMathIC: generating out of line IC snippet");
}
linkJumpToOutOfLineSnippet();
@@ -216,17 +216,17 @@
void finalizeInlineCode(const MathICGenerationState& state, LinkBuffer& linkBuffer)
{
- CodeLocationLabel start = linkBuffer.locationOf(state.fastPathStart);
+ CodeLocationLabel start = linkBuffer.locationOf(state.fastPathStart, NoPtrTag);
m_inlineStart = start;
m_inlineSize = MacroAssembler::differenceBetweenCodePtr(
- start, linkBuffer.locationOf(state.fastPathEnd));
+ start, linkBuffer.locationOf(state.fastPathEnd, NoPtrTag));
ASSERT(m_inlineSize > 0);
m_deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
start, linkBuffer.locationOf(state.slowPathCall));
m_deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
- start, linkBuffer.locationOf(state.slowPathStart));
+ start, linkBuffer.locationOf(state.slowPathStart, SlowPathPtrTag));
}
ArithProfile* arithProfile() const { return m_arithProfile; }
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 8fb56be..850dca4 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -572,7 +572,7 @@
callOperation(operationTryOSREnterAtCatchAndValueProfile, m_bytecodeOffset);
auto skipOSREntry = branchTestPtr(Zero, returnValueGPR);
emitRestoreCalleeSaves();
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
skipOSREntry.link(this);
if (buffer && shouldEmitProfiling()) {
buffer->forEach([&] (ValueProfileAndOperand& profile) {
@@ -610,7 +610,7 @@
emitGetVirtualRegister(scrutinee, regT0);
callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
}
void JIT::emit_op_switch_char(Instruction* currentInstruction)
@@ -626,7 +626,7 @@
emitGetVirtualRegister(scrutinee, regT0);
callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
}
void JIT::emit_op_switch_string(Instruction* currentInstruction)
@@ -641,7 +641,7 @@
emitGetVirtualRegister(scrutinee, regT0);
callOperation(operationSwitchStringWithUnknownKeyType, regT0, tableIndex);
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
}
void JIT::emit_op_debug(Instruction* currentInstruction)
@@ -877,7 +877,7 @@
abortWithReason(JITUnreasonableLoopHintJumpTarget);
ok.link(this);
}
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
noOptimizedEntry.link(this);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
@@ -1081,11 +1081,11 @@
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ m_codeBlock, patchBuffer, NoPtrTag,
"Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric, SlowPathPtrTag));
}
void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index fe6814b..88ddf2b 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -738,7 +738,7 @@
callOperation(operationTryOSREnterAtCatchAndValueProfile, m_bytecodeOffset);
auto skipOSREntry = branchTestPtr(Zero, returnValueGPR);
emitRestoreCalleeSaves();
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
skipOSREntry.link(this);
if (buffer && shouldEmitProfiling()) {
buffer->forEach([&] (ValueProfileAndOperand& profile) {
@@ -776,7 +776,7 @@
emitLoad(scrutinee, regT1, regT0);
callOperation(operationSwitchImmWithUnknownKeyType, JSValueRegs(regT1, regT0), tableIndex);
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
}
void JIT::emit_op_switch_char(Instruction* currentInstruction)
@@ -792,7 +792,7 @@
emitLoad(scrutinee, regT1, regT0);
callOperation(operationSwitchCharWithUnknownKeyType, JSValueRegs(regT1, regT0), tableIndex);
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
}
void JIT::emit_op_switch_string(Instruction* currentInstruction)
@@ -807,7 +807,7 @@
emitLoad(scrutinee, regT1, regT0);
callOperation(operationSwitchStringWithUnknownKeyType, JSValueRegs(regT1, regT0), tableIndex);
- jump(returnValueGPR);
+ jump(returnValueGPR, NoPtrTag);
}
void JIT::emit_op_debug(Instruction* currentInstruction)
@@ -932,11 +932,11 @@
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ m_codeBlock, patchBuffer, NoPtrTag,
"Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric, SlowPathPtrTag));
}
void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 1f6a24f..0f5d70f 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -90,7 +90,7 @@
jit.ret();
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "String get_by_val stub");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "String get_by_val stub");
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
@@ -1265,11 +1265,11 @@
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ m_codeBlock, patchBuffer, NoPtrTag,
"Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric, SlowPathPtrTag));
}
void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
@@ -1292,17 +1292,17 @@
for (const auto& callSite : m_calls) {
if (callSite.to)
- patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
+ patchBuffer.link(callSite.from, FunctionPtr(callSite.to, SlowPathPtrTag));
}
gen.finalize(patchBuffer);
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ m_codeBlock, patchBuffer, NoPtrTag,
"Baseline get_by_val with cached property name '%s' stub for %s, return point %p", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value());
byValInfo->stubInfo = gen.stubInfo();
MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric, SlowPathPtrTag));
}
void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -1346,22 +1346,22 @@
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
if (needsLinkForWriteBarrier) {
ASSERT(m_calls.last().to == operationWriteBarrierSlowPath);
- patchBuffer.link(m_calls.last().from, operationWriteBarrierSlowPath);
+ patchBuffer.link(m_calls.last().from, operationWriteBarrierSlowPath, SlowPathPtrTag);
}
bool isDirect = Interpreter::getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
if (!isDirect) {
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ m_codeBlock, patchBuffer, NoPtrTag,
"Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
} else {
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ m_codeBlock, patchBuffer, NoPtrTag,
"Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
}
MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric, SlowPathPtrTag));
}
void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
@@ -1382,17 +1382,17 @@
for (const auto& callSite : m_calls) {
if (callSite.to)
- patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
+ patchBuffer.link(callSite.from, FunctionPtr(callSite.to, SlowPathPtrTag));
}
gen.finalize(patchBuffer);
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ m_codeBlock, patchBuffer, NoPtrTag,
"Baseline put_by_val%s with cached property name '%s' stub for %s, return point %p", (putKind == Direct) ? "_direct" : "", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value());
byValInfo->stubInfo = gen.stubInfo();
MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric, SlowPathPtrTag));
}
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index c8810ee..43ef9e4 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -178,7 +178,7 @@
jit.ret();
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "String get_by_val stub");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "String get_by_val stub");
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.h b/Source/JavaScriptCore/jit/JITStubRoutine.h
index a323b53..b8b3047 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.h
@@ -114,8 +114,8 @@
};
// Helper for the creation of simple stub routines that need no help from the GC.
-#define FINALIZE_CODE_FOR_STUB(codeBlock, patchBuffer, ...) \
- (adoptRef(new JITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), __VA_ARGS__))))
+#define FINALIZE_CODE_FOR_STUB(codeBlock, patchBuffer, resultPtrTag, ...) \
+ (adoptRef(new JITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), (resultPtrTag), __VA_ARGS__))))
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp
index 3d2774f..f5c8eaa 100644
--- a/Source/JavaScriptCore/jit/Repatch.cpp
+++ b/Source/JavaScriptCore/jit/Repatch.cpp
@@ -73,7 +73,7 @@
if (codeBlock->jitType() == JITCode::FTLJIT) {
return FunctionPtr(codeBlock->vm()->ftlThunks->keyForSlowPathCallThunk(
MacroAssemblerCodePtr::createFromExecutableAddress(
- result.executableAddress())).callTarget());
+ result.executableAddress())).callTarget(), CodeEntryPtrTag);
}
#else
UNUSED_PARAM(codeBlock);
@@ -386,14 +386,17 @@
static V_JITOperation_ESsiJJI appropriateOptimizingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
{
- if (slot.isStrictMode()) {
+ auto pickSlowPath = [&] () -> V_JITOperation_ESsiJJI {
+ if (slot.isStrictMode()) {
+ if (putKind == Direct)
+ return operationPutByIdDirectStrictOptimize;
+ return operationPutByIdStrictOptimize;
+ }
if (putKind == Direct)
- return operationPutByIdDirectStrictOptimize;
- return operationPutByIdStrictOptimize;
- }
- if (putKind == Direct)
- return operationPutByIdDirectNonStrictOptimize;
- return operationPutByIdNonStrictOptimize;
+ return operationPutByIdDirectNonStrictOptimize;
+ return operationPutByIdNonStrictOptimize;
+ };
+ return tagCFunctionPtr(pickSlowPath(), SlowPathPtrTag);
}
static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
@@ -1068,7 +1071,7 @@
// with a non-decorated bottom bit but a normal call calls an address with a decorated bottom bit.
bool isTailCall = callToCodePtr.call.isFlagSet(CCallHelpers::Call::Tail);
patchBuffer.link(
- callToCodePtr.call, FunctionPtr(isTailCall ? callToCodePtr.codePtr.dataLocation() : callToCodePtr.codePtr.executableAddress()));
+ callToCodePtr.call, FunctionPtr(tagCodePtr(isTailCall ? callToCodePtr.codePtr.dataLocation() : callToCodePtr.codePtr.executableAddress(), CodeEntryPtrTag)));
}
if (isWebAssembly || JITCode::isOptimizingJIT(callerCodeBlock->jitType()))
patchBuffer.link(done, callLinkInfo.callReturnLocation().labelAtOffset(0));
@@ -1078,7 +1081,7 @@
auto stubRoutine = adoptRef(*new PolymorphicCallStubRoutine(
FINALIZE_CODE_FOR(
- callerCodeBlock, patchBuffer,
+ callerCodeBlock, patchBuffer, NoPtrTag,
"Polymorphic call stub for %s, return point %p, targets %s",
isWebAssembly ? "WebAssembly" : toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation().labelAtOffset(0).executableAddress(),
toCString(listDump(callCases)).data()),
@@ -1124,7 +1127,7 @@
optimizedFunction = operationPutByIdDirectNonStrictOptimize;
}
- ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), optimizedFunction);
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), tagCFunctionPtr(optimizedFunction, SlowPathPtrTag));
InlineAccess::rewireStubAsJump(stubInfo, stubInfo.slowPathStartLocation());
}
diff --git a/Source/JavaScriptCore/jit/SlowPathCall.h b/Source/JavaScriptCore/jit/SlowPathCall.h
index 53883f1..4afe091 100644
--- a/Source/JavaScriptCore/jit/SlowPathCall.h
+++ b/Source/JavaScriptCore/jit/SlowPathCall.h
@@ -61,7 +61,7 @@
m_jit->move(JIT::callFrameRegister, JIT::argumentGPR0);
m_jit->move(JIT::TrustedImmPtr(m_pc), JIT::argumentGPR1);
#endif
- JIT::Call call = m_jit->call();
+ JIT::Call call = m_jit->call(NoPtrTag);
m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, m_stub.value()));
#if CPU(X86) && USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 5a154c1..cdd706c 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -170,14 +170,14 @@
patchBuffer.link(m_failures, CodeLocationLabel(fallback));
for (unsigned i = 0; i < m_calls.size(); i++)
patchBuffer.link(m_calls[i].first, m_calls[i].second);
- return FINALIZE_CODE(patchBuffer, "Specialized thunk for %s", thunkKind);
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "Specialized thunk for %s", thunkKind);
}
// Assumes that the target function uses fpRegister0 as the first argument
// and return value. Like any sensible architecture would.
void callDoubleToDouble(FunctionPtr function)
{
- m_calls.append(std::make_pair(call(), function));
+ m_calls.append(std::make_pair(call(SlowPathPtrTag), function));
}
void callDoubleToDoublePreservingReturn(FunctionPtr function)
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 89aa475..0f6f6be 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -71,11 +71,11 @@
jit.setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
jit.jumpToExceptionHandler(*vm);
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "Throw exception from call slow path thunk");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "Throw exception from call slow path thunk");
}
static void slowPathFor(
@@ -95,7 +95,7 @@
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
@@ -105,7 +105,7 @@
jit.setupArguments<decltype(slowPathFunction)>(GPRInfo::regT2);
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
if (maxFrameExtentForSlowPathCall)
jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif
@@ -126,7 +126,7 @@
jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);
doNotTrash.link(&jit);
- jit.jump(GPRInfo::returnValueGPR);
+ jit.jump(GPRInfo::returnValueGPR, NoPtrTag);
}
MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
@@ -141,7 +141,7 @@
slowPathFor(jit, vm, operationLinkCall);
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "Link call slow path thunk");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "Link call slow path thunk");
}
// For closure optimizations, we only include calls, since if you're using closures for
@@ -153,7 +153,7 @@
slowPathFor(jit, vm, operationLinkPolymorphicCall);
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "Link polymorphic call slow path thunk");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "Link polymorphic call slow path thunk");
}
// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
@@ -226,7 +226,7 @@
jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
jit.prepareForTailCallSlow(GPRInfo::regT4);
}
- jit.jump(GPRInfo::regT4);
+ jit.jump(GPRInfo::regT4, NoPtrTag);
notJSFunction.link(&jit);
slowCase.append(jit.branchIfNotType(GPRInfo::regT0, InternalFunctionType));
@@ -236,12 +236,11 @@
slowCase.link(&jit);
// Here we don't know anything, so revert to the full slow path.
-
slowPathFor(jit, vm, operationVirtualCall);
-
+
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
- patchBuffer,
+ patchBuffer, NoPtrTag,
"Virtual %s slow path thunk",
callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct");
}
@@ -294,9 +293,9 @@
if (thunkFunctionType == ThunkFunctionType::JSFunction) {
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), JSInterfaceJIT::regT1);
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
+ jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction), NoPtrTag);
} else
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, InternalFunction::offsetOfNativeFunctionFor(kind)));
+ jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, InternalFunction::offsetOfNativeFunctionFor(kind)), NoPtrTag);
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
@@ -315,7 +314,7 @@
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, InternalFunction::offsetOfNativeFunctionFor(kind)), X86Registers::r9);
jit.move(JSInterfaceJIT::TrustedImm64(NativeCodePoison::key()), X86Registers::esi);
jit.xor64(X86Registers::esi, X86Registers::r9);
- jit.call(X86Registers::r9);
+ jit.call(X86Registers::r9, NoPtrTag);
#else
// Calling convention: f(ecx, edx, r8, r9, ...);
@@ -330,9 +329,9 @@
if (thunkFunctionType == ThunkFunctionType::JSFunction) {
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), X86Registers::r9);
- jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
+ jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction), NoPtrTag);
} else
- jit.call(JSInterfaceJIT::Address(X86Registers::edx, InternalFunction::offsetOfNativeFunctionFor(kind)));
+ jit.call(JSInterfaceJIT::Address(X86Registers::edx, InternalFunction::offsetOfNativeFunctionFor(kind)), NoPtrTag);
jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
@@ -354,7 +353,7 @@
jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, InternalFunction::offsetOfNativeFunctionFor(kind)), ARM64Registers::x2);
jit.move(JSInterfaceJIT::TrustedImm64(NativeCodePoison::key()), ARM64Registers::x1);
jit.xor64(ARM64Registers::x1, ARM64Registers::x2);
- jit.call(ARM64Registers::x2);
+ jit.call(ARM64Registers::x2, NoPtrTag);
#elif CPU(ARM) || CPU(MIPS)
#if CPU(MIPS)
@@ -370,9 +369,9 @@
if (thunkFunctionType == ThunkFunctionType::JSFunction) {
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), JSInterfaceJIT::regT2);
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
+ jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), NoPtrTag);
} else
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, InternalFunction::offsetOfNativeFunctionFor(kind)));
+ jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, InternalFunction::offsetOfNativeFunctionFor(kind)), NoPtrTag);
#if CPU(MIPS)
// Restore stack space
@@ -416,8 +415,8 @@
#endif
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
#endif
- jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
- jit.call(JSInterfaceJIT::regT3);
+ jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException, NoPtrTag).value()), JSInterfaceJIT::regT3);
+ jit.call(JSInterfaceJIT::regT3, NoPtrTag);
#if CPU(X86) && USE(JSVALUE32_64)
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
@@ -427,7 +426,7 @@
jit.jumpToExceptionHandler(*vm);
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "%s %s%s trampoline", thunkFunctionType == ThunkFunctionType::JSFunction ? "native" : "internal", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data());
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "%s %s%s trampoline", thunkFunctionType == ThunkFunctionType::JSFunction ? "native" : "internal", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data());
}
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
@@ -584,7 +583,7 @@
#endif
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "fixup arity");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "fixup arity");
}
MacroAssemblerCodeRef unreachableGenerator(VM* vm)
@@ -594,7 +593,7 @@
jit.breakpoint();
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "unreachable thunk");
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "unreachable thunk");
}
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
@@ -1182,7 +1181,7 @@
jit.xor64(GPRInfo::regT1, GPRInfo::regT0);
#endif
emitPointerValidation(jit, GPRInfo::regT0);
- jit.call(GPRInfo::regT0);
+ jit.call(GPRInfo::regT0, NoPtrTag);
jit.emitFunctionEpilogue();
jit.ret();
@@ -1190,7 +1189,7 @@
LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
linkBuffer.link(noCode, CodeLocationLabel(vm->jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
return FINALIZE_CODE(
- linkBuffer, "Specialized thunk for bound function calls with no arguments");
+ linkBuffer, NoPtrTag, "Specialized thunk for bound function calls with no arguments");
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp
index 9d774fc..45c39be 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.cpp
+++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp
@@ -51,51 +51,52 @@
namespace LLInt {
-static MacroAssemblerCodeRef generateThunkWithJumpTo(VM* vm, void (*target)(), const char *thunkKind)
+static MacroAssemblerCodeRef generateThunkWithJumpTo(VM* vm, OpcodeID opcodeID, PtrTag thunkTag, const char *thunkKind)
{
JSInterfaceJIT jit(vm);
-
+
// FIXME: there's probably a better way to do it on X86, but I'm not sure I care.
+ LLIntCode target = LLInt::getCodeFunctionPtr(opcodeID);
jit.move(JSInterfaceJIT::TrustedImmPtr(bitwise_cast<void*>(target)), JSInterfaceJIT::regT0);
- jit.jump(JSInterfaceJIT::regT0);
-
+ jit.jump(JSInterfaceJIT::regT0, ptrTag(BytecodeHelperPtrTag, opcodeID));
+
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, "LLInt %s prologue thunk", thunkKind);
+ return FINALIZE_CODE(patchBuffer, thunkTag, "LLInt %s prologue thunk", thunkKind);
}
MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_prologue), "function for call");
+ return generateThunkWithJumpTo(vm, llint_function_for_call_prologue, CodeEntryPtrTag, "function for call");
}
MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_prologue), "function for construct");
+ return generateThunkWithJumpTo(vm, llint_function_for_construct_prologue, CodeEntryPtrTag, "function for construct");
}
MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_arity_check), "function for call with arity check");
+ return generateThunkWithJumpTo(vm, llint_function_for_call_arity_check, CodeEntryWithArityCheckPtrTag, "function for call with arity check");
}
MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_arity_check), "function for construct with arity check");
+ return generateThunkWithJumpTo(vm, llint_function_for_construct_arity_check, CodeEntryWithArityCheckPtrTag, "function for construct with arity check");
}
MacroAssemblerCodeRef evalEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_eval_prologue), "eval");
+ return generateThunkWithJumpTo(vm, llint_eval_prologue, CodeEntryPtrTag, "eval");
}
MacroAssemblerCodeRef programEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_program_prologue), "program");
+ return generateThunkWithJumpTo(vm, llint_program_prologue, CodeEntryPtrTag, "program");
}
MacroAssemblerCodeRef moduleProgramEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_module_program_prologue), "module_program");
+ return generateThunkWithJumpTo(vm, llint_module_program_prologue, CodeEntryPtrTag, "module_program");
}
} // namespace LLInt
diff --git a/Source/JavaScriptCore/runtime/PtrTag.h b/Source/JavaScriptCore/runtime/PtrTag.h
index edf1daf..993ceef 100644
--- a/Source/JavaScriptCore/runtime/PtrTag.h
+++ b/Source/JavaScriptCore/runtime/PtrTag.h
@@ -33,6 +33,7 @@
NoPtrTag = 0,
NearCallPtrTag,
NearJumpPtrTag,
+
CFunctionPtrTag,
BytecodePtrTag,
@@ -43,6 +44,12 @@
JITCodePtrTag,
NativeCodePtrTag,
SlowPathPtrTag,
+
+ Yarr8BitPtrTag,
+ Yarr16BitPtrTag,
+ YarrMatchOnly8BitPtrTag,
+ YarrMatchOnly16BitPtrTag,
+ YarrBacktrackPtrTag,
};
#if !USE(POINTER_PROFILING)
diff --git a/Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp b/Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp
index 2ee33df..5c9f47a 100644
--- a/Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp
+++ b/Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp
@@ -1147,7 +1147,7 @@
patchpoint->clobberLate(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking));
patchpoint->setGenerator([returnType] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
AllowMacroScratchRegisterUsage allowScratch(jit);
- jit.call(params[returnType == Void ? 0 : 1].gpr());
+ jit.call(params[returnType == Void ? 0 : 1].gpr(), NoPtrTag);
});
});
UpsilonValue* embedderCallResultUpsilon = returnType == Void ? nullptr : isEmbedderBlock->appendNew<UpsilonValue>(m_proc, origin(), embedderCallResult);
@@ -1330,7 +1330,7 @@
patchpoint->append(calleeCode, ValueRep::SomeRegister);
patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
AllowMacroScratchRegisterUsage allowScratch(jit);
- jit.call(params[returnType == Void ? 0 : 1].gpr());
+ jit.call(params[returnType == Void ? 0 : 1].gpr(), NoPtrTag);
});
});
diff --git a/Source/JavaScriptCore/wasm/WasmBBQPlan.cpp b/Source/JavaScriptCore/wasm/WasmBBQPlan.cpp
index 06c17ee..1a55d70 100644
--- a/Source/JavaScriptCore/wasm/WasmBBQPlan.cpp
+++ b/Source/JavaScriptCore/wasm/WasmBBQPlan.cpp
@@ -308,7 +308,7 @@
}
m_wasmInternalFunctions[functionIndex]->entrypoint.compilation = std::make_unique<B3::Compilation>(
- FINALIZE_CODE(linkBuffer, "WebAssembly function[%i] %s", functionIndex, SignatureInformation::get(signatureIndex).toString().ascii().data()),
+ FINALIZE_CODE(linkBuffer, NoPtrTag, "WebAssembly function[%i] %s", functionIndex, SignatureInformation::get(signatureIndex).toString().ascii().data()),
WTFMove(context.wasmEntrypointByproducts));
}
@@ -320,7 +320,7 @@
}
embedderToWasmInternalFunction->entrypoint.compilation = std::make_unique<B3::Compilation>(
- FINALIZE_CODE(linkBuffer, "Embedder->WebAssembly entrypoint[%i] %s", functionIndex, SignatureInformation::get(signatureIndex).toString().ascii().data()),
+ FINALIZE_CODE(linkBuffer, NoPtrTag, "Embedder->WebAssembly entrypoint[%i] %s", functionIndex, SignatureInformation::get(signatureIndex).toString().ascii().data()),
WTFMove(context.embedderEntrypointByproducts));
}
}
diff --git a/Source/JavaScriptCore/wasm/WasmBinding.cpp b/Source/JavaScriptCore/wasm/WasmBinding.cpp
index 4e88872..ba97097 100644
--- a/Source/JavaScriptCore/wasm/WasmBinding.cpp
+++ b/Source/JavaScriptCore/wasm/WasmBinding.cpp
@@ -76,13 +76,13 @@
// Tail call into the callee WebAssembly function.
jit.loadPtr(scratch, scratch);
- jit.jump(scratch);
+ jit.jump(scratch, NoPtrTag);
LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, JITCompilationCanFail);
if (UNLIKELY(patchBuffer.didFailToAllocate()))
return makeUnexpected(BindingFailure::OutOfMemory);
- return FINALIZE_CODE(patchBuffer, "WebAssembly->WebAssembly import[%i]", importIndex);
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "WebAssembly->WebAssembly import[%i]", importIndex);
}
} } // namespace JSC::Wasm
diff --git a/Source/JavaScriptCore/wasm/WasmOMGPlan.cpp b/Source/JavaScriptCore/wasm/WasmOMGPlan.cpp
index 0d34a44..ce7edaaa 100644
--- a/Source/JavaScriptCore/wasm/WasmOMGPlan.cpp
+++ b/Source/JavaScriptCore/wasm/WasmOMGPlan.cpp
@@ -98,7 +98,7 @@
}
omgEntrypoint.compilation = std::make_unique<B3::Compilation>(
- FINALIZE_CODE(linkBuffer, "WebAssembly OMG function[%i] %s", m_functionIndex, SignatureInformation::get(signatureIndex).toString().ascii().data()),
+ FINALIZE_CODE(linkBuffer, NoPtrTag, "WebAssembly OMG function[%i] %s", m_functionIndex, SignatureInformation::get(signatureIndex).toString().ascii().data()),
WTFMove(context.wasmEntrypointByproducts));
omgEntrypoint.calleeSaveRegisters = WTFMove(parseAndCompileResult.value()->entrypoint.calleeSaveRegisters);
diff --git a/Source/JavaScriptCore/wasm/WasmThunks.cpp b/Source/JavaScriptCore/wasm/WasmThunks.cpp
index 5bfe573..e637208 100644
--- a/Source/JavaScriptCore/wasm/WasmThunks.cpp
+++ b/Source/JavaScriptCore/wasm/WasmThunks.cpp
@@ -51,15 +51,15 @@
jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR0), GPRInfo::argumentGPR0);
jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(GPRInfo::argumentGPR0);
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- CCallHelpers::Call call = jit.call();
- jit.jump(GPRInfo::returnValueGPR);
+ CCallHelpers::Call call = jit.call(NoPtrTag);
+ jit.jump(GPRInfo::returnValueGPR, NoPtrTag);
jit.breakpoint(); // We should not reach this.
ThrowWasmException throwWasmException = Thunks::singleton().throwWasmException();
RELEASE_ASSERT(throwWasmException);
LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
linkBuffer.link(call, FunctionPtr(throwWasmException));
- return FINALIZE_CODE(linkBuffer, "Throw exception from Wasm");
+ return FINALIZE_CODE(linkBuffer, NoPtrTag, "Throw exception from Wasm");
}
MacroAssemblerCodeRef throwStackOverflowFromWasmThunkGenerator(const AbstractLocker& locker)
@@ -73,7 +73,7 @@
auto jumpToExceptionHandler = jit.jump();
LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
linkBuffer.link(jumpToExceptionHandler, CodeLocationLabel(Thunks::singleton().stub(locker, throwExceptionFromWasmThunkGenerator).code()));
- return FINALIZE_CODE(linkBuffer, "Throw stack overflow from Wasm");
+ return FINALIZE_CODE(linkBuffer, NoPtrTag, "Throw stack overflow from Wasm");
}
MacroAssemblerCodeRef triggerOMGTierUpThunkGenerator(const AbstractLocker&)
@@ -92,14 +92,14 @@
typedef void (*Run)(Instance*, uint32_t);
Run run = OMGPlan::runForIndex;
jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(run)), GPRInfo::argumentGPR2);
- jit.call(GPRInfo::argumentGPR2);
+ jit.call(GPRInfo::argumentGPR2, NoPtrTag);
ScratchRegisterAllocator::restoreRegistersFromStackForCall(jit, registersToSpill, RegisterSet(), numberOfStackBytesUsedForRegisterPreservation, extraPaddingBytes);
jit.emitFunctionEpilogue();
jit.ret();
LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(linkBuffer, "Trigger OMG tier up");
+ return FINALIZE_CODE(linkBuffer, NoPtrTag, "Trigger OMG tier up");
}
static Thunks* thunks;
diff --git a/Source/JavaScriptCore/wasm/js/WasmToJS.cpp b/Source/JavaScriptCore/wasm/js/WasmToJS.cpp
index 2e206c3..2e0a910 100644
--- a/Source/JavaScriptCore/wasm/js/WasmToJS.cpp
+++ b/Source/JavaScriptCore/wasm/js/WasmToJS.cpp
@@ -93,7 +93,7 @@
// Let's be paranoid on the exception path and zero out the poison instead of leaving it in an argument GPR.
jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::argumentGPR3);
- auto call = jit.call();
+ auto call = jit.call(NoPtrTag);
jit.jumpToExceptionHandler(*vm);
void (*throwBadI64)(ExecState*, JSWebAssemblyInstance*) = [] (ExecState* exec, JSWebAssemblyInstance* instance) -> void {
@@ -116,7 +116,7 @@
return makeUnexpected(BindingFailure::OutOfMemory);
linkBuffer.link(call, throwBadI64);
- return FINALIZE_CODE(linkBuffer, "WebAssembly->JavaScript invalid i64 use in import[%i]", importIndex);
+ return FINALIZE_CODE(linkBuffer, NoPtrTag, "WebAssembly->JavaScript invalid i64 use in import[%i]", importIndex);
}
return MacroAssemblerCodeRef();
@@ -302,7 +302,7 @@
static_assert(GPRInfo::numberOfArgumentRegisters >= 4, "We rely on this with the call below.");
jit.setupArguments<decltype(callFunc)>(GPRInfo::argumentGPR1, CCallHelpers::TrustedImm32(signatureIndex), CCallHelpers::TrustedImmPtr(buffer));
- auto call = jit.call();
+ auto call = jit.call(NoPtrTag);
auto noException = jit.emitExceptionCheck(*vm, AssemblyHelpers::InvertedExceptionCheck);
// Exception here.
@@ -314,7 +314,7 @@
genericUnwind(vm, exec);
ASSERT(!!vm->callFrameForCatch);
};
- auto exceptionCall = jit.call();
+ auto exceptionCall = jit.call(NoPtrTag);
jit.jumpToExceptionHandler(*vm);
noException.link(&jit);
@@ -342,7 +342,7 @@
linkBuffer.link(call, callFunc);
linkBuffer.link(exceptionCall, doUnwinding);
- return FINALIZE_CODE(linkBuffer, "WebAssembly->JavaScript import[%i] %s", importIndex, signature.toString().ascii().data());
+ return FINALIZE_CODE(linkBuffer, NoPtrTag, "WebAssembly->JavaScript import[%i] %s", importIndex, signature.toString().ascii().data());
}
// Note: We don't need to perform a stack check here since WasmB3IRGenerator
@@ -546,7 +546,7 @@
slowPath.link(&jit);
jit.setupArguments<decltype(convertToI32)>(GPRInfo::returnValueGPR);
- auto call = jit.call();
+ auto call = jit.call(NoPtrTag);
exceptionChecks.append(jit.emitJumpIfException(*vm));
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
@@ -581,7 +581,7 @@
notANumber.link(&jit);
jit.setupArguments<decltype(convertToF32)>(GPRInfo::returnValueGPR);
- auto call = jit.call();
+ auto call = jit.call(NoPtrTag);
exceptionChecks.append(jit.emitJumpIfException(*vm));
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
@@ -615,7 +615,7 @@
notANumber.link(&jit);
jit.setupArguments<decltype(convertToF64)>(GPRInfo::returnValueGPR);
- auto call = jit.call();
+ auto call = jit.call(NoPtrTag);
exceptionChecks.append(jit.emitJumpIfException(*vm));
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
@@ -634,7 +634,7 @@
exceptionChecks.link(&jit);
jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- auto call = jit.call();
+ auto call = jit.call(NoPtrTag);
jit.jumpToExceptionHandler(*vm);
void (*doUnwinding)(ExecState*) = [] (ExecState* exec) -> void {
@@ -659,7 +659,7 @@
CodeLocationNearCall hotPathOther = patchBuffer.locationOfNearCall(fastCall);
callLinkInfo->setCallLocations(callReturnLocation, hotPathBegin, hotPathOther);
- return FINALIZE_CODE(patchBuffer, "WebAssembly->JavaScript import[%i] %s", importIndex, signature.toString().ascii().data());
+ return FINALIZE_CODE(patchBuffer, NoPtrTag, "WebAssembly->JavaScript import[%i] %s", importIndex, signature.toString().ascii().data());
}
void* wasmToJSException(ExecState* exec, Wasm::ExceptionType type, Instance* wasmInstance)
diff --git a/Source/JavaScriptCore/yarr/YarrJIT.cpp b/Source/JavaScriptCore/yarr/YarrJIT.cpp
index 75bb880..683edcc 100644
--- a/Source/JavaScriptCore/yarr/YarrJIT.cpp
+++ b/Source/JavaScriptCore/yarr/YarrJIT.cpp
@@ -41,7 +41,7 @@
template<YarrJITCompileMode compileMode>
class YarrGenerator : private MacroAssembler {
- friend void jitCompile(VM*, YarrCodeBlock& jitObject, const String& pattern, unsigned& numSubpatterns, const char*& error, bool ignoreCase, bool multiline);
+ friend void jitCompile(VM*, YarrCodeBlock&, const String& pattern, unsigned& numSubpatterns, const char*& error, bool ignoreCase, bool multiline);
#if CPU(ARM)
static const RegisterID input = ARMRegisters::r0;
@@ -604,7 +604,7 @@
void loadFromFrameAndJump(unsigned frameLocation)
{
- jump(Address(stackPointerRegister, frameLocation * sizeof(void*)));
+ jump(Address(stackPointerRegister, frameLocation * sizeof(void*)), ptrTag(YarrBacktrackPtrTag, &m_codeBlock));
}
unsigned alignCallFrameSizeInBytes(unsigned callFrameSize)
@@ -927,11 +927,11 @@
}
// Called at the end of code generation to link all return addresses.
- void linkDataLabels(LinkBuffer& linkBuffer)
+ void linkDataLabels(LinkBuffer& linkBuffer, YarrCodeBlock& codeBlock)
{
ASSERT(isEmpty());
for (unsigned i = 0; i < m_backtrackRecords.size(); ++i)
- linkBuffer.patch(m_backtrackRecords[i].m_dataLabel, linkBuffer.locationOf(m_backtrackRecords[i].m_backtrackLocation));
+ linkBuffer.patch(m_backtrackRecords[i].m_dataLabel, linkBuffer.locationOf(m_backtrackRecords[i].m_backtrackLocation, ptrTag(YarrBacktrackPtrTag, &codeBlock)));
}
private:
@@ -3282,6 +3282,10 @@
m_tryReadUnicodeCharacterEntry = label();
+#if CPU(ARM64)
+ tagPtr(linkRegister, stackPointerRegister);
+#endif
+
tryReadUnicodeCharImpl(regT0);
ret();
@@ -3343,6 +3347,7 @@
loadPtr(Address(X86Registers::ebp, 2 * sizeof(void*)), output);
#endif
#elif CPU(ARM64)
+ tagPtr(linkRegister, stackPointerRegister);
if (m_decodeSurrogatePairs) {
pushPair(framePointerRegister, linkRegister);
move(TrustedImm32(0x10000), supplementaryPlanesBase);
@@ -3417,9 +3422,10 @@
}
public:
- YarrGenerator(VM* vm, YarrPattern& pattern, YarrCharSize charSize)
+ YarrGenerator(VM* vm, YarrPattern& pattern, YarrCodeBlock& codeBlock, YarrCharSize charSize)
: m_vm(vm)
, m_pattern(pattern)
+ , m_codeBlock(codeBlock)
, m_charSize(charSize)
, m_decodeSurrogatePairs(m_charSize == Char16 && m_pattern.unicode())
, m_unicodeIgnoreCase(m_pattern.unicode() && m_pattern.ignoreCase())
@@ -3431,18 +3437,20 @@
{
}
- void compile(YarrCodeBlock& jitObject)
+ void compile()
{
+ YarrCodeBlock& codeBlock = m_codeBlock;
+
#ifndef JIT_UNICODE_EXPRESSIONS
if (m_decodeSurrogatePairs) {
- jitObject.setFallBackWithFailureReason(JITFailureReason::DecodeSurrogatePair);
+ codeBlock.setFallBackWithFailureReason(JITFailureReason::DecodeSurrogatePair);
return;
}
#endif
#if ENABLE(YARR_JIT_ALL_PARENS_EXPRESSIONS)
if (m_containsNestedSubpatterns)
- jitObject.setUsesPaternContextBuffer();
+ codeBlock.setUsesPaternContextBuffer();
#endif
// We need to compile before generating code since we set flags based on compilation that
@@ -3450,7 +3458,7 @@
opCompileBody(m_pattern.m_body);
if (m_failureReason) {
- jitObject.setFallBackWithFailureReason(*m_failureReason);
+ codeBlock.setFallBackWithFailureReason(*m_failureReason);
return;
}
@@ -3497,32 +3505,32 @@
LinkBuffer linkBuffer(*this, REGEXP_CODE_ID, JITCompilationCanFail);
if (linkBuffer.didFailToAllocate()) {
- jitObject.setFallBackWithFailureReason(JITFailureReason::ExecutableMemoryAllocationFailure);
+ codeBlock.setFallBackWithFailureReason(JITFailureReason::ExecutableMemoryAllocationFailure);
return;
}
if (!m_tryReadUnicodeCharacterCalls.isEmpty()) {
- CodeLocationLabel tryReadUnicodeCharacterHelper = linkBuffer.locationOf(m_tryReadUnicodeCharacterEntry);
+ CodeLocationLabel tryReadUnicodeCharacterHelper = linkBuffer.locationOf(m_tryReadUnicodeCharacterEntry, NearCallPtrTag);
for (auto call : m_tryReadUnicodeCharacterCalls)
linkBuffer.link(call, tryReadUnicodeCharacterHelper);
}
- m_backtrackingState.linkDataLabels(linkBuffer);
+ m_backtrackingState.linkDataLabels(linkBuffer, codeBlock);
if (compileMode == MatchOnly) {
if (m_charSize == Char8)
- jitObject.set8BitCodeMatchOnly(FINALIZE_CODE(linkBuffer, "Match-only 8-bit regular expression"));
+ codeBlock.set8BitCodeMatchOnly(FINALIZE_CODE(linkBuffer, ptrTag(YarrMatchOnly8BitPtrTag, &codeBlock), "Match-only 8-bit regular expression"));
else
- jitObject.set16BitCodeMatchOnly(FINALIZE_CODE(linkBuffer, "Match-only 16-bit regular expression"));
+ codeBlock.set16BitCodeMatchOnly(FINALIZE_CODE(linkBuffer, ptrTag(YarrMatchOnly16BitPtrTag, &codeBlock), "Match-only 16-bit regular expression"));
} else {
if (m_charSize == Char8)
- jitObject.set8BitCode(FINALIZE_CODE(linkBuffer, "8-bit regular expression"));
+ codeBlock.set8BitCode(FINALIZE_CODE(linkBuffer, ptrTag(Yarr8BitPtrTag, &codeBlock), "8-bit regular expression"));
else
- jitObject.set16BitCode(FINALIZE_CODE(linkBuffer, "16-bit regular expression"));
+ codeBlock.set16BitCode(FINALIZE_CODE(linkBuffer, ptrTag(Yarr16BitPtrTag, &codeBlock), "16-bit regular expression"));
}
if (m_failureReason)
- jitObject.setFallBackWithFailureReason(*m_failureReason);
+ codeBlock.setFallBackWithFailureReason(*m_failureReason);
}
private:
@@ -3530,6 +3538,7 @@
YarrPattern& m_pattern;
+ YarrCodeBlock& m_codeBlock;
YarrCharSize m_charSize;
// Used to detect regular expression constructs that are not currently
@@ -3591,14 +3600,14 @@
}
}
-void jitCompile(YarrPattern& pattern, YarrCharSize charSize, VM* vm, YarrCodeBlock& jitObject, YarrJITCompileMode mode)
+void jitCompile(YarrPattern& pattern, YarrCharSize charSize, VM* vm, YarrCodeBlock& codeBlock, YarrJITCompileMode mode)
{
if (mode == MatchOnly)
- YarrGenerator<MatchOnly>(vm, pattern, charSize).compile(jitObject);
+ YarrGenerator<MatchOnly>(vm, pattern, codeBlock, charSize).compile();
else
- YarrGenerator<IncludeSubpatterns>(vm, pattern, charSize).compile(jitObject);
+ YarrGenerator<IncludeSubpatterns>(vm, pattern, codeBlock, charSize).compile();
- if (auto failureReason = jitObject.failureReason()) {
+ if (auto failureReason = codeBlock.failureReason()) {
if (Options::dumpCompiledRegExpPatterns())
dumpCompileFailure(*failureReason);
}
diff --git a/Source/JavaScriptCore/yarr/YarrJIT.h b/Source/JavaScriptCore/yarr/YarrJIT.h
index 2475b63..dbb73c8 100644
--- a/Source/JavaScriptCore/yarr/YarrJIT.h
+++ b/Source/JavaScriptCore/yarr/YarrJIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2009-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -101,25 +101,25 @@
MatchResult execute(const LChar* input, unsigned start, unsigned length, int* output, void* freeParenContext, unsigned parenContextSize)
{
ASSERT(has8BitCode());
- return MatchResult(reinterpret_cast<YarrJITCode8>(m_ref8.code().executableAddress())(input, start, length, output, freeParenContext, parenContextSize));
+ return MatchResult(untagCFunctionPtr<YarrJITCode8>(m_ref8.code().executableAddress(), ptrTag(Yarr8BitPtrTag, this))(input, start, length, output, freeParenContext, parenContextSize));
}
MatchResult execute(const UChar* input, unsigned start, unsigned length, int* output, void* freeParenContext, unsigned parenContextSize)
{
ASSERT(has16BitCode());
- return MatchResult(reinterpret_cast<YarrJITCode16>(m_ref16.code().executableAddress())(input, start, length, output, freeParenContext, parenContextSize));
+ return MatchResult(untagCFunctionPtr<YarrJITCode16>(m_ref16.code().executableAddress(), ptrTag(Yarr16BitPtrTag, this))(input, start, length, output, freeParenContext, parenContextSize));
}
MatchResult execute(const LChar* input, unsigned start, unsigned length, void* freeParenContext, unsigned parenContextSize)
{
ASSERT(has8BitCodeMatchOnly());
- return MatchResult(reinterpret_cast<YarrJITCodeMatchOnly8>(m_matchOnly8.code().executableAddress())(input, start, length, 0, freeParenContext, parenContextSize));
+ return MatchResult(untagCFunctionPtr<YarrJITCodeMatchOnly8>(m_matchOnly8.code().executableAddress(), ptrTag(YarrMatchOnly8BitPtrTag, this))(input, start, length, 0, freeParenContext, parenContextSize));
}
MatchResult execute(const UChar* input, unsigned start, unsigned length, void* freeParenContext, unsigned parenContextSize)
{
ASSERT(has16BitCodeMatchOnly());
- return MatchResult(reinterpret_cast<YarrJITCodeMatchOnly16>(m_matchOnly16.code().executableAddress())(input, start, length, 0, freeParenContext, parenContextSize));
+ return MatchResult(untagCFunctionPtr<YarrJITCodeMatchOnly16>(m_matchOnly16.code().executableAddress(), ptrTag(YarrMatchOnly16BitPtrTag, this))(input, start, length, 0, freeParenContext, parenContextSize));
}
#else
MatchResult execute(const LChar* input, unsigned start, unsigned length, int* output)