Refactoring: make DFG::Plan a class.
https://bugs.webkit.org/show_bug.cgi?id=187968
Reviewed by Saam Barati.
This patch makes all the DFG::Plan fields private and provides accessor methods
for them. This makes it easier to reason about how these fields are used and
modified.
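
As a rough illustration (simplified stand-in types, not the actual DFGPlan.h
declarations), the refactoring follows the usual field-to-accessor pattern:
read-only state gets const accessors, mutable collections get
reference-returning accessors, and fields that are replaced wholesale get
explicit setters. Call sites change accordingly, e.g.
isFTL(m_graph.m_plan.mode) becomes m_graph.m_plan.isFTL().

    // Sketch only: stubbed types, not the real JSC declarations.
    #include <memory>
    #include <utility>

    enum CompilationMode { DFGMode, FTLMode, FTLForOSREntryMode };
    struct Finalizer { };          // stand-in for DFG::Finalizer
    struct DesiredWatchpoints { }; // stand-in for DFG::DesiredWatchpoints

    class Plan {
    public:
        CompilationMode mode() const { return m_mode; }
        bool isFTL() const { return m_mode == FTLMode || m_mode == FTLForOSREntryMode; }
        unsigned osrEntryBytecodeIndex() const { return m_osrEntryBytecodeIndex; }
        DesiredWatchpoints& watchpoints() { return m_watchpoints; }
        void setFinalizer(std::unique_ptr<Finalizer>&& finalizer) { m_finalizer = std::move(finalizer); }
    private:
        CompilationMode m_mode { DFGMode };     // was the public field "mode"
        unsigned m_osrEntryBytecodeIndex { 0 }; // was the public field "osrEntryBytecodeIndex"
        DesiredWatchpoints m_watchpoints;       // was the public field "watchpoints"
        std::unique_ptr<Finalizer> m_finalizer; // was the public field "finalizer"
    };
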
* dfg/DFGAbstractInterpreterInlines.h:
(JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::handleCall):
(JSC::DFG::ByteCodeParser::handleVarargsCall):
(JSC::DFG::ByteCodeParser::handleInlining):
(JSC::DFG::ByteCodeParser::handleIntrinsicCall):
(JSC::DFG::ByteCodeParser::handleDOMJITGetter):
(JSC::DFG::ByteCodeParser::handleModuleNamespaceLoad):
(JSC::DFG::ByteCodeParser::handleGetById):
(JSC::DFG::ByteCodeParser::handlePutById):
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
(JSC::DFG::ByteCodeParser::parseCodeBlock):
(JSC::DFG::ByteCodeParser::parse):
* dfg/DFGCFAPhase.cpp:
(JSC::DFG::CFAPhase::run):
(JSC::DFG::CFAPhase::injectOSR):
* dfg/DFGClobberize.h:
(JSC::DFG::clobberize):
* dfg/DFGCommonData.cpp:
(JSC::DFG::CommonData::notifyCompilingStructureTransition):
* dfg/DFGCommonData.h:
* dfg/DFGConstantFoldingPhase.cpp:
(JSC::DFG::ConstantFoldingPhase::foldConstants):
* dfg/DFGDriver.cpp:
(JSC::DFG::compileImpl):
* dfg/DFGFinalizer.h:
* dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::fixupNode):
(JSC::DFG::FixupPhase::fixupCompareStrictEqAndSameValue):
* dfg/DFGGraph.cpp:
(JSC::DFG::Graph::Graph):
(JSC::DFG::Graph::watchCondition):
(JSC::DFG::Graph::inferredTypeFor):
(JSC::DFG::Graph::requiredRegisterCountForExit):
(JSC::DFG::Graph::registerFrozenValues):
(JSC::DFG::Graph::registerStructure):
(JSC::DFG::Graph::registerAndWatchStructureTransition):
(JSC::DFG::Graph::assertIsRegistered):
* dfg/DFGGraph.h:
(JSC::DFG::Graph::compilation):
(JSC::DFG::Graph::identifiers):
(JSC::DFG::Graph::watchpoints):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::JITCompiler):
(JSC::DFG::JITCompiler::link):
(JSC::DFG::JITCompiler::compile):
(JSC::DFG::JITCompiler::compileFunction):
(JSC::DFG::JITCompiler::disassemble):
* dfg/DFGJITCompiler.h:
(JSC::DFG::JITCompiler::addWeakReference):
* dfg/DFGJITFinalizer.cpp:
(JSC::DFG::JITFinalizer::finalize):
(JSC::DFG::JITFinalizer::finalizeFunction):
(JSC::DFG::JITFinalizer::finalizeCommon):
* dfg/DFGOSREntrypointCreationPhase.cpp:
(JSC::DFG::OSREntrypointCreationPhase::run):
* dfg/DFGPhase.cpp:
(JSC::DFG::Phase::beginPhase):
* dfg/DFGPhase.h:
(JSC::DFG::runAndLog):
* dfg/DFGPlan.cpp:
(JSC::DFG::Plan::Plan):
(JSC::DFG::Plan::computeCompileTimes const):
(JSC::DFG::Plan::reportCompileTimes const):
(JSC::DFG::Plan::compileInThread):
(JSC::DFG::Plan::compileInThreadImpl):
(JSC::DFG::Plan::isStillValid):
(JSC::DFG::Plan::reallyAdd):
(JSC::DFG::Plan::notifyCompiling):
(JSC::DFG::Plan::notifyReady):
(JSC::DFG::Plan::finalizeWithoutNotifyingCallback):
(JSC::DFG::Plan::finalizeAndNotifyCallback):
(JSC::DFG::Plan::key):
(JSC::DFG::Plan::checkLivenessAndVisitChildren):
(JSC::DFG::Plan::finalizeInGC):
(JSC::DFG::Plan::isKnownToBeLiveDuringGC):
(JSC::DFG::Plan::cancel):
(JSC::DFG::Plan::cleanMustHandleValuesIfNecessary):
* dfg/DFGPlan.h:
(JSC::DFG::Plan::canTierUpAndOSREnter const):
(JSC::DFG::Plan::vm const):
(JSC::DFG::Plan::codeBlock):
(JSC::DFG::Plan::mode const):
(JSC::DFG::Plan::osrEntryBytecodeIndex const):
(JSC::DFG::Plan::mustHandleValues const):
(JSC::DFG::Plan::threadData const):
(JSC::DFG::Plan::compilation const):
(JSC::DFG::Plan::finalizer const):
(JSC::DFG::Plan::setFinalizer):
(JSC::DFG::Plan::inlineCallFrames const):
(JSC::DFG::Plan::watchpoints):
(JSC::DFG::Plan::identifiers):
(JSC::DFG::Plan::weakReferences):
(JSC::DFG::Plan::transitions):
(JSC::DFG::Plan::recordedStatuses):
(JSC::DFG::Plan::willTryToTierUp const):
(JSC::DFG::Plan::setWillTryToTierUp):
(JSC::DFG::Plan::tierUpInLoopHierarchy):
(JSC::DFG::Plan::tierUpAndOSREnterBytecodes):
(JSC::DFG::Plan::stage const):
(JSC::DFG::Plan::callback const):
(JSC::DFG::Plan::setCallback):
* dfg/DFGPlanInlines.h:
(JSC::DFG::Plan::iterateCodeBlocksForGC):
* dfg/DFGPreciseLocalClobberize.h:
(JSC::DFG::PreciseLocalClobberizeAdaptor::readTop):
* dfg/DFGPredictionInjectionPhase.cpp:
(JSC::DFG::PredictionInjectionPhase::run):
* dfg/DFGSafepoint.cpp:
(JSC::DFG::Safepoint::Safepoint):
(JSC::DFG::Safepoint::~Safepoint):
(JSC::DFG::Safepoint::begin):
* dfg/DFGSafepoint.h:
* dfg/DFGSpeculativeJIT.h:
(JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPointer):
(JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPoisonedPointer):
* dfg/DFGStackLayoutPhase.cpp:
(JSC::DFG::StackLayoutPhase::run):
* dfg/DFGStrengthReductionPhase.cpp:
(JSC::DFG::StrengthReductionPhase::handleNode):
* dfg/DFGTierUpCheckInjectionPhase.cpp:
(JSC::DFG::TierUpCheckInjectionPhase::run):
* dfg/DFGTypeCheckHoistingPhase.cpp:
(JSC::DFG::TypeCheckHoistingPhase::disableHoistingAcrossOSREntries):
* dfg/DFGWorklist.cpp:
(JSC::DFG::Worklist::isActiveForVM const):
(JSC::DFG::Worklist::compilationState):
(JSC::DFG::Worklist::waitUntilAllPlansForVMAreReady):
(JSC::DFG::Worklist::removeAllReadyPlansForVM):
(JSC::DFG::Worklist::completeAllReadyPlansForVM):
(JSC::DFG::Worklist::visitWeakReferences):
(JSC::DFG::Worklist::removeDeadPlans):
(JSC::DFG::Worklist::removeNonCompilingPlansForVM):
* dfg/DFGWorklistInlines.h:
(JSC::DFG::Worklist::iterateCodeBlocksForGC):
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* ftl/FTLFail.cpp:
(JSC::FTL::fail):
* ftl/FTLJITFinalizer.cpp:
(JSC::FTL::JITFinalizer::finalizeCommon):
* ftl/FTLLink.cpp:
(JSC::FTL::link):
* ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::compileMultiPutByOffset):
(JSC::FTL::DFG::LowerDFGToB3::buildExitArguments):
(JSC::FTL::DFG::LowerDFGToB3::addWeakReference):
* ftl/FTLState.cpp:
(JSC::FTL::State::State):
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@234178 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index e04639c..1296303 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,3 +1,166 @@
+2018-07-24 Mark Lam <mark.lam@apple.com>
+
+ Refactoring: make DFG::Plan a class.
+ https://bugs.webkit.org/show_bug.cgi?id=187968
+
+ Reviewed by Saam Barati.
+
+ This patch makes all the DFG::Plan fields private and provides accessor methods
+ for them. This makes it easier to reason about how these fields are used and
+ modified.
+
+ * dfg/DFGAbstractInterpreterInlines.h:
+ (JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::handleCall):
+ (JSC::DFG::ByteCodeParser::handleVarargsCall):
+ (JSC::DFG::ByteCodeParser::handleInlining):
+ (JSC::DFG::ByteCodeParser::handleIntrinsicCall):
+ (JSC::DFG::ByteCodeParser::handleDOMJITGetter):
+ (JSC::DFG::ByteCodeParser::handleModuleNamespaceLoad):
+ (JSC::DFG::ByteCodeParser::handleGetById):
+ (JSC::DFG::ByteCodeParser::handlePutById):
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ (JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
+ (JSC::DFG::ByteCodeParser::parseCodeBlock):
+ (JSC::DFG::ByteCodeParser::parse):
+ * dfg/DFGCFAPhase.cpp:
+ (JSC::DFG::CFAPhase::run):
+ (JSC::DFG::CFAPhase::injectOSR):
+ * dfg/DFGClobberize.h:
+ (JSC::DFG::clobberize):
+ * dfg/DFGCommonData.cpp:
+ (JSC::DFG::CommonData::notifyCompilingStructureTransition):
+ * dfg/DFGCommonData.h:
+ * dfg/DFGConstantFoldingPhase.cpp:
+ (JSC::DFG::ConstantFoldingPhase::foldConstants):
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compileImpl):
+ * dfg/DFGFinalizer.h:
+ * dfg/DFGFixupPhase.cpp:
+ (JSC::DFG::FixupPhase::fixupNode):
+ (JSC::DFG::FixupPhase::fixupCompareStrictEqAndSameValue):
+ * dfg/DFGGraph.cpp:
+ (JSC::DFG::Graph::Graph):
+ (JSC::DFG::Graph::watchCondition):
+ (JSC::DFG::Graph::inferredTypeFor):
+ (JSC::DFG::Graph::requiredRegisterCountForExit):
+ (JSC::DFG::Graph::registerFrozenValues):
+ (JSC::DFG::Graph::registerStructure):
+ (JSC::DFG::Graph::registerAndWatchStructureTransition):
+ (JSC::DFG::Graph::assertIsRegistered):
+ * dfg/DFGGraph.h:
+ (JSC::DFG::Graph::compilation):
+ (JSC::DFG::Graph::identifiers):
+ (JSC::DFG::Graph::watchpoints):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::JITCompiler):
+ (JSC::DFG::JITCompiler::link):
+ (JSC::DFG::JITCompiler::compile):
+ (JSC::DFG::JITCompiler::compileFunction):
+ (JSC::DFG::JITCompiler::disassemble):
+ * dfg/DFGJITCompiler.h:
+ (JSC::DFG::JITCompiler::addWeakReference):
+ * dfg/DFGJITFinalizer.cpp:
+ (JSC::DFG::JITFinalizer::finalize):
+ (JSC::DFG::JITFinalizer::finalizeFunction):
+ (JSC::DFG::JITFinalizer::finalizeCommon):
+ * dfg/DFGOSREntrypointCreationPhase.cpp:
+ (JSC::DFG::OSREntrypointCreationPhase::run):
+ * dfg/DFGPhase.cpp:
+ (JSC::DFG::Phase::beginPhase):
+ * dfg/DFGPhase.h:
+ (JSC::DFG::runAndLog):
+ * dfg/DFGPlan.cpp:
+ (JSC::DFG::Plan::Plan):
+ (JSC::DFG::Plan::computeCompileTimes const):
+ (JSC::DFG::Plan::reportCompileTimes const):
+ (JSC::DFG::Plan::compileInThread):
+ (JSC::DFG::Plan::compileInThreadImpl):
+ (JSC::DFG::Plan::isStillValid):
+ (JSC::DFG::Plan::reallyAdd):
+ (JSC::DFG::Plan::notifyCompiling):
+ (JSC::DFG::Plan::notifyReady):
+ (JSC::DFG::Plan::finalizeWithoutNotifyingCallback):
+ (JSC::DFG::Plan::finalizeAndNotifyCallback):
+ (JSC::DFG::Plan::key):
+ (JSC::DFG::Plan::checkLivenessAndVisitChildren):
+ (JSC::DFG::Plan::finalizeInGC):
+ (JSC::DFG::Plan::isKnownToBeLiveDuringGC):
+ (JSC::DFG::Plan::cancel):
+ (JSC::DFG::Plan::cleanMustHandleValuesIfNecessary):
+ * dfg/DFGPlan.h:
+ (JSC::DFG::Plan::canTierUpAndOSREnter const):
+ (JSC::DFG::Plan::vm const):
+ (JSC::DFG::Plan::codeBlock):
+ (JSC::DFG::Plan::mode const):
+ (JSC::DFG::Plan::osrEntryBytecodeIndex const):
+ (JSC::DFG::Plan::mustHandleValues const):
+ (JSC::DFG::Plan::threadData const):
+ (JSC::DFG::Plan::compilation const):
+ (JSC::DFG::Plan::finalizer const):
+ (JSC::DFG::Plan::setFinalizer):
+ (JSC::DFG::Plan::inlineCallFrames const):
+ (JSC::DFG::Plan::watchpoints):
+ (JSC::DFG::Plan::identifiers):
+ (JSC::DFG::Plan::weakReferences):
+ (JSC::DFG::Plan::transitions):
+ (JSC::DFG::Plan::recordedStatuses):
+ (JSC::DFG::Plan::willTryToTierUp const):
+ (JSC::DFG::Plan::setWillTryToTierUp):
+ (JSC::DFG::Plan::tierUpInLoopHierarchy):
+ (JSC::DFG::Plan::tierUpAndOSREnterBytecodes):
+ (JSC::DFG::Plan::stage const):
+ (JSC::DFG::Plan::callback const):
+ (JSC::DFG::Plan::setCallback):
+ * dfg/DFGPlanInlines.h:
+ (JSC::DFG::Plan::iterateCodeBlocksForGC):
+ * dfg/DFGPreciseLocalClobberize.h:
+ (JSC::DFG::PreciseLocalClobberizeAdaptor::readTop):
+ * dfg/DFGPredictionInjectionPhase.cpp:
+ (JSC::DFG::PredictionInjectionPhase::run):
+ * dfg/DFGSafepoint.cpp:
+ (JSC::DFG::Safepoint::Safepoint):
+ (JSC::DFG::Safepoint::~Safepoint):
+ (JSC::DFG::Safepoint::begin):
+ * dfg/DFGSafepoint.h:
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPointer):
+ (JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPoisonedPointer):
+ * dfg/DFGStackLayoutPhase.cpp:
+ (JSC::DFG::StackLayoutPhase::run):
+ * dfg/DFGStrengthReductionPhase.cpp:
+ (JSC::DFG::StrengthReductionPhase::handleNode):
+ * dfg/DFGTierUpCheckInjectionPhase.cpp:
+ (JSC::DFG::TierUpCheckInjectionPhase::run):
+ * dfg/DFGTypeCheckHoistingPhase.cpp:
+ (JSC::DFG::TypeCheckHoistingPhase::disableHoistingAcrossOSREntries):
+ * dfg/DFGWorklist.cpp:
+ (JSC::DFG::Worklist::isActiveForVM const):
+ (JSC::DFG::Worklist::compilationState):
+ (JSC::DFG::Worklist::waitUntilAllPlansForVMAreReady):
+ (JSC::DFG::Worklist::removeAllReadyPlansForVM):
+ (JSC::DFG::Worklist::completeAllReadyPlansForVM):
+ (JSC::DFG::Worklist::visitWeakReferences):
+ (JSC::DFG::Worklist::removeDeadPlans):
+ (JSC::DFG::Worklist::removeNonCompilingPlansForVM):
+ * dfg/DFGWorklistInlines.h:
+ (JSC::DFG::Worklist::iterateCodeBlocksForGC):
+ * ftl/FTLCompile.cpp:
+ (JSC::FTL::compile):
+ * ftl/FTLFail.cpp:
+ (JSC::FTL::fail):
+ * ftl/FTLJITFinalizer.cpp:
+ (JSC::FTL::JITFinalizer::finalizeCommon):
+ * ftl/FTLLink.cpp:
+ (JSC::FTL::link):
+ * ftl/FTLLowerDFGToB3.cpp:
+ (JSC::FTL::DFG::LowerDFGToB3::compileMultiPutByOffset):
+ (JSC::FTL::DFG::LowerDFGToB3::buildExitArguments):
+ (JSC::FTL::DFG::LowerDFGToB3::addWeakReference):
+ * ftl/FTLState.cpp:
+ (JSC::FTL::State::State):
+
2018-07-24 Saam Barati <sbarati@apple.com>
Make VM::canUseJIT an inlined function
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
index c048472..940faf6 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
@@ -3395,8 +3395,8 @@
newSet.merge(*m_graph.addStructureSet(variant.oldStructure()));
}
}
-
- if (status.numVariants() == 1 || isFTL(m_graph.m_plan.mode))
+
+ if (status.numVariants() == 1 || m_graph.m_plan.isFTL())
m_state.setFoundConstants(true);
didFoldClobberWorld();
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index e9c7fa5..54d7156 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -1238,8 +1238,8 @@
// If we have profiling information about this call, and it did not behave too polymorphically,
// we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
if (callLinkStatus.canOptimize()) {
- addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses.addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
-
+ addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
+
VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
@@ -1283,8 +1283,8 @@
VERBOSE_LOG(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
if (callLinkStatus.canOptimize()) {
- addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses.addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
-
+ addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
+
if (handleVarargsInlining(callTarget, result,
callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments),
firstVarArgOffset, op,
@@ -1915,7 +1915,7 @@
// the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
// we could improve that aspect of this by doing polymorphic inlining but having the profiling
// also.
- if (!isFTL(m_graph.m_plan.mode) || !Options::usePolymorphicCallInlining()) {
+ if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
return CallOptimizationResult::DidNothing;
}
@@ -2807,7 +2807,7 @@
case FTLTrueIntrinsic: {
insertChecks();
- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(isFTL(m_graph.m_plan.mode))));
+ set(VirtualRegister(resultOperand), jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
return true;
}
@@ -2821,7 +2821,7 @@
case IsFinalTierIntrinsic: {
insertChecks();
set(VirtualRegister(resultOperand),
- jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
+ jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
return true;
}
@@ -3167,7 +3167,7 @@
case CPUCpuidIntrinsic:
case CPUPauseIntrinsic: {
#if CPU(X86_64)
- if (!isFTL(m_graph.m_plan.mode))
+ if (!m_graph.m_plan.isFTL())
return false;
insertChecks();
set(VirtualRegister(resultOperand),
@@ -3335,7 +3335,7 @@
addToGraph(CheckSubClass, OpInfo(domAttribute.classInfo), thisNode);
bool wasSeenInJIT = true;
- addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(currentCodeOrigin(), GetByIdStatus(GetByIdStatus::Custom, wasSeenInJIT, variant))), thisNode);
+ addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), GetByIdStatus(GetByIdStatus::Custom, wasSeenInJIT, variant))), thisNode);
CallDOMGetterData* callDOMGetterData = m_graph.m_callDOMGetterData.add();
callDOMGetterData->customAccessorGetter = variant.customAccessorGetter();
@@ -3367,8 +3367,8 @@
if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
return false;
addToGraph(CheckCell, OpInfo(m_graph.freeze(getById.moduleNamespaceObject())), Edge(base, CellUse));
-
- addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(currentCodeOrigin(), getById)), base);
+
+ addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getById)), base);
// Ideally we wouldn't have to do this Phantom. But:
//
@@ -4029,16 +4029,16 @@
// GetByIdStatus. That means that the constant folder also needs to do the same!
if (getByIdStatus.numVariants() > 1) {
- if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
+ if (getByIdStatus.makesCalls() || !m_graph.m_plan.isFTL()
|| !Options::usePolymorphicAccessInlining()
|| getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
set(VirtualRegister(destinationOperand),
addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
return;
}
-
- addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
-
+
+ addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
+
Vector<MultiGetByOffsetCase, 2> cases;
// 1) Emit prototype structure checks for all chains. This could sort of maybe not be
@@ -4081,8 +4081,8 @@
return;
}
- addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
-
+ addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
+
ASSERT(getByIdStatus.numVariants() == 1);
GetByIdVariant variant = getByIdStatus[0];
@@ -4175,7 +4175,7 @@
}
if (putByIdStatus.numVariants() > 1) {
- if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
+ if (!m_graph.m_plan.isFTL() || putByIdStatus.makesCalls()
|| !Options::usePolymorphicAccessInlining()
|| putByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
@@ -4195,8 +4195,8 @@
if (UNLIKELY(m_graph.compilation()))
m_graph.compilation()->noticeInlinedPutById();
-
- addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
+
+ addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
for (const PutByIdVariant& variant : putByIdStatus.variants()) {
m_graph.registerInferredType(variant.requiredType());
@@ -4218,8 +4218,8 @@
switch (variant.kind()) {
case PutByIdVariant::Replace: {
- addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
-
+ addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
+
store(base, identifierNumber, variant, value);
if (UNLIKELY(m_graph.compilation()))
m_graph.compilation()->noticeInlinedPutById();
@@ -4227,8 +4227,8 @@
}
case PutByIdVariant::Transition: {
- addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
-
+ addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
+
addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
if (!check(variant.conditionSet())) {
emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
@@ -4296,8 +4296,8 @@
}
case PutByIdVariant::Setter: {
- addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
-
+ addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
+
Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant);
if (!loadedValue) {
emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
@@ -5648,7 +5648,7 @@
NEXT_OPCODE(op_catch);
}
- if (m_graph.m_plan.mode == FTLForOSREntryMode) {
+ if (m_graph.m_plan.mode() == FTLForOSREntryMode) {
NEXT_OPCODE(op_catch);
}
@@ -6495,8 +6495,8 @@
}
if (allOK) {
- addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addInByIdStatus(currentCodeOrigin(), status)), base);
-
+ addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addInByIdStatus(currentCodeOrigin(), status)), base);
+
Node* match = addToGraph(MatchStructure, OpInfo(data), base);
set(VirtualRegister(currentInstruction[1].u.operand), match);
NEXT_OPCODE(op_in_by_id);
@@ -6699,8 +6699,8 @@
// Inline case.
ASSERT(codeBlock != byteCodeParser->m_codeBlock);
ASSERT(inlineCallFrameStart.isValid());
-
- m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
+
+ m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames()->add();
m_optimizedContext.inlineCallFrame = m_inlineCallFrame;
// The owner is the machine code block, and we already have a barrier on that when the
@@ -6776,7 +6776,7 @@
}
if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
- Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump();
+ Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump();
if (inlineCallFrame()) {
DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->directCaller);
deferredSourceDump.append(dump);
@@ -6870,7 +6870,7 @@
if (m_hasAnyForceOSRExits) {
BlockSet blocksToIgnore;
for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
- if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex) {
+ if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex()) {
blocksToIgnore.add(block);
break;
}
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
index a0002eb..fa9c1bc 100644
--- a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
@@ -57,8 +57,8 @@
ASSERT(m_graph.m_refCountState == EverythingIsLive);
m_count = 0;
-
- if (m_verbose && !shouldDumpGraphAtEachPhase(m_graph.m_plan.mode)) {
+
+ if (m_verbose && !shouldDumpGraphAtEachPhase(m_graph.m_plan.mode())) {
dataLog("Graph before CFA:\n");
m_graph.dump();
}
@@ -88,7 +88,7 @@
if (!block->isOSRTarget)
continue;
- if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
+ if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex())
continue;
// We record that the block needs some OSR stuff, but we don't do that yet. We want to
@@ -156,9 +156,10 @@
dataLog(" Found must-handle block: ", *block, "\n");
bool changed = false;
- for (size_t i = m_graph.m_plan.mustHandleValues.size(); i--;) {
- int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
- JSValue value = m_graph.m_plan.mustHandleValues[i];
+ const Operands<JSValue>& mustHandleValues = m_graph.m_plan.mustHandleValues();
+ for (size_t i = mustHandleValues.size(); i--;) {
+ int operand = mustHandleValues.operandForIndex(i);
+ JSValue value = mustHandleValues[i];
Node* node = block->variablesAtHead.operand(operand);
if (!node) {
if (m_verbose)
diff --git a/Source/JavaScriptCore/dfg/DFGClobberize.h b/Source/JavaScriptCore/dfg/DFGClobberize.h
index 2b04fff..4563481 100644
--- a/Source/JavaScriptCore/dfg/DFGClobberize.h
+++ b/Source/JavaScriptCore/dfg/DFGClobberize.h
@@ -520,7 +520,7 @@
case PhantomClonedArguments:
// DFG backend requires that the locals that this reads are flushed. FTL backend can handle those
// locals being promoted.
- if (!isFTL(graph.m_plan.mode))
+ if (!graph.m_plan.isFTL())
read(Stack);
// Even though it's phantom, it still has the property that one can't be replaced with another.
diff --git a/Source/JavaScriptCore/dfg/DFGCommonData.cpp b/Source/JavaScriptCore/dfg/DFGCommonData.cpp
index b11aab1..59565b5 100644
--- a/Source/JavaScriptCore/dfg/DFGCommonData.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCommonData.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,7 +42,7 @@
void CommonData::notifyCompilingStructureTransition(Plan& plan, CodeBlock* codeBlock, Node* node)
{
- plan.transitions.addLazily(
+ plan.transitions().addLazily(
codeBlock,
node->origin.semantic.codeOriginOwner(),
node->transition()->previous.get(),
diff --git a/Source/JavaScriptCore/dfg/DFGCommonData.h b/Source/JavaScriptCore/dfg/DFGCommonData.h
index f7dbe3e..cb30438 100644
--- a/Source/JavaScriptCore/dfg/DFGCommonData.h
+++ b/Source/JavaScriptCore/dfg/DFGCommonData.h
@@ -48,7 +48,7 @@
namespace DFG {
struct Node;
-struct Plan;
+class Plan;
// CommonData holds the set of data that both DFG and FTL code blocks need to know
// about themselves.
diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
index 108180d..fb1f6dc 100644
--- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
@@ -560,7 +560,7 @@
auto addFilterStatus = [&] () {
m_insertionSet.insertNode(
indexInBlock, SpecNone, FilterGetByIdStatus, node->origin,
- OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(node->origin.semantic, status)),
+ OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(node->origin.semantic, status)),
Edge(child));
};
@@ -570,8 +570,8 @@
changed = true;
break;
}
-
- if (!isFTL(m_graph.m_plan.mode))
+
+ if (!m_graph.m_plan.isFTL())
break;
addFilterStatus();
@@ -617,8 +617,8 @@
break;
ASSERT(status.numVariants());
-
- if (status.numVariants() > 1 && !isFTL(m_graph.m_plan.mode))
+
+ if (status.numVariants() > 1 && !m_graph.m_plan.isFTL())
break;
changed = true;
@@ -650,15 +650,15 @@
m_insertionSet.insertNode(
indexInBlock, SpecNone, FilterPutByIdStatus, node->origin,
- OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(node->origin.semantic, status)),
+ OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(node->origin.semantic, status)),
Edge(child));
if (status.numVariants() == 1) {
emitPutByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
break;
}
-
- ASSERT(isFTL(m_graph.m_plan.mode));
+
+ ASSERT(m_graph.m_plan.isFTL());
MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
data->variants = status.variants();
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp
index 0f6e0a4..e2a3457 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp
@@ -101,8 +101,8 @@
Ref<Plan> plan = adoptRef(
*new Plan(codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues));
-
- plan->callback = WTFMove(callback);
+
+ plan->setCallback(WTFMove(callback));
if (Options::useConcurrentJIT()) {
Worklist& worklist = ensureGlobalWorklistFor(mode);
if (logCompilationChanges(mode))
diff --git a/Source/JavaScriptCore/dfg/DFGFinalizer.h b/Source/JavaScriptCore/dfg/DFGFinalizer.h
index f28beba..80b9242 100644
--- a/Source/JavaScriptCore/dfg/DFGFinalizer.h
+++ b/Source/JavaScriptCore/dfg/DFGFinalizer.h
@@ -32,7 +32,7 @@
namespace JSC { namespace DFG {
-struct Plan;
+class Plan;
class Finalizer {
WTF_MAKE_NONCOPYABLE(Finalizer); WTF_MAKE_FAST_ALLOCATED;
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
index 2f0e23e..a9f0b25 100644
--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -907,7 +907,7 @@
// conversions.
if (!child->shouldSpeculateInt32()
&& !child->shouldSpeculateAnyInt()
- && !(child->shouldSpeculateNumberOrBoolean() && isFTL(m_graph.m_plan.mode)))
+ && !(child->shouldSpeculateNumberOrBoolean() && m_graph.m_plan.isFTL()))
badNews = true;
}
@@ -930,7 +930,7 @@
else if (child->shouldSpeculateAnyInt())
fixEdge<Int52RepUse>(child);
else {
- RELEASE_ASSERT(child->shouldSpeculateNumberOrBoolean() && isFTL(m_graph.m_plan.mode));
+ RELEASE_ASSERT(child->shouldSpeculateNumberOrBoolean() && m_graph.m_plan.isFTL());
fixDoubleOrBooleanEdge(child);
}
}
@@ -3497,7 +3497,7 @@
node->setOpAndDefaultFlags(CompareStrictEq);
return;
}
- if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 7) || isFTL(m_graph.m_plan.mode))) {
+ if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 7) || m_graph.m_plan.isFTL())) {
fixEdge<StringUse>(node->child1());
fixEdge<StringUse>(node->child2());
node->setOpAndDefaultFlags(CompareStrictEq);
@@ -3569,12 +3569,12 @@
node->setOpAndDefaultFlags(CompareStrictEq);
return;
}
- if (node->child1()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || isFTL(m_graph.m_plan.mode))) {
+ if (node->child1()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || m_graph.m_plan.isFTL())) {
fixEdge<StringUse>(node->child1());
node->setOpAndDefaultFlags(CompareStrictEq);
return;
}
- if (node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || isFTL(m_graph.m_plan.mode))) {
+ if (node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || m_graph.m_plan.isFTL())) {
fixEdge<StringUse>(node->child2());
node->setOpAndDefaultFlags(CompareStrictEq);
return;
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp
index ca4b0e2..d4483c6 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.cpp
+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -70,7 +70,7 @@
Graph::Graph(VM& vm, Plan& plan)
: m_vm(vm)
, m_plan(plan)
- , m_codeBlock(m_plan.codeBlock)
+ , m_codeBlock(m_plan.codeBlock())
, m_profiledBlock(m_codeBlock->alternative())
, m_ssaCFG(std::make_unique<SSACFG>(*this))
, m_nextMachineLocal(0)
@@ -1022,14 +1022,15 @@
{
if (!key.isWatchable())
return false;
-
- m_plan.weakReferences.addLazily(key.object());
+
+ DesiredWeakReferences& weakReferences = m_plan.weakReferences();
+ weakReferences.addLazily(key.object());
if (key.hasPrototype())
- m_plan.weakReferences.addLazily(key.prototype());
+ weakReferences.addLazily(key.prototype());
if (key.hasRequiredValue())
- m_plan.weakReferences.addLazily(key.requiredValue());
-
- m_plan.watchpoints.addLazily(key);
+ weakReferences.addLazily(key.requiredValue());
+
+ m_plan.watchpoints().addLazily(key);
if (key.kind() == PropertyCondition::Presence)
m_safeToLoad.add(std::make_pair(key.object(), key.offset()));
@@ -1076,12 +1077,12 @@
m_inferredTypes.add(key, typeDescriptor);
- m_plan.weakReferences.addLazily(typeObject);
+ m_plan.weakReferences().addLazily(typeObject);
registerInferredType(typeDescriptor);
// Note that we may already be watching this desired inferred type, because multiple structures may
// point to the same InferredType instance.
- m_plan.watchpoints.addLazily(DesiredInferredType(typeObject, typeDescriptor));
+ m_plan.watchpoints().addLazily(DesiredInferredType(typeObject, typeDescriptor));
return typeDescriptor;
}
@@ -1226,7 +1227,7 @@
unsigned Graph::requiredRegisterCountForExit()
{
unsigned count = JIT::frameRegisterCountFor(m_profiledBlock);
- for (InlineCallFrameSet::iterator iter = m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
+ for (InlineCallFrameSet::iterator iter = m_plan.inlineCallFrames()->begin(); !!iter; ++iter) {
InlineCallFrame* inlineCallFrame = *iter;
CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
unsigned requiredCount = VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + JIT::frameRegisterCountFor(codeBlock);
@@ -1426,11 +1427,11 @@
continue;
ASSERT(value->structure());
- ASSERT(m_plan.weakReferences.contains(value->structure()));
-
+ ASSERT(m_plan.weakReferences().contains(value->structure()));
+
switch (value->strength()) {
case WeakValue: {
- m_plan.weakReferences.addLazily(value->value().asCell());
+ m_plan.weakReferences().addLazily(value->value().asCell());
break;
}
case StrongValue: {
@@ -1503,8 +1504,8 @@
RegisteredStructure Graph::registerStructure(Structure* structure, StructureRegistrationResult& result)
{
- m_plan.weakReferences.addLazily(structure);
- if (m_plan.watchpoints.consider(structure))
+ m_plan.weakReferences().addLazily(structure);
+ if (m_plan.watchpoints().consider(structure))
result = StructureRegisteredAndWatched;
else
result = StructureRegisteredNormally;
@@ -1513,8 +1514,8 @@
void Graph::registerAndWatchStructureTransition(Structure* structure)
{
- m_plan.weakReferences.addLazily(structure);
- m_plan.watchpoints.addLazily(structure->transitionWatchpointSet());
+ m_plan.weakReferences().addLazily(structure);
+ m_plan.watchpoints().addLazily(structure->transitionWatchpointSet());
}
void Graph::assertIsRegistered(Structure* structure)
@@ -1522,9 +1523,9 @@
// It's convenient to be able to call this with a maybe-null structure.
if (!structure)
return;
-
- DFG_ASSERT(*this, nullptr, m_plan.weakReferences.contains(structure));
-
+
+ DFG_ASSERT(*this, nullptr, m_plan.weakReferences().contains(structure));
+
if (!structure->dfgShouldWatch())
return;
if (watchpoints().isWatched(structure->transitionWatchpointSet()))
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index 24f1653..2a09c0f 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -780,12 +780,12 @@
InlineWatchpointSet& set = globalObject->numberToStringWatchpoint();
return isWatchingGlobalObjectWatchpoint(globalObject, set);
}
-
- Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
-
- DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
- DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
-
+
+ Profiler::Compilation* compilation() { return m_plan.compilation(); }
+
+ DesiredIdentifiers& identifiers() { return m_plan.identifiers(); }
+ DesiredWatchpoints& watchpoints() { return m_plan.watchpoints(); }
+
// Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
// this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
// what's going on.
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 5fd50b8..4f06be7 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -59,8 +59,8 @@
if (UNLIKELY(shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler))
m_disassembler = std::make_unique<Disassembler>(dfg);
#if ENABLE(FTL_JIT)
- m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy);
- for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes)
+ m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy());
+ for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes())
m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
#endif
}
@@ -192,9 +192,9 @@
m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
- if (!m_graph.m_plan.inlineCallFrames->isEmpty())
- m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
-
+ if (!m_graph.m_plan.inlineCallFrames()->isEmpty())
+ m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames();
+
#if USE(JSVALUE32_64)
m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif
@@ -406,7 +406,7 @@
auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
if (linkBuffer->didFailToAllocate()) {
- m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ m_graph.m_plan.setFinalizer(std::make_unique<FailedFinalizer>(m_graph.m_plan));
return;
}
@@ -417,9 +417,9 @@
codeBlock()->shrinkToFit(CodeBlock::LateShrink);
disassemble(*linkBuffer);
-
- m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
- m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer));
+
+ m_graph.m_plan.setFinalizer(std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer)));
}
void JITCompiler::compileFunction()
@@ -511,7 +511,7 @@
// === Link ===
auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
if (linkBuffer->didFailToAllocate()) {
- m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ m_graph.m_plan.setFinalizer(std::make_unique<FailedFinalizer>(m_graph.m_plan));
return;
}
link(*linkBuffer);
@@ -527,8 +527,8 @@
MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = linkBuffer->locationOf<JSEntryPtrTag>(arityCheck);
- m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
- m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck);
+ m_graph.m_plan.setFinalizer(std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck));
}
void JITCompiler::disassemble(LinkBuffer& linkBuffer)
@@ -537,9 +537,9 @@
m_disassembler->dump(linkBuffer);
linkBuffer.didAlreadyDisassemble();
}
-
- if (UNLIKELY(m_graph.m_plan.compilation))
- m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
+
+ if (UNLIKELY(m_graph.m_plan.compilation()))
+ m_disassembler->reportToProfiler(m_graph.m_plan.compilation(), linkBuffer);
}
#if USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index c17b032..0bd66c7 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -214,7 +214,7 @@
void addWeakReference(JSCell* target)
{
- m_graph.m_plan.weakReferences.addLazily(target);
+ m_graph.m_plan.weakReferences().addLazily(target);
}
void addWeakReferences(const StructureSet& structureSet)
diff --git a/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp b/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
index e46196c..73326e2 100644
--- a/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
@@ -56,11 +56,11 @@
bool JITFinalizer::finalize()
{
- MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data());
+ MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::DFGJIT)).data());
m_jitCode->initializeCodeRef(codeRef, codeRef.code());
-
- m_plan.codeBlock->setJITCode(m_jitCode.copyRef());
-
+
+ m_plan.codeBlock()->setJITCode(m_jitCode.copyRef());
+
finalizeCommon();
return true;
@@ -70,10 +70,10 @@
{
RELEASE_ASSERT(!m_withArityCheck.isEmptyValue());
m_jitCode->initializeCodeRef(
- FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data()),
+ FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::DFGJIT)).data()),
m_withArityCheck);
- m_plan.codeBlock->setJITCode(m_jitCode.copyRef());
-
+ m_plan.codeBlock()->setJITCode(m_jitCode.copyRef());
+
finalizeCommon();
return true;
@@ -82,18 +82,18 @@
void JITFinalizer::finalizeCommon()
{
// Some JIT finalizers may have added more constants. Shrink-to-fit those things now.
- m_plan.codeBlock->constants().shrinkToFit();
- m_plan.codeBlock->constantsSourceCodeRepresentation().shrinkToFit();
-
+ m_plan.codeBlock()->constants().shrinkToFit();
+ m_plan.codeBlock()->constantsSourceCodeRepresentation().shrinkToFit();
+
#if ENABLE(FTL_JIT)
- m_jitCode->optimizeAfterWarmUp(m_plan.codeBlock);
+ m_jitCode->optimizeAfterWarmUp(m_plan.codeBlock());
#endif // ENABLE(FTL_JIT)
-
- if (UNLIKELY(m_plan.compilation))
- m_plan.vm->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock, *m_plan.compilation);
-
- if (!m_plan.willTryToTierUp)
- m_plan.codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
+
+ if (UNLIKELY(m_plan.compilation()))
+ m_plan.vm()->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock(), *m_plan.compilation());
+
+ if (!m_plan.willTryToTierUp())
+ m_plan.codeBlock()->baselineVersion()->m_didFailFTLCompilation = true;
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
index fbdbe0a..ed52c0b 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -47,10 +47,10 @@
bool run()
{
- RELEASE_ASSERT(m_graph.m_plan.mode == FTLForOSREntryMode);
+ RELEASE_ASSERT(m_graph.m_plan.mode() == FTLForOSREntryMode);
RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);
-
- unsigned bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex;
+
+ unsigned bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex();
RELEASE_ASSERT(bytecodeIndex);
RELEASE_ASSERT(bytecodeIndex != UINT_MAX);
diff --git a/Source/JavaScriptCore/dfg/DFGPhase.cpp b/Source/JavaScriptCore/dfg/DFGPhase.cpp
index b225531..41079e1 100644
--- a/Source/JavaScriptCore/dfg/DFGPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,8 +45,8 @@
m_graph.dump(out);
m_graphDumpBeforePhase = out.toCString();
}
-
- if (!shouldDumpGraphAtEachPhase(m_graph.m_plan.mode))
+
+ if (!shouldDumpGraphAtEachPhase(m_graph.m_plan.mode()))
return;
dataLog("Beginning DFG phase ", m_name, ".\n");
diff --git a/Source/JavaScriptCore/dfg/DFGPhase.h b/Source/JavaScriptCore/dfg/DFGPhase.h
index 863e068..f9c7050 100644
--- a/Source/JavaScriptCore/dfg/DFGPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGPhase.h
@@ -81,7 +81,7 @@
bool result = phase.run();
- if (result && logCompilationChanges(phase.graph().m_plan.mode))
+ if (result && logCompilationChanges(phase.graph().m_plan.mode()))
dataLogF("Phase %s changed the IR.\n", phase.name());
return result;
}
diff --git a/Source/JavaScriptCore/dfg/DFGPlan.cpp b/Source/JavaScriptCore/dfg/DFGPlan.cpp
index f122f6e..cde4860 100644
--- a/Source/JavaScriptCore/dfg/DFGPlan.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPlan.cpp
@@ -105,7 +105,7 @@
void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
{
GraphDumpMode modeForFinalValidate = DumpGraph;
- if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
+ if (verboseCompilationEnabled(graph.m_plan.mode()) || forceDump) {
dataLog(text, "\n");
graph.dump();
modeForFinalValidate = DontDumpGraph;
@@ -136,19 +136,19 @@
Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
CompilationMode mode, unsigned osrEntryBytecodeIndex,
const Operands<JSValue>& mustHandleValues)
- : vm(passedCodeBlock->vm())
- , codeBlock(passedCodeBlock)
- , profiledDFGCodeBlock(profiledDFGCodeBlock)
- , mode(mode)
- , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
- , mustHandleValues(mustHandleValues)
- , compilation(UNLIKELY(vm->m_perBytecodeProfiler) ? adoptRef(new Profiler::Compilation(vm->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock), profilerCompilationKindForMode(mode))) : nullptr)
- , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
- , identifiers(codeBlock)
- , weakReferences(codeBlock)
- , stage(Preparing)
+ : m_vm(passedCodeBlock->vm())
+ , m_codeBlock(passedCodeBlock)
+ , m_profiledDFGCodeBlock(profiledDFGCodeBlock)
+ , m_mode(mode)
+ , m_osrEntryBytecodeIndex(osrEntryBytecodeIndex)
+ , m_mustHandleValues(mustHandleValues)
+ , m_compilation(UNLIKELY(m_vm->m_perBytecodeProfiler) ? adoptRef(new Profiler::Compilation(m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock), profilerCompilationKindForMode(mode))) : nullptr)
+ , m_inlineCallFrames(adoptRef(new InlineCallFrameSet()))
+ , m_identifiers(m_codeBlock)
+ , m_weakReferences(m_codeBlock)
+ , m_stage(Preparing)
{
- RELEASE_ASSERT(codeBlock->alternative()->jitCode());
+ RELEASE_ASSERT(m_codeBlock->alternative()->jitCode());
}
Plan::~Plan()
@@ -159,43 +159,43 @@
{
return reportCompileTimes()
|| Options::reportTotalCompileTimes()
- || (vm && vm->m_perBytecodeProfiler);
+ || (m_vm && m_vm->m_perBytecodeProfiler);
}
bool Plan::reportCompileTimes() const
{
return Options::reportCompileTimes()
|| Options::reportDFGCompileTimes()
- || (Options::reportFTLCompileTimes() && isFTL(mode));
+ || (Options::reportFTLCompileTimes() && isFTL());
}
void Plan::compileInThread(ThreadData* threadData)
{
- this->threadData = threadData;
-
+ m_threadData = threadData;
+
MonotonicTime before { };
CString codeBlockName;
if (UNLIKELY(computeCompileTimes()))
before = MonotonicTime::now();
if (UNLIKELY(reportCompileTimes()))
- codeBlockName = toCString(*codeBlock);
-
+ codeBlockName = toCString(*m_codeBlock);
+
CompilationScope compilationScope;
- if (logCompilationChanges(mode) || Options::logPhaseTimes())
- dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");
+ if (logCompilationChanges(m_mode) || Options::logPhaseTimes())
+ dataLog("DFG(Plan) compiling ", *m_codeBlock, " with ", m_mode, ", number of instructions = ", m_codeBlock->instructionCount(), "\n");
CompilationPath path = compileInThreadImpl();
- RELEASE_ASSERT(path == CancelPath || finalizer);
- RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));
-
+ RELEASE_ASSERT(path == CancelPath || m_finalizer);
+ RELEASE_ASSERT((path == CancelPath) == (m_stage == Cancelled));
+
MonotonicTime after { };
if (UNLIKELY(computeCompileTimes())) {
after = MonotonicTime::now();
if (Options::reportTotalCompileTimes()) {
- if (isFTL(mode)) {
+ if (isFTL()) {
totalFTLCompileTime += after - before;
totalFTLDFGCompileTime += m_timeBeforeFTL - before;
totalFTLB3CompileTime += after - m_timeBeforeFTL;
@@ -221,14 +221,14 @@
RELEASE_ASSERT_NOT_REACHED();
break;
}
- if (codeBlock) { // codeBlock will be null if the compilation was cancelled.
+ if (m_codeBlock) { // m_codeBlock will be null if the compilation was cancelled.
if (path == FTLPath)
- CODEBLOCK_LOG_EVENT(codeBlock, "ftlCompile", ("took ", (after - before).milliseconds(), " ms (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ") with ", pathName));
+ CODEBLOCK_LOG_EVENT(m_codeBlock, "ftlCompile", ("took ", (after - before).milliseconds(), " ms (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ") with ", pathName));
else
- CODEBLOCK_LOG_EVENT(codeBlock, "dfgCompile", ("took ", (after - before).milliseconds(), " ms with ", pathName));
+ CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgCompile", ("took ", (after - before).milliseconds(), " ms with ", pathName));
}
if (UNLIKELY(reportCompileTimes())) {
- dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", (after - before).milliseconds(), " ms");
+ dataLog("Optimized ", codeBlockName, " using ", m_mode, " with ", pathName, " into ", m_finalizer ? m_finalizer->codeSize() : 0, " bytes in ", (after - before).milliseconds(), " ms");
if (path == FTLPath)
dataLog(" (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ")");
dataLog(".\n");
@@ -238,17 +238,17 @@
Plan::CompilationPath Plan::compileInThreadImpl()
{
cleanMustHandleValuesIfNecessary();
-
- if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
+
+ if (verboseCompilationEnabled(m_mode) && m_osrEntryBytecodeIndex != UINT_MAX) {
dataLog("\n");
- dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
+ dataLog("Compiler must handle OSR entry from bc#", m_osrEntryBytecodeIndex, " with values: ", m_mustHandleValues, "\n");
dataLog("\n");
}
-
- Graph dfg(*vm, *this);
+
+ Graph dfg(*m_vm, *this);
parse(dfg);
- codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
+ m_codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
bool changed = false;
@@ -270,7 +270,7 @@
// in the CodeBlock. This is a good time to perform an early shrink, which is more
// powerful than a late one. It's safe to do so because we haven't generated any code
// that references any of the tables directly, yet.
- codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
+ m_codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
if (validationEnabled())
validate(dfg);
@@ -290,11 +290,11 @@
RUN_PHASE(performPredictionInjection);
RUN_PHASE(performStaticExecutionCountEstimation);
-
- if (mode == FTLForOSREntryMode) {
+
+ if (m_mode == FTLForOSREntryMode) {
bool result = performOSREntrypointCreation(dfg);
if (!result) {
- finalizer = std::make_unique<FailedFinalizer>(*this);
+ m_finalizer = std::make_unique<FailedFinalizer>(*this);
return FailPath;
}
RUN_PHASE(performCPSRethreading);
@@ -308,9 +308,9 @@
RUN_PHASE(performFixup);
RUN_PHASE(performInvalidationPointInjection);
RUN_PHASE(performTypeCheckHoisting);
-
+
dfg.m_fixpointState = FixpointNotConverged;
-
+
// For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
// many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
// small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
@@ -331,7 +331,7 @@
validate(dfg);
RUN_PHASE(performCPSRethreading);
- if (!isFTL(mode)) {
+ if (!isFTL()) {
// Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
// in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
// ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
@@ -365,10 +365,10 @@
dfg.ensureCPSNaturalLoops();
}
- switch (mode) {
+ switch (m_mode) {
case DFGMode: {
dfg.m_fixpointState = FixpointConverged;
-
+
RUN_PHASE(performTierUpCheckInjection);
RUN_PHASE(performFastStoreBarrierInsertion);
@@ -383,7 +383,7 @@
dumpAndVerifyGraph(dfg, "Graph after optimization:");
JITCompiler dataFlowJIT(dfg);
- if (codeBlock->codeType() == FunctionCode)
+ if (m_codeBlock->codeType() == FunctionCode)
dataFlowJIT.compileFunction();
else
dataFlowJIT.compile();
@@ -395,7 +395,7 @@
case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
if (FTL::canCompile(dfg) == FTL::CannotCompile) {
- finalizer = std::make_unique<FailedFinalizer>(*this);
+ m_finalizer = std::make_unique<FailedFinalizer>(*this);
return FailPath;
}
@@ -454,11 +454,11 @@
RUN_PHASE(performCleanUp);
RUN_PHASE(performIntegerCheckCombining);
RUN_PHASE(performGlobalCSE);
-
+
// At this point we're not allowed to do any further code motion because our reasoning
// about code motion assumes that it's OK to insert GC points in random places.
dfg.m_fixpointState = FixpointConverged;
-
+
RUN_PHASE(performLivenessAnalysis);
RUN_PHASE(performCFA);
RUN_PHASE(performGlobalStoreBarrierInsertion);
@@ -473,11 +473,11 @@
RUN_PHASE(performWatchpointCollection);
if (FTL::canCompile(dfg) == FTL::CannotCompile) {
- finalizer = std::make_unique<FailedFinalizer>(*this);
+ m_finalizer = std::make_unique<FailedFinalizer>(*this);
return FailPath;
}
- dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));
+ dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(m_mode));
// Flash a safepoint in case the GC wants some action.
Safepoint::Result safepointResult;
@@ -536,95 +536,95 @@
bool Plan::isStillValid()
{
- CodeBlock* replacement = codeBlock->replacement();
+ CodeBlock* replacement = m_codeBlock->replacement();
if (!replacement)
return false;
// FIXME: This is almost certainly not necessary. There's no way for the baseline
// code to be replaced during a compilation, except if we delete the plan, in which
// case we wouldn't be here.
// https://bugs.webkit.org/show_bug.cgi?id=132707
- if (codeBlock->alternative() != replacement->baselineVersion())
+ if (m_codeBlock->alternative() != replacement->baselineVersion())
return false;
- if (!watchpoints.areStillValid())
+ if (!m_watchpoints.areStillValid())
return false;
return true;
}
void Plan::reallyAdd(CommonData* commonData)
{
- watchpoints.reallyAdd(codeBlock, *commonData);
- identifiers.reallyAdd(*vm, commonData);
- weakReferences.reallyAdd(*vm, commonData);
- transitions.reallyAdd(*vm, commonData);
- commonData->recordedStatuses = WTFMove(recordedStatuses);
+ m_watchpoints.reallyAdd(m_codeBlock, *commonData);
+ m_identifiers.reallyAdd(*m_vm, commonData);
+ m_weakReferences.reallyAdd(*m_vm, commonData);
+ m_transitions.reallyAdd(*m_vm, commonData);
+ commonData->recordedStatuses = WTFMove(m_recordedStatuses);
}
void Plan::notifyCompiling()
{
- stage = Compiling;
+ m_stage = Compiling;
}
void Plan::notifyReady()
{
- callback->compilationDidBecomeReadyAsynchronously(codeBlock, profiledDFGCodeBlock);
- stage = Ready;
+ m_callback->compilationDidBecomeReadyAsynchronously(m_codeBlock, m_profiledDFGCodeBlock);
+ m_stage = Ready;
}
CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
// We will establish new references from the code block to things. So, we need a barrier.
- vm->heap.writeBarrier(codeBlock);
-
+ m_vm->heap.writeBarrier(m_codeBlock);
+
if (!isStillValid()) {
- CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("invalidated"));
+ CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("invalidated"));
return CompilationInvalidated;
}
bool result;
- if (codeBlock->codeType() == FunctionCode)
- result = finalizer->finalizeFunction();
+ if (m_codeBlock->codeType() == FunctionCode)
+ result = m_finalizer->finalizeFunction();
else
- result = finalizer->finalize();
-
+ result = m_finalizer->finalize();
+
if (!result) {
- CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("failed"));
+ CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("failed"));
return CompilationFailed;
}
-
- reallyAdd(codeBlock->jitCode()->dfgCommon());
-
+
+ reallyAdd(m_codeBlock->jitCode()->dfgCommon());
+
if (validationEnabled()) {
TrackedReferences trackedReferences;
-
- for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
+
+ for (WriteBarrier<JSCell>& reference : m_codeBlock->jitCode()->dfgCommon()->weakReferences)
trackedReferences.add(reference.get());
- for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
+ for (WriteBarrier<Structure>& reference : m_codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
trackedReferences.add(reference.get());
- for (WriteBarrier<Unknown>& constant : codeBlock->constants())
+ for (WriteBarrier<Unknown>& constant : m_codeBlock->constants())
trackedReferences.add(constant.get());
- for (auto* inlineCallFrame : *inlineCallFrames) {
+ for (auto* inlineCallFrame : *m_inlineCallFrames) {
ASSERT(inlineCallFrame->baselineCodeBlock.get());
trackedReferences.add(inlineCallFrame->baselineCodeBlock.get());
}
-
+
// Check that any other references that we have anywhere in the JITCode are also
// tracked either strongly or weakly.
- codeBlock->jitCode()->validateReferences(trackedReferences);
+ m_codeBlock->jitCode()->validateReferences(trackedReferences);
}
-
- CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("succeeded"));
+
+ CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("succeeded"));
return CompilationSuccessful;
}
void Plan::finalizeAndNotifyCallback()
{
- callback->compilationDidComplete(codeBlock, profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
+ m_callback->compilationDidComplete(m_codeBlock, m_profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
}
CompilationKey Plan::key()
{
- return CompilationKey(codeBlock->alternative(), mode);
+ return CompilationKey(m_codeBlock->alternative(), m_mode);
}
void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
@@ -633,82 +633,82 @@
return;
cleanMustHandleValuesIfNecessary();
- for (unsigned i = mustHandleValues.size(); i--;)
- visitor.appendUnbarriered(mustHandleValues[i]);
-
- recordedStatuses.markIfCheap(visitor);
+ for (unsigned i = m_mustHandleValues.size(); i--;)
+ visitor.appendUnbarriered(m_mustHandleValues[i]);
- visitor.appendUnbarriered(codeBlock);
- visitor.appendUnbarriered(codeBlock->alternative());
- visitor.appendUnbarriered(profiledDFGCodeBlock);
+ m_recordedStatuses.markIfCheap(visitor);
- if (inlineCallFrames) {
- for (auto* inlineCallFrame : *inlineCallFrames) {
+ visitor.appendUnbarriered(m_codeBlock);
+ visitor.appendUnbarriered(m_codeBlock->alternative());
+ visitor.appendUnbarriered(m_profiledDFGCodeBlock);
+
+ if (m_inlineCallFrames) {
+ for (auto* inlineCallFrame : *m_inlineCallFrames) {
ASSERT(inlineCallFrame->baselineCodeBlock.get());
visitor.appendUnbarriered(inlineCallFrame->baselineCodeBlock.get());
}
}
- weakReferences.visitChildren(visitor);
- transitions.visitChildren(visitor);
+ m_weakReferences.visitChildren(visitor);
+ m_transitions.visitChildren(visitor);
}
void Plan::finalizeInGC()
{
- recordedStatuses.finalizeWithoutDeleting();
+ m_recordedStatuses.finalizeWithoutDeleting();
}
bool Plan::isKnownToBeLiveDuringGC()
{
- if (stage == Cancelled)
+ if (m_stage == Cancelled)
return false;
- if (!Heap::isMarked(codeBlock->ownerExecutable()))
+ if (!Heap::isMarked(m_codeBlock->ownerExecutable()))
return false;
- if (!Heap::isMarked(codeBlock->alternative()))
+ if (!Heap::isMarked(m_codeBlock->alternative()))
return false;
- if (!!profiledDFGCodeBlock && !Heap::isMarked(profiledDFGCodeBlock))
+ if (!!m_profiledDFGCodeBlock && !Heap::isMarked(m_profiledDFGCodeBlock))
return false;
return true;
}
void Plan::cancel()
{
- vm = nullptr;
- codeBlock = nullptr;
- profiledDFGCodeBlock = nullptr;
- mustHandleValues.clear();
- compilation = nullptr;
- finalizer = nullptr;
- inlineCallFrames = nullptr;
- watchpoints = DesiredWatchpoints();
- identifiers = DesiredIdentifiers();
- weakReferences = DesiredWeakReferences();
- transitions = DesiredTransitions();
- callback = nullptr;
- stage = Cancelled;
+ m_vm = nullptr;
+ m_codeBlock = nullptr;
+ m_profiledDFGCodeBlock = nullptr;
+ m_mustHandleValues.clear();
+ m_compilation = nullptr;
+ m_finalizer = nullptr;
+ m_inlineCallFrames = nullptr;
+ m_watchpoints = DesiredWatchpoints();
+ m_identifiers = DesiredIdentifiers();
+ m_weakReferences = DesiredWeakReferences();
+ m_transitions = DesiredTransitions();
+ m_callback = nullptr;
+ m_stage = Cancelled;
}
void Plan::cleanMustHandleValuesIfNecessary()
{
- LockHolder locker(mustHandleValueCleaningLock);
-
- if (!mustHandleValuesMayIncludeGarbage)
+ LockHolder locker(m_mustHandleValueCleaningLock);
+
+ if (!m_mustHandleValuesMayIncludeGarbage)
return;
-
- mustHandleValuesMayIncludeGarbage = false;
-
- if (!codeBlock)
+
+ m_mustHandleValuesMayIncludeGarbage = false;
+
+ if (!m_codeBlock)
return;
-
- if (!mustHandleValues.numberOfLocals())
+
+ if (!m_mustHandleValues.numberOfLocals())
return;
-
- CodeBlock* alternative = codeBlock->alternative();
- FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeOffset(alternative, osrEntryBytecodeIndex);
-
- for (unsigned local = mustHandleValues.numberOfLocals(); local--;) {
+
+ CodeBlock* alternative = m_codeBlock->alternative();
+ FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeOffset(alternative, m_osrEntryBytecodeIndex);
+
+ for (unsigned local = m_mustHandleValues.numberOfLocals(); local--;) {
if (!liveness[local])
- mustHandleValues.local(local) = jsUndefined();
+ m_mustHandleValues.local(local) = jsUndefined();
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGPlan.h b/Source/JavaScriptCore/dfg/DFGPlan.h
index ef4ab5c..3da11bd 100644
--- a/Source/JavaScriptCore/dfg/DFGPlan.h
+++ b/Source/JavaScriptCore/dfg/DFGPlan.h
@@ -51,7 +51,8 @@
#if ENABLE(DFG_JIT)
-struct Plan : public ThreadSafeRefCounted<Plan> {
+class Plan : public ThreadSafeRefCounted<Plan> {
+public:
Plan(
CodeBlock* codeBlockToCompile, CodeBlock* profiledDFGCodeBlock,
CompilationMode, unsigned osrEntryBytecodeIndex,
@@ -76,47 +77,43 @@
void finalizeInGC();
void cancel();
- bool canTierUpAndOSREnter() const { return !tierUpAndOSREnterBytecodes.isEmpty(); }
-
+ bool canTierUpAndOSREnter() const { return !m_tierUpAndOSREnterBytecodes.isEmpty(); }
+
void cleanMustHandleValuesIfNecessary();
-
- // Warning: pretty much all of the pointer fields in this object get nulled by cancel(). So, if
- // you're writing code that is callable on the cancel path, be sure to null check everything!
-
- VM* vm;
- // These can be raw pointers because we visit them during every GC in checkLivenessAndVisitChildren.
- CodeBlock* codeBlock;
- CodeBlock* profiledDFGCodeBlock;
+ VM* vm() const { return m_vm; }
- CompilationMode mode;
- const unsigned osrEntryBytecodeIndex;
- Operands<JSValue> mustHandleValues;
- bool mustHandleValuesMayIncludeGarbage { true };
- Lock mustHandleValueCleaningLock;
-
- ThreadData* threadData;
+ CodeBlock* codeBlock() { return m_codeBlock; }
- RefPtr<Profiler::Compilation> compilation;
+ bool isFTL() const { return DFG::isFTL(m_mode); }
+ CompilationMode mode() const { return m_mode; }
+ unsigned osrEntryBytecodeIndex() const { return m_osrEntryBytecodeIndex; }
+ const Operands<JSValue>& mustHandleValues() const { return m_mustHandleValues; }
- std::unique_ptr<Finalizer> finalizer;
-
- RefPtr<InlineCallFrameSet> inlineCallFrames;
- DesiredWatchpoints watchpoints;
- DesiredIdentifiers identifiers;
- DesiredWeakReferences weakReferences;
- DesiredTransitions transitions;
- RecordedStatuses recordedStatuses;
-
- bool willTryToTierUp { false };
+ ThreadData* threadData() const { return m_threadData; }
+ Profiler::Compilation* compilation() const { return m_compilation.get(); }
- HashMap<unsigned, Vector<unsigned>> tierUpInLoopHierarchy;
- Vector<unsigned> tierUpAndOSREnterBytecodes;
+ Finalizer* finalizer() const { return m_finalizer.get(); }
+ void setFinalizer(std::unique_ptr<Finalizer>&& finalizer) { m_finalizer = WTFMove(finalizer); }
+
+ RefPtr<InlineCallFrameSet> inlineCallFrames() const { return m_inlineCallFrames; }
+ DesiredWatchpoints& watchpoints() { return m_watchpoints; }
+ DesiredIdentifiers& identifiers() { return m_identifiers; }
+ DesiredWeakReferences& weakReferences() { return m_weakReferences; }
+ DesiredTransitions& transitions() { return m_transitions; }
+ RecordedStatuses& recordedStatuses() { return m_recordedStatuses; }
+
+ bool willTryToTierUp() const { return m_willTryToTierUp; }
+ void setWillTryToTierUp(bool willTryToTierUp) { m_willTryToTierUp = willTryToTierUp; }
+
+ HashMap<unsigned, Vector<unsigned>>& tierUpInLoopHierarchy() { return m_tierUpInLoopHierarchy; }
+ Vector<unsigned>& tierUpAndOSREnterBytecodes() { return m_tierUpAndOSREnterBytecodes; }
enum Stage { Preparing, Compiling, Ready, Cancelled };
- Stage stage;
+ Stage stage() const { return m_stage; }
- RefPtr<DeferredCompilationCallback> callback;
+ DeferredCompilationCallback* callback() const { return m_callback.get(); }
+ void setCallback(Ref<DeferredCompilationCallback>&& callback) { m_callback = WTFMove(callback); }
private:
bool computeCompileTimes() const;
@@ -128,6 +125,43 @@
bool isStillValid();
void reallyAdd(CommonData*);
+ // Warning: pretty much all of the pointer fields in this object get nulled by cancel(). So, if
+ // you're writing code that is callable on the cancel path, be sure to null check everything!
+
+ VM* m_vm;
+
+ // These can be raw pointers because we visit them during every GC in checkLivenessAndVisitChildren.
+ CodeBlock* m_codeBlock;
+ CodeBlock* m_profiledDFGCodeBlock;
+
+ CompilationMode m_mode;
+ const unsigned m_osrEntryBytecodeIndex;
+ Operands<JSValue> m_mustHandleValues;
+ bool m_mustHandleValuesMayIncludeGarbage { true };
+ Lock m_mustHandleValueCleaningLock;
+
+ ThreadData* m_threadData;
+
+ RefPtr<Profiler::Compilation> m_compilation;
+
+ std::unique_ptr<Finalizer> m_finalizer;
+
+ RefPtr<InlineCallFrameSet> m_inlineCallFrames;
+ DesiredWatchpoints m_watchpoints;
+ DesiredIdentifiers m_identifiers;
+ DesiredWeakReferences m_weakReferences;
+ DesiredTransitions m_transitions;
+ RecordedStatuses m_recordedStatuses;
+
+ bool m_willTryToTierUp { false };
+
+ HashMap<unsigned, Vector<unsigned>> m_tierUpInLoopHierarchy;
+ Vector<unsigned> m_tierUpAndOSREnterBytecodes;
+
+ Stage m_stage;
+
+ RefPtr<DeferredCompilationCallback> m_callback;
+
MonotonicTime m_timeBeforeFTL;
};
diff --git a/Source/JavaScriptCore/dfg/DFGPlanInlines.h b/Source/JavaScriptCore/dfg/DFGPlanInlines.h
index 151f089..37cd046 100644
--- a/Source/JavaScriptCore/dfg/DFGPlanInlines.h
+++ b/Source/JavaScriptCore/dfg/DFGPlanInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -41,10 +41,10 @@
// an explicit barrier. So, we need to be pessimistic and assume that
// all our CodeBlocks must be visited during GC.
- func(codeBlock);
- func(codeBlock->alternative());
- if (profiledDFGCodeBlock)
- func(profiledDFGCodeBlock);
+ func(m_codeBlock);
+ func(m_codeBlock->alternative());
+ if (m_profiledDFGCodeBlock)
+ func(m_profiledDFGCodeBlock);
}
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h b/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h
index 0556640..0d8bffe 100644
--- a/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h
+++ b/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h
@@ -179,8 +179,8 @@
default:
break;
}
-
- if (isPhantomNode && isFTL(m_graph.m_plan.mode))
+
+ if (isPhantomNode && m_graph.m_plan.isFTL())
break;
if (isForwardingNode && m_node->hasArgumentsChild() && m_node->argumentsChild()
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp
index 6ce864b..d0c720d 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -68,16 +68,17 @@
continue;
if (!block->isOSRTarget)
continue;
- if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
+ if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex())
continue;
- for (size_t i = 0; i < m_graph.m_plan.mustHandleValues.size(); ++i) {
- int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
+ const Operands<JSValue>& mustHandleValues = m_graph.m_plan.mustHandleValues();
+ for (size_t i = 0; i < mustHandleValues.size(); ++i) {
+ int operand = mustHandleValues.operandForIndex(i);
Node* node = block->variablesAtHead.operand(operand);
if (!node)
continue;
ASSERT(node->accessesStack(m_graph));
node->variableAccessData()->predict(
- speculationFromValue(m_graph.m_plan.mustHandleValues[i]));
+ speculationFromValue(mustHandleValues[i]));
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGSafepoint.cpp b/Source/JavaScriptCore/dfg/DFGSafepoint.cpp
index 948c057..0ef0318d 100644
--- a/Source/JavaScriptCore/dfg/DFGSafepoint.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSafepoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -47,7 +47,7 @@
}
Safepoint::Safepoint(Plan& plan, Result& result)
- : m_vm(plan.vm)
+ : m_vm(plan.vm())
, m_plan(plan)
, m_didCallBegin(false)
, m_result(result)
@@ -60,7 +60,7 @@
Safepoint::~Safepoint()
{
RELEASE_ASSERT(m_didCallBegin);
- if (ThreadData* data = m_plan.threadData) {
+ if (ThreadData* data = m_plan.threadData()) {
RELEASE_ASSERT(data->m_safepoint == this);
data->m_rightToRun.lock();
data->m_safepoint = nullptr;
@@ -77,7 +77,7 @@
{
RELEASE_ASSERT(!m_didCallBegin);
m_didCallBegin = true;
- if (ThreadData* data = m_plan.threadData) {
+ if (ThreadData* data = m_plan.threadData()) {
RELEASE_ASSERT(!data->m_safepoint);
data->m_safepoint = this;
data->m_rightToRun.unlockFairly();
diff --git a/Source/JavaScriptCore/dfg/DFGSafepoint.h b/Source/JavaScriptCore/dfg/DFGSafepoint.h
index f46ccfd..720d0e3 100644
--- a/Source/JavaScriptCore/dfg/DFGSafepoint.h
+++ b/Source/JavaScriptCore/dfg/DFGSafepoint.h
@@ -36,8 +36,8 @@
namespace DFG {
+class Plan;
class Scannable;
-struct Plan;
class Safepoint {
public:
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 65be2d1..8bb6a54 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -145,14 +145,14 @@
static TrustedImmPtr weakPointer(Graph& graph, JSCell* cell)
{
- graph.m_plan.weakReferences.addLazily(cell);
+ graph.m_plan.weakReferences().addLazily(cell);
return TrustedImmPtr(bitwise_cast<size_t>(cell));
}
template<typename Key>
static TrustedImmPtr weakPoisonedPointer(Graph& graph, JSCell* cell)
{
- graph.m_plan.weakReferences.addLazily(cell);
+ graph.m_plan.weakReferences().addLazily(cell);
return TrustedImmPtr(bitwise_cast<size_t>(cell) ^ Key::key());
}
diff --git a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp
index 1aeec68..e864d45 100644
--- a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -103,8 +103,8 @@
}
}
}
-
- for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
+
+ for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames()->begin(); !!iter; ++iter) {
InlineCallFrame* inlineCallFrame = *iter;
if (inlineCallFrame->isVarargs()) {
diff --git a/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp b/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
index 9ab4e53..4467d5f 100644
--- a/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
@@ -922,9 +922,9 @@
Graph::parameterSlotsForArgCount(numAllocatedArgs));
}
}
-
- m_graph.m_plan.recordedStatuses.addCallLinkStatus(m_node->origin.semantic, CallLinkStatus(callVariant));
-
+
+ m_graph.m_plan.recordedStatuses().addCallLinkStatus(m_node->origin.semantic, CallLinkStatus(callVariant));
+
m_node->convertToDirectCall(m_graph.freeze(executable));
m_changed = true;
break;
diff --git a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
index a0c2e82..c5067ef 100644
--- a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -61,8 +61,8 @@
bool run()
{
- RELEASE_ASSERT(m_graph.m_plan.mode == DFGMode);
-
+ RELEASE_ASSERT(m_graph.m_plan.mode() == DFGMode);
+
if (!Options::useFTLJIT())
return false;
@@ -110,7 +110,7 @@
unsigned bytecodeIndex = origin.semantic.bytecodeIndex;
if (canOSREnter)
- m_graph.m_plan.tierUpAndOSREnterBytecodes.append(bytecodeIndex);
+ m_graph.m_plan.tierUpAndOSREnterBytecodes().append(bytecodeIndex);
if (const NaturalLoop* loop = naturalLoops.innerMostLoopOf(block)) {
LoopHintDescriptor descriptor;
@@ -147,9 +147,9 @@
}
if (!tierUpCandidates.isEmpty())
- m_graph.m_plan.tierUpInLoopHierarchy.add(entry.key, WTFMove(tierUpCandidates));
+ m_graph.m_plan.tierUpInLoopHierarchy().add(entry.key, WTFMove(tierUpCandidates));
}
- m_graph.m_plan.willTryToTierUp = true;
+ m_graph.m_plan.setWillTryToTierUp(true);
return true;
#else // ENABLE(FTL_JIT)
RELEASE_ASSERT_NOT_REACHED();
diff --git a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
index bacdc1d..9ff9e92 100644
--- a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -442,10 +442,11 @@
ASSERT(block->isReachable);
if (!block->isOSRTarget)
continue;
- if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
+ if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex())
continue;
- for (size_t i = 0; i < m_graph.m_plan.mustHandleValues.size(); ++i) {
- int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
+ const Operands<JSValue>& mustHandleValues = m_graph.m_plan.mustHandleValues();
+ for (size_t i = 0; i < mustHandleValues.size(); ++i) {
+ int operand = mustHandleValues.operandForIndex(i);
Node* node = block->variablesAtHead.operand(operand);
if (!node)
continue;
@@ -455,7 +456,7 @@
continue;
if (!TypeCheck::isValidToHoist(iter->value))
continue;
- JSValue value = m_graph.m_plan.mustHandleValues[i];
+ JSValue value = mustHandleValues[i];
if (!value || !value.isCell() || TypeCheck::isContravenedByValue(iter->value, value)) {
TypeCheck::disableHoisting(iter->value);
continue;
diff --git a/Source/JavaScriptCore/dfg/DFGWorklist.cpp b/Source/JavaScriptCore/dfg/DFGWorklist.cpp
index 68f6aac..b305b71 100644
--- a/Source/JavaScriptCore/dfg/DFGWorklist.cpp
+++ b/Source/JavaScriptCore/dfg/DFGWorklist.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -66,7 +66,7 @@
}
return PollResult::Stop;
}
- RELEASE_ASSERT(m_plan->stage == Plan::Preparing);
+ RELEASE_ASSERT(m_plan->stage() == Plan::Preparing);
m_worklist.m_numberOfActiveThreads++;
return PollResult::Work;
}
@@ -100,7 +100,7 @@
LockHolder locker(m_data.m_rightToRun);
{
LockHolder locker(*m_worklist.m_lock);
- if (m_plan->stage == Plan::Cancelled)
+ if (m_plan->stage() == Plan::Cancelled)
return WorkResult::Continue;
m_plan->notifyCompiling();
}
@@ -109,13 +109,13 @@
dataLog(m_worklist, ": Compiling ", m_plan->key(), " asynchronously\n");
// There's no way for the GC to be safepointing since we own rightToRun.
- if (m_plan->vm->heap.worldIsStopped()) {
+ if (m_plan->vm()->heap.worldIsStopped()) {
dataLog("Heap is stoped but here we are! (1)\n");
RELEASE_ASSERT_NOT_REACHED();
}
m_plan->compileInThread(&m_data);
- if (m_plan->stage != Plan::Cancelled) {
- if (m_plan->vm->heap.worldIsStopped()) {
+ if (m_plan->stage() != Plan::Cancelled) {
+ if (m_plan->vm()->heap.worldIsStopped()) {
dataLog("Heap is stopped but here we are! (2)\n");
RELEASE_ASSERT_NOT_REACHED();
}
@@ -123,7 +123,7 @@
{
LockHolder locker(*m_worklist.m_lock);
- if (m_plan->stage == Plan::Cancelled)
+ if (m_plan->stage() == Plan::Cancelled)
return WorkResult::Continue;
m_plan->notifyReady();
@@ -134,8 +134,8 @@
}
m_worklist.m_readyPlans.append(m_plan);
-
- RELEASE_ASSERT(!m_plan->vm->heap.worldIsStopped());
+
+ RELEASE_ASSERT(!m_plan->vm()->heap.worldIsStopped());
m_worklist.m_planCompiled.notifyAll();
}
@@ -223,7 +223,7 @@
LockHolder locker(*m_lock);
PlanMap::const_iterator end = m_plans.end();
for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
- if (iter->value->vm == &vm)
+ if (iter->value->vm() == &vm)
return true;
}
return false;
@@ -248,7 +248,7 @@
PlanMap::iterator iter = m_plans.find(key);
if (iter == m_plans.end())
return NotKnown;
- return iter->value->stage == Plan::Ready ? Compiled : Compiling;
+ return iter->value->stage() == Plan::Ready ? Compiled : Compiling;
}
void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
@@ -278,9 +278,9 @@
bool allAreCompiled = true;
PlanMap::iterator end = m_plans.end();
for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
- if (iter->value->vm != &vm)
+ if (iter->value->vm() != &vm)
continue;
- if (iter->value->stage != Plan::Ready) {
+ if (iter->value->stage() != Plan::Ready) {
allAreCompiled = false;
break;
}
@@ -299,9 +299,9 @@
LockHolder locker(*m_lock);
for (size_t i = 0; i < m_readyPlans.size(); ++i) {
RefPtr<Plan> plan = m_readyPlans[i];
- if (plan->vm != &vm)
+ if (plan->vm() != &vm)
continue;
- if (plan->stage != Plan::Ready)
+ if (plan->stage() != Plan::Ready)
continue;
myReadyPlans.append(plan);
m_readyPlans[i--] = m_readyPlans.last();
@@ -331,9 +331,9 @@
if (Options::verboseCompilationQueue())
dataLog(*this, ": Completing ", currentKey, "\n");
-
- RELEASE_ASSERT(plan->stage == Plan::Ready);
-
+
+ RELEASE_ASSERT(plan->stage() == Plan::Ready);
+
plan->finalizeAndNotifyCallback();
if (currentKey == requestedKey)
@@ -377,7 +377,7 @@
LockHolder locker(*m_lock);
for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
Plan* plan = iter->value.get();
- if (plan->vm != vm)
+ if (plan->vm() != vm)
continue;
plan->checkLivenessAndVisitChildren(visitor);
}
@@ -401,13 +401,13 @@
HashSet<CompilationKey> deadPlanKeys;
for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
Plan* plan = iter->value.get();
- if (plan->vm != &vm)
+ if (plan->vm() != &vm)
continue;
if (plan->isKnownToBeLiveDuringGC()) {
plan->finalizeInGC();
continue;
}
- RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet.
+ RELEASE_ASSERT(plan->stage() != Plan::Cancelled); // Should not be cancelled, yet.
ASSERT(!deadPlanKeys.contains(plan->key()));
deadPlanKeys.add(plan->key());
}
@@ -417,12 +417,12 @@
Deque<RefPtr<Plan>> newQueue;
while (!m_queue.isEmpty()) {
RefPtr<Plan> plan = m_queue.takeFirst();
- if (plan->stage != Plan::Cancelled)
+ if (plan->stage() != Plan::Cancelled)
newQueue.append(plan);
}
m_queue.swap(newQueue);
for (unsigned i = 0; i < m_readyPlans.size(); ++i) {
- if (m_readyPlans[i]->stage != Plan::Cancelled)
+ if (m_readyPlans[i]->stage() != Plan::Cancelled)
continue;
m_readyPlans[i--] = m_readyPlans.last();
m_readyPlans.removeLast();
@@ -451,9 +451,9 @@
Vector<RefPtr<Plan>> deadPlans;
for (auto& entry : m_plans) {
Plan* plan = entry.value.get();
- if (plan->vm != &vm)
+ if (plan->vm() != &vm)
continue;
- if (plan->stage == Plan::Compiling)
+ if (plan->stage() == Plan::Compiling)
continue;
deadPlanKeys.add(plan->key());
deadPlans.append(plan);
diff --git a/Source/JavaScriptCore/dfg/DFGWorklistInlines.h b/Source/JavaScriptCore/dfg/DFGWorklistInlines.h
index 8a5bfec..d264711 100644
--- a/Source/JavaScriptCore/dfg/DFGWorklistInlines.h
+++ b/Source/JavaScriptCore/dfg/DFGWorklistInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -47,7 +47,7 @@
LockHolder locker(*m_lock);
for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
Plan* plan = iter->value.get();
- if (plan->vm != &vm)
+ if (plan->vm() != &vm)
continue;
plan->iterateCodeBlocksForGC(func);
}
diff --git a/Source/JavaScriptCore/ftl/FTLCompile.cpp b/Source/JavaScriptCore/ftl/FTLCompile.cpp
index ca87da3..f8c6ee6 100644
--- a/Source/JavaScriptCore/ftl/FTLCompile.cpp
+++ b/Source/JavaScriptCore/ftl/FTLCompile.cpp
@@ -168,7 +168,7 @@
if (B3::Air::Disassembler* disassembler = state.proc->code().disassembler()) {
PrintStream& out = WTF::dataFile();
- out.print("Generated ", state.graph.m_plan.mode, " code for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ", instruction count = ", state.graph.m_codeBlock->instructionCount(), ":\n");
+ out.print("Generated ", state.graph.m_plan.mode(), " code for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ", instruction count = ", state.graph.m_codeBlock->instructionCount(), ":\n");
LinkBuffer& linkBuffer = *state.finalizer->b3CodeLinkBuffer;
B3::Value* currentB3Value = nullptr;
diff --git a/Source/JavaScriptCore/ftl/FTLFail.cpp b/Source/JavaScriptCore/ftl/FTLFail.cpp
index 5c6426a..1653350 100644
--- a/Source/JavaScriptCore/ftl/FTLFail.cpp
+++ b/Source/JavaScriptCore/ftl/FTLFail.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,7 +37,7 @@
void fail(State& state)
{
- state.graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(state.graph.m_plan);
+ state.graph.m_plan.setFinalizer(std::make_unique<FailedFinalizer>(state.graph.m_plan));
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp b/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
index 0919f0b..7643eda 100644
--- a/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
+++ b/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
@@ -75,21 +75,21 @@
MacroAssemblerCodeRef<JSEntryPtrTag> b3CodeRef =
FINALIZE_CODE_IF(dumpDisassembly, *b3CodeLinkBuffer, JSEntryPtrTag,
- "FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data());
+ "FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::FTLJIT)).data());
MacroAssemblerCodeRef<JSEntryPtrTag> arityCheckCodeRef = entrypointLinkBuffer
? FINALIZE_CODE_IF(dumpDisassembly, *entrypointLinkBuffer, JSEntryPtrTag,
- "FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data(), function)
+ "FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::FTLJIT)).data(), function)
: MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(b3CodeRef.code());
jitCode->initializeB3Code(b3CodeRef);
jitCode->initializeArityCheckEntrypoint(arityCheckCodeRef);
- m_plan.codeBlock->setJITCode(*jitCode);
+ m_plan.codeBlock()->setJITCode(*jitCode);
- if (UNLIKELY(m_plan.compilation))
- m_plan.vm->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock, *m_plan.compilation);
-
+ if (UNLIKELY(m_plan.compilation()))
+ m_plan.vm()->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock(), *m_plan.compilation());
+
return true;
}
diff --git a/Source/JavaScriptCore/ftl/FTLLink.cpp b/Source/JavaScriptCore/ftl/FTLLink.cpp
index 174fce9..3fca11e 100644
--- a/Source/JavaScriptCore/ftl/FTLLink.cpp
+++ b/Source/JavaScriptCore/ftl/FTLLink.cpp
@@ -53,9 +53,9 @@
state.jitCode->common.requiredRegisterCountForExit = graph.requiredRegisterCountForExit();
- if (!graph.m_plan.inlineCallFrames->isEmpty())
- state.jitCode->common.inlineCallFrames = graph.m_plan.inlineCallFrames;
-
+ if (!graph.m_plan.inlineCallFrames()->isEmpty())
+ state.jitCode->common.inlineCallFrames = graph.m_plan.inlineCallFrames();
+
graph.registerFrozenValues();
// Create the entrypoint. Note that we use this entrypoint totally differently
@@ -125,8 +125,8 @@
state.jitCode->common.compilation = compilation;
}
-
- switch (graph.m_plan.mode) {
+
+ switch (graph.m_plan.mode()) {
case FTLMode: {
bool requiresArityFixup = codeBlock->numParameters() != 1;
if (codeBlock->codeType() == FunctionCode && requiresArityFixup) {
diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
index 3ba87e1..761687b 100644
--- a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
+++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
@@ -6669,7 +6669,7 @@
storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
} else {
DFG_ASSERT(m_graph, m_node, variant.kind() == PutByIdVariant::Transition, variant.kind());
- m_graph.m_plan.transitions.addLazily(
+ m_graph.m_plan.transitions().addLazily(
codeBlock(), m_node->origin.semantic.codeOriginOwner(),
variant.oldStructureForTransition(), variant.newStructure());
@@ -16108,8 +16108,8 @@
Availability availability = availabilityMap.m_locals[i];
if (Options::validateFTLOSRExitLiveness()
- && m_graph.m_plan.mode != FTLForOSREntryMode) {
-
+ && m_graph.m_plan.mode() != FTLForOSREntryMode) {
+
if (availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), exitOrigin))
DFG_CRASH(m_graph, m_node, toCString("Live bytecode local not available: operand = ", VirtualRegister(operand), ", availability = ", availability, ", origin = ", exitOrigin).data());
}
@@ -16384,7 +16384,7 @@
void addWeakReference(JSCell* target)
{
- m_graph.m_plan.weakReferences.addLazily(target);
+ m_graph.m_plan.weakReferences().addLazily(target);
}
LValue loadStructure(LValue value)
diff --git a/Source/JavaScriptCore/ftl/FTLOutput.h b/Source/JavaScriptCore/ftl/FTLOutput.h
index 588edd8..9f59369 100644
--- a/Source/JavaScriptCore/ftl/FTLOutput.h
+++ b/Source/JavaScriptCore/ftl/FTLOutput.h
@@ -109,7 +109,7 @@
LValue weakPointer(DFG::Graph& graph, JSCell* cell)
{
- ASSERT(graph.m_plan.weakReferences.contains(cell));
+ ASSERT(graph.m_plan.weakReferences().contains(cell));
return constIntPtr(bitwise_cast<intptr_t>(cell));
}
@@ -117,7 +117,7 @@
template<typename Key>
LValue weakPoisonedPointer(DFG::Graph& graph, JSCell* cell)
{
- ASSERT(graph.m_plan.weakReferences.contains(cell));
+ ASSERT(graph.m_plan.weakReferences().contains(cell));
return constIntPtr(bitwise_cast<intptr_t>(cell) ^ Key::key());
}
diff --git a/Source/JavaScriptCore/ftl/FTLState.cpp b/Source/JavaScriptCore/ftl/FTLState.cpp
index 9670129..653ec6f 100644
--- a/Source/JavaScriptCore/ftl/FTLState.cpp
+++ b/Source/JavaScriptCore/ftl/FTLState.cpp
@@ -43,7 +43,7 @@
State::State(Graph& graph)
: graph(graph)
{
- switch (graph.m_plan.mode) {
+ switch (graph.m_plan.mode()) {
case FTLMode: {
jitCode = adoptRef(new JITCode());
break;
@@ -51,7 +51,7 @@
case FTLForOSREntryMode: {
RefPtr<ForOSREntryJITCode> code = adoptRef(new ForOSREntryJITCode());
code->initializeEntryBuffer(graph.m_vm, graph.m_profiledBlock->numCalleeLocals());
- code->setBytecodeIndex(graph.m_plan.osrEntryBytecodeIndex);
+ code->setBytecodeIndex(graph.m_plan.osrEntryBytecodeIndex());
jitCode = code;
break;
}
@@ -60,8 +60,8 @@
break;
}
- graph.m_plan.finalizer = std::make_unique<JITFinalizer>(graph.m_plan);
- finalizer = static_cast<JITFinalizer*>(graph.m_plan.finalizer.get());
+ graph.m_plan.setFinalizer(std::make_unique<JITFinalizer>(graph.m_plan));
+ finalizer = static_cast<JITFinalizer*>(graph.m_plan.finalizer());
proc = std::make_unique<Procedure>();