CodeBlock::jettison() should be implicit
https://bugs.webkit.org/show_bug.cgi?id=120567
Reviewed by Oliver Hunt.
This is a risky change from a performance standpoint, but I believe it's
necessary. This makes all CodeBlocks get swept by GC. Nobody but the GC
can delete CodeBlocks because the GC always holds a reference to them.
Once a CodeBlock is down to just one reference (i.e. the one from the GC),
the GC will free it, provided it is not on the stack.
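A minimal standalone sketch of that ownership rule, under stated assumptions: the
types below (FakeCodeBlock, FakeCodeBlockSet) are made-up stand-ins, not the actual
JSC classes. The set holds one reference to every block; a block whose only remaining
reference is the set's, and which the conservative stack scan did not mark as possibly
executing, is safe to delete at the end of a collection.

    // Hypothetical model of the "GC owns every CodeBlock" rule; not JSC code.
    #include <memory>
    #include <unordered_set>

    struct FakeCodeBlock {              // stand-in for JSC::CodeBlock
        bool mayBeExecuting { false };  // set when the stack scan finds this block
    };

    class FakeCodeBlockSet {            // stand-in for a CodeBlockSet-like container
    public:
        void add(std::shared_ptr<FakeCodeBlock> block) { m_set.insert(std::move(block)); }

        void clearMarks()
        {
            for (auto& block : m_set)
                block->mayBeExecuting = false;
        }

        // End of collection: drop every block that is neither referenced from
        // outside the set nor currently on the stack.
        void deleteUnmarkedAndUnreferenced()
        {
            for (auto it = m_set.begin(); it != m_set.end();) {
                // use_count() == 1 means the set's own reference is the last one.
                if ((*it).use_count() == 1 && !(*it)->mayBeExecuting)
                    it = m_set.erase(it);
                else
                    ++it;
            }
        }

    private:
        std::unordered_set<std::shared_ptr<FakeCodeBlock>> m_set;
    };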
This allows me to get rid of the jettisoning logic. We need this for FTL
tier-up. Well, we don't strictly need it, but it will help prevent a lot of bugs.
Previously, if you wanted to replace one code block with another, you
had to remember to tell the GC that the previous code block is
"jettisoned". We would need to do this when tiering up from DFG to FTL
and when dealing with DFG-to-FTL OSR entry code blocks. There are a lot
of permutations here - tiering up to the FTL, OSR entering into the FTL,
deciding that an OSR entry code block is not relevant anymore - just to
name a few. In each of these cases we'd have to jettison the previous
code block. It smells like a huge source of future bugs.
So I made jettisoning implicit by making the GC always watch out for a
CodeBlock being owned solely by the GC.
This change is performance neutral.
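For illustration only, continuing the hypothetical sketch above (FakeExecutable is
likewise made up): with this model, replacing one code block with another needs no
explicit jettison call. The caller just installs the new block; the old one is
reclaimed on a later collection once it is neither referenced nor on the stack.

    // Relies on the FakeCodeBlock/FakeCodeBlockSet sketch above; not JSC code.
    #include <memory>

    struct FakeExecutable {             // stand-in for a ScriptExecutable
        std::shared_ptr<FakeCodeBlock> installedCode;
    };

    int main()
    {
        FakeCodeBlockSet codeBlocks;
        FakeExecutable executable;

        auto baseline = std::make_shared<FakeCodeBlock>();
        codeBlocks.add(baseline);
        executable.installedCode = baseline;
        baseline.reset();

        // Tier-up: install the optimized block. The old block is now referenced
        // only by the set, so the next collection reclaims it implicitly.
        auto optimized = std::make_shared<FakeCodeBlock>();
        codeBlocks.add(optimized);
        executable.installedCode = optimized;

        codeBlocks.clearMarks();                    // nothing is "on the stack" here
        codeBlocks.deleteUnmarkedAndUnreferenced(); // old baseline block goes away
        return 0;
    }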
* CMakeLists.txt:
* GNUmakefile.list.am:
* JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
* JavaScriptCore.xcodeproj/project.pbxproj:
* Target.pri:
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::CodeBlock):
(JSC::CodeBlock::~CodeBlock):
(JSC::CodeBlock::visitAggregate):
(JSC::CodeBlock::jettison):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::setJITCode):
(JSC::CodeBlock::shouldImmediatelyAssumeLivenessDuringScan):
(JSC::CodeBlockSet::mark):
* dfg/DFGCommonData.h:
(JSC::DFG::CommonData::CommonData):
* heap/CodeBlockSet.cpp: Added.
(JSC::CodeBlockSet::CodeBlockSet):
(JSC::CodeBlockSet::~CodeBlockSet):
(JSC::CodeBlockSet::add):
(JSC::CodeBlockSet::clearMarks):
(JSC::CodeBlockSet::deleteUnmarkedAndUnreferenced):
(JSC::CodeBlockSet::traceMarked):
* heap/CodeBlockSet.h: Added.
* heap/ConservativeRoots.cpp:
(JSC::ConservativeRoots::add):
* heap/ConservativeRoots.h:
* heap/DFGCodeBlocks.cpp: Removed.
* heap/DFGCodeBlocks.h: Removed.
* heap/Heap.cpp:
(JSC::Heap::markRoots):
(JSC::Heap::deleteAllCompiledCode):
(JSC::Heap::deleteUnmarkedCompiledCode):
* heap/Heap.h:
* interpreter/JSStack.cpp:
(JSC::JSStack::gatherConservativeRoots):
* interpreter/JSStack.h:
* runtime/Executable.cpp:
(JSC::ScriptExecutable::installCode):
* runtime/Executable.h:
* runtime/VM.h:
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@154986 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index a2aecc7..52f7552 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1504,6 +1504,7 @@
, m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
{
+ ASSERT(m_heap->isDeferred());
setNumParameters(other.numParameters());
optimizeAfterWarmUp();
jitAfterWarmUp();
@@ -1516,6 +1517,9 @@
m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
}
+
+ m_heap->m_codeBlocks.add(this);
+ m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
}
CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
@@ -1544,7 +1548,7 @@
, m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
{
- m_vm->startedCompiling(this);
+ ASSERT(m_heap->isDeferred());
ASSERT(m_source);
setNumParameters(unlinkedCodeBlock->numParameters());
@@ -1842,7 +1846,8 @@
if (Options::dumpGeneratedBytecodes())
dumpBytecode();
- m_vm->finishedCompiling(this);
+ m_heap->m_codeBlocks.add(this);
+ m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
}
CodeBlock::~CodeBlock()
@@ -1850,12 +1855,6 @@
if (m_vm->m_perBytecodeProfiler)
m_vm->m_perBytecodeProfiler->notifyDestruction(this);
-#if ENABLE(DFG_JIT)
- // Remove myself from the set of DFG code blocks. Note that I may not be in this set
- // (because I'm not a DFG code block), in which case this is a no-op anyway.
- m_vm->heap.m_dfgCodeBlocks.m_set.remove(this);
-#endif
-
#if ENABLE(VERBOSE_VALUE_PROFILE)
dumpValueProfiles();
#endif
@@ -1905,33 +1904,29 @@
void CodeBlock::visitAggregate(SlotVisitor& visitor)
{
-#if ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
- if (JITCode::isOptimizingJIT(jitType())) {
- DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
-
- // I may be asked to scan myself more than once, and it may even happen concurrently.
- // To this end, use a CAS loop to check if I've been called already. Only one thread
- // may proceed past this point - whichever one wins the CAS race.
- unsigned oldValue;
- do {
- oldValue = dfgCommon->visitAggregateHasBeenCalled;
- if (oldValue) {
- // Looks like someone else won! Return immediately to ensure that we don't
- // trace the same CodeBlock concurrently. Doing so is hazardous since we will
- // be mutating the state of ValueProfiles, which contain JSValues, which can
- // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
- // that are nearly impossible to track down.
-
- // Also note that it must be safe to return early as soon as we see the
- // value true (well, (unsigned)1), since once a GC thread is in this method
- // and has won the CAS race (i.e. was responsible for setting the value true)
- // it will definitely complete the rest of this method before declaring
- // termination.
- return;
- }
- } while (!WTF::weakCompareAndSwap(&dfgCommon->visitAggregateHasBeenCalled, 0, 1));
- }
-#endif // ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
+#if ENABLE(PARALLEL_GC)
+ // I may be asked to scan myself more than once, and it may even happen concurrently.
+ // To this end, use a CAS loop to check if I've been called already. Only one thread
+ // may proceed past this point - whichever one wins the CAS race.
+ unsigned oldValue;
+ do {
+ oldValue = m_visitAggregateHasBeenCalled;
+ if (oldValue) {
+ // Looks like someone else won! Return immediately to ensure that we don't
+ // trace the same CodeBlock concurrently. Doing so is hazardous since we will
+ // be mutating the state of ValueProfiles, which contain JSValues, which can
+ // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
+ // that are nearly impossible to track down.
+
+ // Also note that it must be safe to return early as soon as we see the
+ // value true (well, (unsigned)1), since once a GC thread is in this method
+ // and has won the CAS race (i.e. was responsible for setting the value true)
+ // it will definitely complete the rest of this method before declaring
+ // termination.
+ return;
+ }
+ } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
+#endif // ENABLE(PARALLEL_GC)
if (!!m_alternative)
m_alternative->visitAggregate(visitor);
@@ -2740,28 +2735,14 @@
void CodeBlock::jettison()
{
+ DeferGC deferGC(*m_heap);
ASSERT(JITCode::isOptimizingJIT(jitType()));
ASSERT(this == replacement());
alternative()->optimizeAfterWarmUp();
tallyFrequentExitSites();
if (DFG::shouldShowDisassembly())
dataLog("Jettisoning ", *this, ".\n");
- jettisonImpl();
-}
-
-void ProgramCodeBlock::jettisonImpl()
-{
- static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm());
-}
-
-void EvalCodeBlock::jettisonImpl()
-{
- static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm());
-}
-
-void FunctionCodeBlock::jettisonImpl()
-{
- static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*vm(), m_isConstructor ? CodeForConstruct : CodeForCall);
+ alternative()->install();
}
#endif