Use dataLogIf more regularly
https://bugs.webkit.org/show_bug.cgi?id=206332
Reviewed by Keith Miller.
Source/JavaScriptCore:
There is lots of code that reads
if (Options::foobar())
dataLogLn("...")
There are a couple of benefits to replacing those with dataLogLnIf(Options::foobar(), "..."):
- Readability, by reducing the number of lines taken by logging
- Fewer lines wrongly appearing as not-taken in test coverage (wrongly because we probably don't care about the coverage of logging code)
- possibly a tiny perf benefit since dataLogIf correctly uses UNLIKELY.
This patch is a fairly trivial refactoring where I looked for that pattern and replaced it everywhere it appeared in JSC.
* bytecode/BytecodeGeneratorification.cpp:
(JSC::performGeneratorification):
* bytecode/BytecodeLivenessAnalysis.cpp:
(JSC::BytecodeLivenessAnalysis::BytecodeLivenessAnalysis):
* bytecode/CallLinkInfo.cpp:
(JSC::CallLinkInfo::visitWeak):
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::finalizeLLIntInlineCaches):
(JSC::CodeBlock::noticeIncomingCall):
(JSC::CodeBlock::optimizationThresholdScalingFactor):
(JSC::CodeBlock::optimizeNextInvocation):
(JSC::CodeBlock::dontOptimizeAnytimeSoon):
(JSC::CodeBlock::optimizeAfterWarmUp):
(JSC::CodeBlock::optimizeAfterLongWarmUp):
(JSC::CodeBlock::optimizeSoon):
(JSC::CodeBlock::forceOptimizationSlowPathConcurrently):
(JSC::CodeBlock::setOptimizationThresholdBasedOnCompilationResult):
(JSC::CodeBlock::shouldOptimizeNow):
* bytecode/DFGExitProfile.cpp:
(JSC::DFG::ExitProfile::add):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::parseCodeBlock):
* dfg/DFGCFAPhase.cpp:
* dfg/DFGJITCode.cpp:
(JSC::DFG::JITCode::optimizeNextInvocation):
(JSC::DFG::JITCode::dontOptimizeAnytimeSoon):
(JSC::DFG::JITCode::optimizeAfterWarmUp):
(JSC::DFG::JITCode::optimizeSoon):
(JSC::DFG::JITCode::forceOptimizationSlowPathConcurrently):
(JSC::DFG::JITCode::setOSREntryBlock):
* dfg/DFGJumpReplacement.cpp:
(JSC::DFG::JumpReplacement::fire):
* dfg/DFGOSREntry.cpp:
(JSC::DFG::prepareOSREntry):
* dfg/DFGOSRExit.cpp:
(JSC::DFG::OSRExit::compileExit):
* dfg/DFGObjectAllocationSinkingPhase.cpp:
* dfg/DFGOperations.cpp:
* dfg/DFGPlan.cpp:
(JSC::DFG::Plan::compileInThreadImpl):
* dfg/DFGToFTLDeferredCompilationCallback.cpp:
(JSC::DFG::ToFTLDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously):
(JSC::DFG::ToFTLDeferredCompilationCallback::compilationDidComplete):
* dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp:
(JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously):
(JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete):
* dfg/DFGWorklist.cpp:
(JSC::DFG::Worklist::completeAllReadyPlansForVM):
* ftl/FTLOSREntry.cpp:
(JSC::FTL::prepareOSREntry):
* heap/Heap.cpp:
(JSC::Heap::lastChanceToFinalize):
(JSC::Heap::sweepSynchronously):
(JSC::Heap::collectNow):
(JSC::Heap::runBeginPhase):
(JSC::Heap::runFixpointPhase):
(JSC::Heap::runReloopPhase):
(JSC::Heap::runEndPhase):
(JSC::Heap::finalize):
(JSC::Heap::willStartCollection):
(JSC::Heap::updateAllocationLimits):
(JSC::Heap::notifyIsSafeToCollect):
* heap/MarkStackMergingConstraint.cpp:
(JSC::MarkStackMergingConstraint::prepareToExecuteImpl):
* heap/MarkedSpace.cpp:
* heap/MarkingConstraint.cpp:
(JSC::MarkingConstraint::prepareToExecute):
* heap/MarkingConstraintSet.cpp:
(JSC::MarkingConstraintSet::executeConvergence):
(JSC::MarkingConstraintSet::executeConvergenceImpl):
(JSC::MarkingConstraintSet::executeAll):
* heap/MarkingConstraintSolver.cpp:
(JSC::MarkingConstraintSolver::execute):
* heap/SlotVisitor.cpp:
(JSC::SlotVisitor::appendToMarkStack):
(JSC::SlotVisitor::visitChildren):
(JSC::SlotVisitor::didRace):
* heap/StochasticSpaceTimeMutatorScheduler.cpp:
(JSC::StochasticSpaceTimeMutatorScheduler::beginCollection):
(JSC::StochasticSpaceTimeMutatorScheduler::didExecuteConstraints):
* jit/JIT.cpp:
(JSC::JIT::link):
* jit/JITExceptions.cpp:
(JSC::genericUnwind):
* jit/JITOperations.cpp:
* jit/JITToDFGDeferredCompilationCallback.cpp:
(JSC::JITToDFGDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously):
(JSC::JITToDFGDeferredCompilationCallback::compilationDidComplete):
* jit/JITWorklist.cpp:
(JSC::JITWorklist::Plan::finalize):
* jit/PolymorphicCallStubRoutine.cpp:
(JSC::PolymorphicCallNode::unlink):
* jit/Repatch.cpp:
(JSC::unlinkFor):
(JSC::linkVirtualFor):
* llint/LLIntSlowPaths.cpp:
(JSC::LLInt::jitCompileAndSetHeuristics):
(JSC::LLInt::entryOSR):
(JSC::LLInt::LLINT_SLOW_PATH_DECL):
* parser/ModuleAnalyzer.cpp:
(JSC::ModuleAnalyzer::analyze):
* runtime/JSModuleLoader.cpp:
(JSC::JSModuleLoader::importModule):
(JSC::JSModuleLoader::resolveSync):
(JSC::JSModuleLoader::fetch):
(JSC::JSModuleLoader::evaluate):
(JSC::moduleLoaderModuleDeclarationInstantiation):
* runtime/ScriptExecutable.cpp:
(JSC::ScriptExecutable::installCode):
* runtime/VM.cpp:
(JSC::VM::throwException):
* tools/CompilerTimingScope.cpp:
(JSC::CompilerTimingScope::CompilerTimingScope):
(JSC::CompilerTimingScope::~CompilerTimingScope):
* wasm/WasmMemory.cpp:
* wasm/js/JSWebAssembly.cpp:
(JSC::resolve):
* yarr/YarrJIT.cpp:
(JSC::Yarr::jitCompile):
* yarr/YarrPattern.cpp:
(JSC::Yarr::YarrPattern::compile):
Source/WTF:
* wtf/DataLog.h:
(WTF::dataLog): Marked NEVER_INLINE, since it should never be perf-sensitive
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@254714 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index 90be12c..b8866e0 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,5 +1,145 @@
2020-01-16 Robin Morisset <rmorisset@apple.com>
+ Use dataLogIf more regularly
+ https://bugs.webkit.org/show_bug.cgi?id=206332
+
+ Reviewed by Keith Miller.
+
+ There is lots of code that reads
+ if (Options::foobar())
+ dataLogLn("...")
+
+ There are a couple of benefits to replacing those by dataLogLnIf(Options::foobar(), "..."):
+ - Readability, by reducing the number of lines taken by logging
+ - Less lines appearing as not-taken in test coverage wrongly (wrongly because we probably don't care for the coverage of logging code)
+ - possibly a tiny perf benefit since dataLogIf correctly uses UNLIKELY.
+
+ This patch is a fairly trivial refactoring where I looked for that pattern and replaced it everywhere it appeared in JSC.
+
+ * bytecode/BytecodeGeneratorification.cpp:
+ (JSC::performGeneratorification):
+ * bytecode/BytecodeLivenessAnalysis.cpp:
+ (JSC::BytecodeLivenessAnalysis::BytecodeLivenessAnalysis):
+ * bytecode/CallLinkInfo.cpp:
+ (JSC::CallLinkInfo::visitWeak):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::finalizeLLIntInlineCaches):
+ (JSC::CodeBlock::noticeIncomingCall):
+ (JSC::CodeBlock::optimizationThresholdScalingFactor):
+ (JSC::CodeBlock::optimizeNextInvocation):
+ (JSC::CodeBlock::dontOptimizeAnytimeSoon):
+ (JSC::CodeBlock::optimizeAfterWarmUp):
+ (JSC::CodeBlock::optimizeAfterLongWarmUp):
+ (JSC::CodeBlock::optimizeSoon):
+ (JSC::CodeBlock::forceOptimizationSlowPathConcurrently):
+ (JSC::CodeBlock::setOptimizationThresholdBasedOnCompilationResult):
+ (JSC::CodeBlock::shouldOptimizeNow):
+ * bytecode/DFGExitProfile.cpp:
+ (JSC::DFG::ExitProfile::add):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::parseCodeBlock):
+ * dfg/DFGCFAPhase.cpp:
+ * dfg/DFGJITCode.cpp:
+ (JSC::DFG::JITCode::optimizeNextInvocation):
+ (JSC::DFG::JITCode::dontOptimizeAnytimeSoon):
+ (JSC::DFG::JITCode::optimizeAfterWarmUp):
+ (JSC::DFG::JITCode::optimizeSoon):
+ (JSC::DFG::JITCode::forceOptimizationSlowPathConcurrently):
+ (JSC::DFG::JITCode::setOSREntryBlock):
+ * dfg/DFGJumpReplacement.cpp:
+ (JSC::DFG::JumpReplacement::fire):
+ * dfg/DFGOSREntry.cpp:
+ (JSC::DFG::prepareOSREntry):
+ * dfg/DFGOSRExit.cpp:
+ (JSC::DFG::OSRExit::compileExit):
+ * dfg/DFGObjectAllocationSinkingPhase.cpp:
+ * dfg/DFGOperations.cpp:
+ * dfg/DFGPlan.cpp:
+ (JSC::DFG::Plan::compileInThreadImpl):
+ * dfg/DFGToFTLDeferredCompilationCallback.cpp:
+ (JSC::DFG::ToFTLDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously):
+ (JSC::DFG::ToFTLDeferredCompilationCallback::compilationDidComplete):
+ * dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp:
+ (JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously):
+ (JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete):
+ * dfg/DFGWorklist.cpp:
+ (JSC::DFG::Worklist::completeAllReadyPlansForVM):
+ * ftl/FTLOSREntry.cpp:
+ (JSC::FTL::prepareOSREntry):
+ * heap/Heap.cpp:
+ (JSC::Heap::lastChanceToFinalize):
+ (JSC::Heap::sweepSynchronously):
+ (JSC::Heap::collectNow):
+ (JSC::Heap::runBeginPhase):
+ (JSC::Heap::runFixpointPhase):
+ (JSC::Heap::runReloopPhase):
+ (JSC::Heap::runEndPhase):
+ (JSC::Heap::finalize):
+ (JSC::Heap::willStartCollection):
+ (JSC::Heap::updateAllocationLimits):
+ (JSC::Heap::notifyIsSafeToCollect):
+ * heap/MarkStackMergingConstraint.cpp:
+ (JSC::MarkStackMergingConstraint::prepareToExecuteImpl):
+ * heap/MarkedSpace.cpp:
+ * heap/MarkingConstraint.cpp:
+ (JSC::MarkingConstraint::prepareToExecute):
+ * heap/MarkingConstraintSet.cpp:
+ (JSC::MarkingConstraintSet::executeConvergence):
+ (JSC::MarkingConstraintSet::executeConvergenceImpl):
+ (JSC::MarkingConstraintSet::executeAll):
+ * heap/MarkingConstraintSolver.cpp:
+ (JSC::MarkingConstraintSolver::execute):
+ * heap/SlotVisitor.cpp:
+ (JSC::SlotVisitor::appendToMarkStack):
+ (JSC::SlotVisitor::visitChildren):
+ (JSC::SlotVisitor::didRace):
+ * heap/StochasticSpaceTimeMutatorScheduler.cpp:
+ (JSC::StochasticSpaceTimeMutatorScheduler::beginCollection):
+ (JSC::StochasticSpaceTimeMutatorScheduler::didExecuteConstraints):
+ * jit/JIT.cpp:
+ (JSC::JIT::link):
+ * jit/JITExceptions.cpp:
+ (JSC::genericUnwind):
+ * jit/JITOperations.cpp:
+ * jit/JITToDFGDeferredCompilationCallback.cpp:
+ (JSC::JITToDFGDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously):
+ (JSC::JITToDFGDeferredCompilationCallback::compilationDidComplete):
+ * jit/JITWorklist.cpp:
+ (JSC::JITWorklist::Plan::finalize):
+ * jit/PolymorphicCallStubRoutine.cpp:
+ (JSC::PolymorphicCallNode::unlink):
+ * jit/Repatch.cpp:
+ (JSC::unlinkFor):
+ (JSC::linkVirtualFor):
+ * llint/LLIntSlowPaths.cpp:
+ (JSC::LLInt::jitCompileAndSetHeuristics):
+ (JSC::LLInt::entryOSR):
+ (JSC::LLInt::LLINT_SLOW_PATH_DECL):
+ * parser/ModuleAnalyzer.cpp:
+ (JSC::ModuleAnalyzer::analyze):
+ * runtime/JSModuleLoader.cpp:
+ (JSC::JSModuleLoader::importModule):
+ (JSC::JSModuleLoader::resolveSync):
+ (JSC::JSModuleLoader::fetch):
+ (JSC::JSModuleLoader::evaluate):
+ (JSC::moduleLoaderModuleDeclarationInstantiation):
+ * runtime/ScriptExecutable.cpp:
+ (JSC::ScriptExecutable::installCode):
+ * runtime/VM.cpp:
+ (JSC::VM::throwException):
+ * tools/CompilerTimingScope.cpp:
+ (JSC::CompilerTimingScope::CompilerTimingScope):
+ (JSC::CompilerTimingScope::~CompilerTimingScope):
+ * wasm/WasmMemory.cpp:
+ * wasm/js/JSWebAssembly.cpp:
+ (JSC::resolve):
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::jitCompile):
+ * yarr/YarrPattern.cpp:
+ (JSC::Yarr::YarrPattern::compile):
+
+2020-01-16 Robin Morisset <rmorisset@apple.com>
+
Reduce the code generated by DFGSlowPathGenerator.h
https://bugs.webkit.org/show_bug.cgi?id=206330
diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
index 33d2bee..f5852d9 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
+++ b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
@@ -300,7 +300,7 @@
void performGeneratorification(BytecodeGenerator& bytecodeGenerator, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex)
{
- if (Options::dumpBytecodesBeforeGeneratorification())
+ if (UNLIKELY(Options::dumpBytecodesBeforeGeneratorification()))
CodeBlockBytecodeDumper<UnlinkedCodeBlock>::dumpBlock(codeBlock, instructions, WTF::dataFile());
BytecodeGeneratorification pass(bytecodeGenerator, codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex);
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
index 918e1d2..472a8d1 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
@@ -42,7 +42,7 @@
{
runLivenessFixpoint(codeBlock, codeBlock->instructions(), m_graph);
- if (Options::dumpBytecodeLivenessResults())
+ if (UNLIKELY(Options::dumpBytecodeLivenessResults()))
dumpResults(codeBlock);
}
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
index 6f1478b..d29f60e 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
@@ -209,7 +209,7 @@
if (isLinked()) {
if (stub()) {
if (!stub()->visitWeak(vm)) {
- if (Options::verboseOSR()) {
+ if (UNLIKELY(Options::verboseOSR())) {
dataLog(
"At ", m_codeOrigin, ", ", RawPointer(this), ": clearing call stub to ",
listDump(stub()->variants()), ", stub routine ", RawPointer(stub()),
@@ -220,14 +220,14 @@
}
} else if (!vm.heap.isMarked(m_calleeOrCodeBlock.get())) {
if (isDirect()) {
- if (Options::verboseOSR()) {
+ if (UNLIKELY(Options::verboseOSR())) {
dataLog(
"Clearing call to ", RawPointer(codeBlock()), " (",
pointerDump(codeBlock()), ").\n");
}
} else {
if (callee()->type() == JSFunctionType) {
- if (Options::verboseOSR()) {
+ if (UNLIKELY(Options::verboseOSR())) {
dataLog(
"Clearing call to ",
RawPointer(callee()), " (",
@@ -236,14 +236,14 @@
}
handleSpecificCallee(static_cast<JSFunction*>(callee()));
} else {
- if (Options::verboseOSR())
+ if (UNLIKELY(Options::verboseOSR()))
dataLog("Clearing call to ", RawPointer(callee()), ".\n");
m_clearedByGC = true;
}
}
unlink(vm);
} else if (isDirect() && !vm.heap.isMarked(m_lastSeenCalleeOrExecutable.get())) {
- if (Options::verboseOSR()) {
+ if (UNLIKELY(Options::verboseOSR())) {
dataLog(
"Clearing call to ", RawPointer(executable()),
" because the executable is dead.\n");
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 3f32fe8..aaefad2 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1219,8 +1219,7 @@
StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
return;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt property access.\n");
+ dataLogLnIf(Options::verboseOSR(), "Clearing LLInt property access.");
LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
});
@@ -1228,8 +1227,7 @@
StructureID oldStructureID = metadata.m_structureID;
if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
return;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt property access.\n");
+ dataLogLnIf(Options::verboseOSR(), "Clearing LLInt property access.");
metadata.m_structureID = 0;
metadata.m_offset = 0;
});
@@ -1242,8 +1240,7 @@
&& (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
&& (!chain || vm.heap.isMarked(chain)))
return;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt put transition.\n");
+ dataLogLnIf(Options::verboseOSR(), "Clearing LLInt put transition.");
metadata.m_oldStructureID = 0;
metadata.m_offset = 0;
metadata.m_newStructureID = 0;
@@ -1292,8 +1289,7 @@
WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
return;
- if (Options::verboseOSR())
- dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
+ dataLogLnIf(Options::verboseOSR(), "Clearing dead symbolTable ", RawPointer(symbolTable.get()));
symbolTable.clear();
});
@@ -1305,8 +1301,7 @@
WriteBarrierBase<Structure>& structure = metadata.m_structure;
if (!structure || vm.heap.isMarked(structure.get()))
return;
- if (Options::verboseOSR())
- dataLogF("Clearing scope access with structure %p.\n", structure.get());
+ dataLogLnIf(Options::verboseOSR(), "Clearing scope access with structure ", RawPointer(structure.get()));
structure.clear();
};
@@ -1321,8 +1316,7 @@
auto& instruction = instructions().at(std::get<1>(pair.key));
OpcodeID opcode = instruction->opcodeID();
if (opcode == op_get_by_id) {
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt property access.\n");
+ dataLogLnIf(Options::verboseOSR(), "Clearing LLInt property access.");
LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
}
return true;
@@ -1341,8 +1335,7 @@
forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) {
- if (Options::verboseOSR())
- dataLog("Clearing LLInt call from ", *this, "\n");
+ dataLogLnIf(Options::verboseOSR(), "Clearing LLInt call from ", *this);
callLinkInfo.unlink();
}
if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee()))
@@ -2178,8 +2171,7 @@
{
CodeBlock* callerCodeBlock = callerFrame->codeBlock();
- if (Options::verboseCallLink())
- dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
+ dataLogLnIf(Options::verboseCallLink(), "Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this);
#if ENABLE(DFG_JIT)
if (!m_shouldAlwaysBeInlined)
@@ -2187,8 +2179,7 @@
if (!callerCodeBlock) {
m_shouldAlwaysBeInlined = false;
- if (Options::verboseCallLink())
- dataLog(" Clearing SABI because caller is native.\n");
+ dataLogLnIf(Options::verboseCallLink(), " Clearing SABI because caller is native.");
return;
}
@@ -2203,8 +2194,7 @@
if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
m_shouldAlwaysBeInlined = false;
- if (Options::verboseCallLink())
- dataLog(" Clearing SABI because caller is too large.\n");
+ dataLogLnIf(Options::verboseCallLink(), " Clearing SABI because caller is too large.");
return;
}
@@ -2214,15 +2204,13 @@
// ensures that a function is SABI only if it is called no more frequently than
// any of its callers.
m_shouldAlwaysBeInlined = false;
- if (Options::verboseCallLink())
- dataLog(" Clearing SABI because caller is in LLInt.\n");
+ dataLogLnIf(Options::verboseCallLink(), " Clearing SABI because caller is in LLInt.");
return;
}
if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
m_shouldAlwaysBeInlined = false;
- if (Options::verboseCallLink())
- dataLog(" Clearing SABI bcause caller was already optimized.\n");
+ dataLogLnIf(Options::verboseCallLink(), " Clearing SABI bcause caller was already optimized.");
return;
}
@@ -2231,8 +2219,7 @@
// optimized anytime soon. For eval code this is particularly true since we
// delay eval optimization by a *lot*.
m_shouldAlwaysBeInlined = false;
- if (Options::verboseCallLink())
- dataLog(" Clearing SABI because caller is not a function.\n");
+ dataLogLnIf(Options::verboseCallLink(), " Clearing SABI because caller is not a function.");
return;
}
@@ -2241,8 +2228,7 @@
vm().topCallFrame->iterate(vm(), functor);
if (functor.didRecurse()) {
- if (Options::verboseCallLink())
- dataLog(" Clearing SABI because recursion was detected.\n");
+ dataLogLnIf(Options::verboseCallLink(), " Clearing SABI because recursion was detected.");
m_shouldAlwaysBeInlined = false;
return;
}
@@ -2255,8 +2241,7 @@
if (canCompile(callerCodeBlock->capabilityLevelState()))
return;
- if (Options::verboseCallLink())
- dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
+ dataLogLnIf(Options::verboseCallLink(), " Clearing SABI because the caller is not a DFG candidate.");
m_shouldAlwaysBeInlined = false;
#endif
@@ -2404,12 +2389,9 @@
result *= codeTypeThresholdMultiplier();
- if (Options::verboseOSR()) {
- dataLog(
- *this, ": bytecode cost is ", bytecodeCost,
- ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
- "\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ *this, ": bytecode cost is ", bytecodeCost,
+ ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier());
return result;
}
@@ -2498,22 +2480,19 @@
void CodeBlock::optimizeNextInvocation()
{
- if (Options::verboseOSR())
- dataLog(*this, ": Optimizing next invocation.\n");
+ dataLogLnIf(Options::verboseOSR(), *this, ": Optimizing next invocation.");
m_jitExecuteCounter.setNewThreshold(0, this);
}
void CodeBlock::dontOptimizeAnytimeSoon()
{
- if (Options::verboseOSR())
- dataLog(*this, ": Not optimizing anytime soon.\n");
+ dataLogLnIf(Options::verboseOSR(), *this, ": Not optimizing anytime soon.");
m_jitExecuteCounter.deferIndefinitely();
}
void CodeBlock::optimizeAfterWarmUp()
{
- if (Options::verboseOSR())
- dataLog(*this, ": Optimizing after warm-up.\n");
+ dataLogLnIf(Options::verboseOSR(), *this, ": Optimizing after warm-up.");
#if ENABLE(DFG_JIT)
m_jitExecuteCounter.setNewThreshold(
adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
@@ -2522,8 +2501,7 @@
void CodeBlock::optimizeAfterLongWarmUp()
{
- if (Options::verboseOSR())
- dataLog(*this, ": Optimizing after long warm-up.\n");
+ dataLogLnIf(Options::verboseOSR(), *this, ": Optimizing after long warm-up.");
#if ENABLE(DFG_JIT)
m_jitExecuteCounter.setNewThreshold(
adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
@@ -2532,8 +2510,7 @@
void CodeBlock::optimizeSoon()
{
- if (Options::verboseOSR())
- dataLog(*this, ": Optimizing soon.\n");
+ dataLogLnIf(Options::verboseOSR(), *this, ": Optimizing soon.");
#if ENABLE(DFG_JIT)
m_jitExecuteCounter.setNewThreshold(
adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
@@ -2542,8 +2519,7 @@
void CodeBlock::forceOptimizationSlowPathConcurrently()
{
- if (Options::verboseOSR())
- dataLog(*this, ": Forcing slow path concurrently.\n");
+ dataLogLnIf(Options::verboseOSR(), *this, ": Forcing slow path concurrently.");
m_jitExecuteCounter.forceSlowPathConcurrently();
}
@@ -2552,7 +2528,7 @@
{
JITType type = jitType();
if (type != JITType::BaselineJIT) {
- dataLog(*this, ": expected to have baseline code but have ", type, "\n");
+ dataLogLn(*this, ": expected to have baseline code but have ", type);
CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
}
@@ -2756,8 +2732,7 @@
bool CodeBlock::shouldOptimizeNow()
{
- if (Options::verboseOSR())
- dataLog("Considering optimizing ", *this, "...\n");
+ dataLogLnIf(Options::verboseOSR(), "Considering optimizing ", *this, "...");
if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
return true;
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
index a82f92d..2551372 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
@@ -50,8 +50,7 @@
CODEBLOCK_LOG_EVENT(owner, "frequentExit", (site));
- if (Options::verboseExitProfile())
- dataLog(pointerDump(owner), ": Adding exit site: ", site, "\n");
+ dataLogLnIf(Options::verboseExitProfile(), pointerDump(owner), ": Adding exit site: ", site);
ExitProfile& profile = owner->unlinkedCodeBlock()->exitProfile();
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 7effad0..9ffc555 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -7421,7 +7421,7 @@
deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
}
- if (Options::dumpBytecodeAtDFGTime()) {
+ if (UNLIKELY(Options::dumpBytecodeAtDFGTime())) {
dataLog("Parsing ", *codeBlock);
if (inlineCallFrame()) {
dataLog(
@@ -7435,7 +7435,7 @@
Vector<InstructionStream::Offset, 32> jumpTargets;
computePreciseJumpTargets(codeBlock, jumpTargets);
- if (Options::dumpBytecodeAtDFGTime()) {
+ if (UNLIKELY(Options::dumpBytecodeAtDFGTime())) {
dataLog("Jump targets: ");
CommaPrinter comma;
for (unsigned i = 0; i < jumpTargets.size(); ++i)
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
index a240149..f9b4597 100644
--- a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
@@ -268,7 +268,7 @@
AbstractInterpreter<InPlaceAbstractState> m_interpreter;
BlockSet m_blocksWithOSR;
- bool m_verbose;
+ const bool m_verbose;
bool m_changed;
unsigned m_count;
diff --git a/Source/JavaScriptCore/dfg/DFGJITCode.cpp b/Source/JavaScriptCore/dfg/DFGJITCode.cpp
index 9b21df3..2de9f1a 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCode.cpp
@@ -128,24 +128,21 @@
void JITCode::optimizeNextInvocation(CodeBlock* codeBlock)
{
ASSERT(codeBlock->jitType() == JITType::DFGJIT);
- if (Options::verboseOSR())
- dataLog(*codeBlock, ": FTL-optimizing next invocation.\n");
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": FTL-optimizing next invocation.");
tierUpCounter.setNewThreshold(0, codeBlock);
}
void JITCode::dontOptimizeAnytimeSoon(CodeBlock* codeBlock)
{
ASSERT(codeBlock->jitType() == JITType::DFGJIT);
- if (Options::verboseOSR())
- dataLog(*codeBlock, ": Not FTL-optimizing anytime soon.\n");
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Not FTL-optimizing anytime soon.");
tierUpCounter.deferIndefinitely();
}
void JITCode::optimizeAfterWarmUp(CodeBlock* codeBlock)
{
ASSERT(codeBlock->jitType() == JITType::DFGJIT);
- if (Options::verboseOSR())
- dataLog(*codeBlock, ": FTL-optimizing after warm-up.\n");
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": FTL-optimizing after warm-up.");
CodeBlock* baseline = codeBlock->baselineVersion();
tierUpCounter.setNewThreshold(
baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeAfterWarmUp()),
@@ -155,8 +152,7 @@
void JITCode::optimizeSoon(CodeBlock* codeBlock)
{
ASSERT(codeBlock->jitType() == JITType::DFGJIT);
- if (Options::verboseOSR())
- dataLog(*codeBlock, ": FTL-optimizing soon.\n");
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": FTL-optimizing soon.");
CodeBlock* baseline = codeBlock->baselineVersion();
tierUpCounter.setNewThreshold(
baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeSoon()),
@@ -166,8 +162,7 @@
void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock* codeBlock)
{
ASSERT(codeBlock->jitType() == JITType::DFGJIT);
- if (Options::verboseOSR())
- dataLog(*codeBlock, ": Forcing slow path concurrently for FTL entry.\n");
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Forcing slow path concurrently for FTL entry.");
tierUpCounter.forceSlowPathConcurrently();
}
@@ -204,8 +199,8 @@
void JITCode::setOSREntryBlock(VM& vm, const JSCell* owner, CodeBlock* osrEntryBlock)
{
if (Options::verboseOSR()) {
- dataLog(RawPointer(this), ": Setting OSR entry block to ", RawPointer(osrEntryBlock), "\n");
- dataLog("OSR entries will go to ", osrEntryBlock->jitCode()->ftlForOSREntry()->addressForCall(ArityCheckNotRequired), "\n");
+ dataLogLn(RawPointer(this), ": Setting OSR entry block to ", RawPointer(osrEntryBlock));
+ dataLogLn("OSR entries will go to ", osrEntryBlock->jitCode()->ftlForOSREntry()->addressForCall(ArityCheckNotRequired));
}
m_osrEntryBlock.set(vm, owner, osrEntryBlock);
}
diff --git a/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp b/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp
index 1ebf694..9d3994c 100644
--- a/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp
@@ -36,8 +36,9 @@
void JumpReplacement::fire()
{
- if (Options::dumpDisassembly())
- dataLogF("Firing jump replacement watchpoint from %p, to %p.\n", m_source.dataLocation(), m_destination.dataLocation());
+ dataLogLnIf(Options::dumpDisassembly(),
+ "Firing jump replacement watchpoint from ", RawPointer(m_source.dataLocation()),
+ " to ", RawPointer(m_destination.dataLocation()));
MacroAssembler::replaceWithJump(m_source, m_destination);
}
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
index 4497308..a20d7d3 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
@@ -103,11 +103,9 @@
if (!Options::useOSREntryToDFG())
return nullptr;
- if (Options::verboseOSR()) {
- dataLog(
- "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
- " from ", bytecodeIndex, "\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
+ " from ", bytecodeIndex);
sanitizeStackForVM(vm);
@@ -134,8 +132,7 @@
// be super rare. For now, if it does happen, it'll cause some compilation
// thrashing.
- if (Options::verboseOSR())
- dataLog(" OSR failed because the target code block is not DFG.\n");
+ dataLogLnIf(Options::verboseOSR(), " OSR failed because the target code block is not DFG.");
return nullptr;
}
@@ -143,8 +140,7 @@
OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);
if (!entry) {
- if (Options::verboseOSR())
- dataLogF(" OSR failed because the entrypoint was optimized out.\n");
+ dataLogLnIf(Options::verboseOSR(), " OSR failed because the entrypoint was optimized out.");
return nullptr;
}
@@ -182,11 +178,9 @@
value = callFrame->argument(argument - 1);
if (!entry->m_expectedValues.argument(argument).validateOSREntryValue(value, FlushedJSValue)) {
- if (Options::verboseOSR()) {
- dataLog(
- " OSR failed because argument ", argument, " is ", value,
- ", expected ", entry->m_expectedValues.argument(argument), ".\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ " OSR failed because argument ", argument, " is ", value,
+ ", expected ", entry->m_expectedValues.argument(argument));
return nullptr;
}
}
@@ -237,13 +231,11 @@
unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
if (UNLIKELY(!vm.ensureStackCapacityFor(&callFrame->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()]))) {
- if (Options::verboseOSR())
- dataLogF(" OSR failed because stack growth failed.\n");
+ dataLogLnIf(Options::verboseOSR(), " OSR failed because stack growth failed.");
return nullptr;
}
- if (Options::verboseOSR())
- dataLogF(" OSR should succeed.\n");
+ dataLogLnIf(Options::verboseOSR(), " OSR should succeed.");
// At this point we're committed to entering. We will do some work to set things up,
// but we also rely on our caller recognizing that when we return a non-null pointer,
@@ -262,8 +254,7 @@
void* targetPC = entry->m_machineCode.executableAddress();
RELEASE_ASSERT(codeBlock->jitCode()->contains(entry->m_machineCode.untaggedExecutableAddress()));
- if (Options::verboseOSR())
- dataLogF(" OSR using target PC %p.\n", targetPC);
+ dataLogLnIf(Options::verboseOSR(), " OSR using target PC ", RawPointer(targetPC));
RELEASE_ASSERT(targetPC);
*bitwise_cast<void**>(scratch + 1) = retagCodePtr(targetPC, OSREntryPtrTag, bitwise_cast<PtrTag>(callFrame));
@@ -324,8 +315,7 @@
*bitwise_cast<CodeBlock**>(pivot - 1 - CallFrameSlot::codeBlock) = codeBlock;
- if (Options::verboseOSR())
- dataLogF(" OSR returning data buffer %p.\n", scratch);
+ dataLogLnIf(Options::verboseOSR(), " OSR returning data buffer ", RawPointer(scratch));
return scratch;
}
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index 1b8e278..42cb5e6 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -1110,7 +1110,7 @@
jit.jitAssertTagsInPlace();
// Pro-forma stuff.
- if (Options::printEachOSRExit()) {
+ if (UNLIKELY(Options::printEachOSRExit())) {
SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
debugInfo->codeBlock = jit.codeBlock();
debugInfo->kind = exit.m_kind;
diff --git a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp b/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
index a99597f..e25b68a 100644
--- a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
@@ -807,7 +807,7 @@
m_combinedLiveness = CombinedLiveness(m_graph);
CString graphBeforeSinking;
- if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
+ if (UNLIKELY(Options::verboseValidationFailure() && Options::validateGraphAtEachPhase())) {
StringPrintStream out;
m_graph.dump(out);
graphBeforeSinking = out.toCString();
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index bb5c835..d208479 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -3684,8 +3684,7 @@
sanitizeStackForVM(codeBlock->vm());
- if (Options::verboseOSR())
- dataLog(*codeBlock, ": Entered reoptimize\n");
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Entered reoptimize");
// We must be called with the baseline code block.
ASSERT(JITCode::isBaselineCode(codeBlock->jitType()));
@@ -3696,8 +3695,7 @@
// sure bet that we don't have anything else left to do.
CodeBlock* replacement = codeBlock->replacement();
if (!replacement || replacement == codeBlock) {
- if (Options::verboseOSR())
- dataLog(*codeBlock, ": Not reoptimizing because we've already been jettisoned.\n");
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Not reoptimizing because we've already been jettisoned.");
return;
}
@@ -3723,8 +3721,7 @@
&& optimizedCodeBlock->shouldReoptimizeFromLoopNow();
if (!didExitABunch && !didGetStuckInLoop) {
- if (Options::verboseOSR())
- dataLog(*codeBlock, ": Not reoptimizing ", *optimizedCodeBlock, " because it either didn't exit enough or didn't loop enough after exit.\n");
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Not reoptimizing ", *optimizedCodeBlock, " because it either didn't exit enough or didn't loop enough after exit.");
codeBlock->optimizeAfterLongWarmUp();
return;
}
@@ -3742,8 +3739,7 @@
{
if (codeBlock->baselineVersion()->m_didFailFTLCompilation) {
CODEBLOCK_LOG_EVENT(codeBlock, "abortFTLCompile", ());
- if (Options::verboseOSR())
- dataLog("Deferring FTL-optimization of ", *codeBlock, " indefinitely because there was an FTL failure.\n");
+ dataLogLnIf(Options::verboseOSR(), "Deferring FTL-optimization of ", *codeBlock, " indefinitely because there was an FTL failure.");
jitCode->dontOptimizeAnytimeSoon(codeBlock);
return false;
}
@@ -3751,8 +3747,7 @@
if (!codeBlock->hasOptimizedReplacement()
&& !jitCode->checkIfOptimizationThresholdReached(codeBlock)) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayFTLCompile", ("counter = ", jitCode->tierUpCounter));
- if (Options::verboseOSR())
- dataLog("Choosing not to FTL-optimize ", *codeBlock, " yet.\n");
+ dataLogLnIf(Options::verboseOSR(), "Choosing not to FTL-optimize ", *codeBlock, " yet.");
return false;
}
return true;
@@ -3794,8 +3789,7 @@
CODEBLOCK_LOG_EVENT(codeBlock, "delayFTLCompile", ("compiled and failed"));
// This means that we finished compiling, but failed somehow; in that case the
// thresholds will be set appropriately.
- if (Options::verboseOSR())
- dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
+ dataLogLnIf(Options::verboseOSR(), "Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.");
return;
}
@@ -3827,11 +3821,8 @@
JITCode* jitCode = codeBlock->jitCode()->dfg();
- if (Options::verboseOSR()) {
- dataLog(
- *codeBlock, ": Entered triggerTierUpNow with executeCounter = ",
- jitCode->tierUpCounter, "\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ *codeBlock, ": Entered triggerTierUpNow with executeCounter = ", jitCode->tierUpCounter);
if (shouldTriggerFTLCompile(codeBlock, jitCode))
triggerFTLReplacementCompile(vm, codeBlock, jitCode);
@@ -3906,8 +3897,7 @@
if (iter != jitCode->bytecodeIndexToStreamIndex.end()) {
unsigned streamIndex = iter->value;
if (CodeBlock* entryBlock = jitCode->osrEntryBlock()) {
- if (Options::verboseOSR())
- dataLog("OSR entry: From ", RawPointer(jitCode), " got entry block ", RawPointer(entryBlock), "\n");
+ dataLogLnIf(Options::verboseOSR(), "OSR entry: From ", RawPointer(jitCode), " got entry block ", RawPointer(entryBlock));
if (void* address = FTL::prepareOSREntry(vm, callFrame, codeBlock, entryBlock, originBytecodeIndex, streamIndex)) {
CODEBLOCK_LOG_EVENT(entryBlock, "osrEntry", ("at ", originBytecodeIndex));
return retagCodePtr<char*>(address, JSEntryPtrTag, bitwise_cast<PtrTag>(callFrame));
@@ -3919,8 +3909,7 @@
if (worklistState == Worklist::Compiled) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayFTLCompile", ("compiled and failed"));
// This means that compilation failed and we already set the thresholds.
- if (Options::verboseOSR())
- dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
+ dataLogLnIf(Options::verboseOSR(), "Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.");
return nullptr;
}
@@ -4005,8 +3994,7 @@
// This is where we ask the outer to loop to immediately compile itself if program
// control reaches it.
- if (Options::verboseOSR())
- dataLog("Inner-loop ", originBytecodeIndex, " in ", *codeBlock, " setting parent loop ", osrEntryCandidate, "'s trigger and backing off.\n");
+ dataLogLnIf(Options::verboseOSR(), "Inner-loop ", originBytecodeIndex, " in ", *codeBlock, " setting parent loop ", osrEntryCandidate, "'s trigger and backing off.");
jitCode->tierUpEntryTriggers.set(osrEntryCandidate, JITCode::TriggerReason::StartCompilation);
return true;
}
@@ -4060,8 +4048,7 @@
// It's possible that the for-entry compile already succeeded. In that case OSR
// entry will succeed unless we ran out of stack. It's not clear what we should do.
// We signal to try again after a while if that happens.
- if (Options::verboseOSR())
- dataLog("Immediate OSR entry: From ", RawPointer(jitCode), " got entry block ", RawPointer(jitCode->osrEntryBlock()), "\n");
+ dataLogLnIf(Options::verboseOSR(), "Immediate OSR entry: From ", RawPointer(jitCode), " got entry block ", RawPointer(jitCode->osrEntryBlock()));
void* address = FTL::prepareOSREntry(vm, callFrame, codeBlock, jitCode->osrEntryBlock(), originBytecodeIndex, streamIndex);
if (!address)
@@ -4081,17 +4068,13 @@
sanitizeStackForVM(vm);
if (codeBlock->jitType() != JITType::DFGJIT) {
- dataLog("Unexpected code block in DFG->FTL trigger tier up now in loop: ", *codeBlock, "\n");
+ dataLogLn("Unexpected code block in DFG->FTL trigger tier up now in loop: ", *codeBlock);
RELEASE_ASSERT_NOT_REACHED();
}
JITCode* jitCode = codeBlock->jitCode()->dfg();
- if (Options::verboseOSR()) {
- dataLog(
- *codeBlock, ": Entered triggerTierUpNowInLoop with executeCounter = ",
- jitCode->tierUpCounter, "\n");
- }
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Entered triggerTierUpNowInLoop with executeCounter = ", jitCode->tierUpCounter);
if (jitCode->tierUpInLoopHierarchy.contains(bytecodeIndex))
tierUpCommon(vm, callFrame, bytecodeIndex, false);
@@ -4123,11 +4106,7 @@
JITCode* jitCode = codeBlock->jitCode()->dfg();
- if (Options::verboseOSR()) {
- dataLog(
- *codeBlock, ": Entered triggerOSREntryNow with executeCounter = ",
- jitCode->tierUpCounter, "\n");
- }
+ dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Entered triggerOSREntryNow with executeCounter = ", jitCode->tierUpCounter);
return tierUpCommon(vm, callFrame, bytecodeIndex, true);
}
diff --git a/Source/JavaScriptCore/dfg/DFGPlan.cpp b/Source/JavaScriptCore/dfg/DFGPlan.cpp
index d800079..9d6d102 100644
--- a/Source/JavaScriptCore/dfg/DFGPlan.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPlan.cpp
@@ -496,7 +496,7 @@
if (UNLIKELY(computeCompileTimes()))
m_timeBeforeFTL = MonotonicTime::now();
- if (Options::b3AlwaysFailsBeforeCompile()) {
+ if (UNLIKELY(Options::b3AlwaysFailsBeforeCompile())) {
FTL::fail(state);
return FTLPath;
}
@@ -505,7 +505,7 @@
if (safepointResult.didGetCancelled())
return CancelPath;
- if (Options::b3AlwaysFailsBeforeLink()) {
+ if (UNLIKELY(Options::b3AlwaysFailsBeforeLink())) {
FTL::fail(state);
return FTLPath;
}
diff --git a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp
index c67e7a00..eaf3516 100644
--- a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp
@@ -48,11 +48,9 @@
void ToFTLDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously(
CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock)
{
- if (Options::verboseOSR()) {
- dataLog(
- "Optimizing compilation of ", codeBlock, " (for ", profiledDFGCodeBlock,
- ") did become ready.\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Optimizing compilation of ", codeBlock, " (for ", profiledDFGCodeBlock,
+ ") did become ready.");
profiledDFGCodeBlock->jitCode()->dfg()->forceOptimizationSlowPathConcurrently(
profiledDFGCodeBlock);
@@ -61,18 +59,14 @@
void ToFTLDeferredCompilationCallback::compilationDidComplete(
CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result)
{
- if (Options::verboseOSR()) {
- dataLog(
- "Optimizing compilation of ", codeBlock, " (for ", profiledDFGCodeBlock,
- ") result: ", result, "\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Optimizing compilation of ", codeBlock, " (for ", profiledDFGCodeBlock,
+ ") result: ", result);
if (profiledDFGCodeBlock->replacement() != profiledDFGCodeBlock) {
- if (Options::verboseOSR()) {
- dataLog(
- "Dropping FTL code block ", codeBlock, " on the floor because the "
- "DFG code block ", profiledDFGCodeBlock, " was jettisoned.\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Dropping FTL code block ", codeBlock, " on the floor because the "
+ "DFG code block ", profiledDFGCodeBlock, " was jettisoned.");
return;
}
diff --git a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
index a42949c..02ac4f3 100644
--- a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
@@ -52,11 +52,9 @@
void ToFTLForOSREntryDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously(
CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock)
{
- if (Options::verboseOSR()) {
- dataLog(
- "Optimizing compilation of ", *codeBlock, " (for ", *profiledDFGCodeBlock,
- ") did become ready.\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Optimizing compilation of ", *codeBlock, " (for ", *profiledDFGCodeBlock,
+ ") did become ready.");
*m_forcedOSREntryTrigger = JITCode::TriggerReason::CompilationDone;
}
@@ -64,11 +62,9 @@
void ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete(
CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result)
{
- if (Options::verboseOSR()) {
- dataLog(
- "Optimizing compilation of ", *codeBlock, " (for ", *profiledDFGCodeBlock,
- ") result: ", result, "\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Optimizing compilation of ", *codeBlock, " (for ", *profiledDFGCodeBlock,
+ ") result: ", result);
JITCode* jitCode = profiledDFGCodeBlock->jitCode()->dfg();
diff --git a/Source/JavaScriptCore/dfg/DFGWorklist.cpp b/Source/JavaScriptCore/dfg/DFGWorklist.cpp
index e14eb2a8..65965aa 100644
--- a/Source/JavaScriptCore/dfg/DFGWorklist.cpp
+++ b/Source/JavaScriptCore/dfg/DFGWorklist.cpp
@@ -105,8 +105,7 @@
m_plan->notifyCompiling();
}
- if (Options::verboseCompilationQueue())
- dataLog(m_worklist, ": Compiling ", m_plan->key(), " asynchronously\n");
+ dataLogLnIf(Options::verboseCompilationQueue(), m_worklist, ": Compiling ", m_plan->key(), " asynchronously");
// There's no way for the GC to be safepointing since we own rightToRun.
if (m_plan->vm()->heap.worldIsStopped()) {
@@ -143,8 +142,7 @@
void threadDidStart() override
{
- if (Options::verboseCompilationQueue())
- dataLog(m_worklist, ": Thread started\n");
+ dataLogLnIf(Options::verboseCompilationQueue(), m_worklist, ": Thread started");
if (m_relativePriority)
Thread::current().changePriority(m_relativePriority);
@@ -156,8 +154,7 @@
{
// We're holding the Worklist::m_lock, so we should be careful not to deadlock.
- if (Options::verboseCompilationQueue())
- dataLog(m_worklist, ": Thread will stop\n");
+ dataLogLnIf(Options::verboseCompilationQueue(), m_worklist, ": Thread will stop");
ASSERT(!m_plan);
@@ -336,8 +333,7 @@
RefPtr<Plan> plan = myReadyPlans.takeLast();
CompilationKey currentKey = plan->key();
- if (Options::verboseCompilationQueue())
- dataLog(*this, ": Completing ", currentKey, "\n");
+ dataLogLnIf(Options::verboseCompilationQueue(), *this, ": Completing ", currentKey);
RELEASE_ASSERT(plan->stage() == Plan::Ready);
diff --git a/Source/JavaScriptCore/ftl/FTLOSREntry.cpp b/Source/JavaScriptCore/ftl/FTLOSREntry.cpp
index 9331771..afbf6ed 100644
--- a/Source/JavaScriptCore/ftl/FTLOSREntry.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOSREntry.cpp
@@ -52,27 +52,23 @@
dfgCode->clearOSREntryBlockAndResetThresholds(dfgCodeBlock);
return 0;
}
-
- if (Options::verboseOSR()) {
- dataLog(
- "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at ",
- bytecodeIndex, ".\n");
- }
+
+ dataLogLnIf(Options::verboseOSR(),
+ "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at ",
+ bytecodeIndex);
if (bytecodeIndex)
jsCast<ScriptExecutable*>(executable)->setDidTryToEnterInLoop(true);
if (bytecodeIndex != entryCode->bytecodeIndex()) {
- if (Options::verboseOSR())
- dataLog(" OSR failed because we don't have an entrypoint for ", bytecodeIndex, "; ours is for ", entryCode->bytecodeIndex(), "\n");
+ dataLogLnIf(Options::verboseOSR(), " OSR failed because we don't have an entrypoint for ", bytecodeIndex, "; ours is for ", entryCode->bytecodeIndex());
return 0;
}
Operands<Optional<JSValue>> values;
dfgCode->reconstruct(callFrame, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);
- if (Options::verboseOSR())
- dataLog(" Values at entry: ", values, "\n");
+ dataLogLnIf(Options::verboseOSR(), " Values at entry: ", values);
for (int argument = values.numberOfArguments(); argument--;) {
JSValue valueOnStack = callFrame->r(virtualRegisterForArgument(argument).offset()).asanUnsafeJSValue();
@@ -101,16 +97,14 @@
int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
if (UNLIKELY(!vm.ensureStackCapacityFor(&callFrame->registers()[virtualRegisterForLocal(stackFrameSize - 1).offset()]))) {
- if (Options::verboseOSR())
- dataLog(" OSR failed because stack growth failed.\n");
+ dataLogLnIf(Options::verboseOSR(), " OSR failed because stack growth failed.");
return 0;
}
callFrame->setCodeBlock(entryCodeBlock);
void* result = entryCode->addressForCall(ArityCheckNotRequired).executableAddress();
- if (Options::verboseOSR())
- dataLog(" Entry will succeed, going to address ", RawPointer(result), "\n");
+ dataLogLnIf(Options::verboseOSR(), " Entry will succeed, going to address ", RawPointer(result));
return result;
}
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
index 67540d9..e3e836e 100644
--- a/Source/JavaScriptCore/heap/Heap.cpp
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -396,7 +396,7 @@
void Heap::lastChanceToFinalize()
{
MonotonicTime before;
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
before = MonotonicTime::now();
dataLog("[GC<", RawPointer(this), ">: shutdown ");
}
@@ -414,17 +414,15 @@
}
m_collectContinuouslyThread->waitForCompletion();
}
-
- if (Options::logGC())
- dataLog("1");
+
+ dataLogIf(Options::logGC(), "1");
// Prevent new collections from being started. This is probably not even necessary, since we're not
// going to call into anything that starts collections. Still, this makes the algorithm more
// obviously sound.
m_isSafeToCollect = false;
- if (Options::logGC())
- dataLog("2");
+ dataLogIf(Options::logGC(), "2");
bool isCollecting;
{
@@ -433,8 +431,7 @@
isCollecting = m_lastServedTicket < m_lastGrantedTicket;
}
if (isCollecting) {
- if (Options::logGC())
- dataLog("...]\n");
+ dataLogIf(Options::logGC(), "...]\n");
// Wait for the current collection to finish.
waitForCollector(
@@ -443,11 +440,9 @@
return m_lastServedTicket == m_lastGrantedTicket;
});
- if (Options::logGC())
- dataLog("[GC<", RawPointer(this), ">: shutdown ");
+ dataLogIf(Options::logGC(), "[GC<", RawPointer(this), ">: shutdown ");
}
- if (Options::logGC())
- dataLog("3");
+ dataLogIf(Options::logGC(), "3");
RELEASE_ASSERT(m_requests.isEmpty());
RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
@@ -462,14 +457,12 @@
m_threadCondition->notifyOne(locker);
}
- if (Options::logGC())
- dataLog("4");
+ dataLogIf(Options::logGC(), "4");
if (!stopped)
m_thread->join();
- if (Options::logGC())
- dataLog("5 ");
+ dataLogIf(Options::logGC(), "5 ");
if (UNLIKELY(Options::dumpHeapStatisticsAtVMDestruction()))
dumpHeapStatisticsAtVMDestruction();
@@ -483,8 +476,7 @@
m_objectSpace.freeMemory();
- if (Options::logGC())
- dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
+ dataLogIf(Options::logGC(), (MonotonicTime::now() - before).milliseconds(), "ms]\n");
}
void Heap::releaseDelayedReleasedObjects()
@@ -1050,13 +1042,13 @@
void Heap::sweepSynchronously()
{
MonotonicTime before { };
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
dataLog("Full sweep: ", capacity() / 1024, "kb ");
before = MonotonicTime::now();
}
m_objectSpace.sweepBlocks();
m_objectSpace.shrink();
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
MonotonicTime after = MonotonicTime::now();
dataLog("=> ", capacity() / 1024, "kb, ", (after - before).milliseconds(), "ms");
}
@@ -1096,11 +1088,9 @@
bool alreadySweptInCollectSync = shouldSweepSynchronously();
if (!alreadySweptInCollectSync) {
- if (Options::logGC())
- dataLog("[GC<", RawPointer(this), ">: ");
+ dataLogIf(Options::logGC(), "[GC<", RawPointer(this), ">: ");
sweepSynchronously();
- if (Options::logGC())
- dataLog("]\n");
+ dataLogIf(Options::logGC(), "]\n");
}
m_objectSpace.assertNoUnswept();
@@ -1264,8 +1254,7 @@
m_currentRequest = m_requests.first();
}
- if (Options::logGC())
- dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
+ dataLogIf(Options::logGC(), "[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
m_beforeGC = MonotonicTime::now();
@@ -1273,7 +1262,7 @@
vm().random().setSeed(cryptographicallyRandomNumber());
if (m_collectionScope) {
- dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
+ dataLogLn("Collection scope already set during GC: ", *m_collectionScope);
RELEASE_ASSERT_NOT_REACHED();
}
@@ -1334,7 +1323,7 @@
m_constraintSet->didStartMarking();
m_scheduler->beginCollection();
- if (Options::logGC())
+ if (UNLIKELY(Options::logGC()))
m_scheduler->log();
// After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
@@ -1362,7 +1351,7 @@
SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
HashMap<const char*, size_t> visitMap;
forEachSlotVisitor(
[&] (SlotVisitor& slotVisitor) {
@@ -1406,8 +1395,7 @@
m_scheduler->didExecuteConstraints();
}
- if (Options::logGC())
- dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
+ dataLogIf(Options::logGC(), slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
{
ParallelModeEnabler enabler(slotVisitor);
@@ -1433,7 +1421,7 @@
m_scheduler->willResume();
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
}
@@ -1477,12 +1465,11 @@
NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
{
- if (Options::logGC())
- dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
+ dataLogIf(Options::logGC(), "[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
m_scheduler->didStop();
- if (Options::logGC())
+ if (UNLIKELY(Options::logGC()))
m_scheduler->log();
return changePhase(conn, CollectorPhase::Fixpoint);
@@ -1549,7 +1536,7 @@
m_objectSpace.dumpBits();
}
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
}
@@ -1562,8 +1549,7 @@
}
ParkingLot::unparkAll(&m_worldState);
- if (false)
- dataLog("GC END!\n");
+ dataLogLnIf(Options::logGC(), "GC END!");
setNeedFinalize();
@@ -2090,7 +2076,7 @@
void Heap::finalize()
{
MonotonicTime before;
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
before = MonotonicTime::now();
dataLog("[GC<", RawPointer(this), ">: finalize ");
}
@@ -2113,7 +2099,7 @@
if (shouldSweepSynchronously())
sweepSynchronously();
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
MonotonicTime after = MonotonicTime::now();
dataLog((after - before).milliseconds(), "ms]\n");
}
@@ -2177,22 +2163,15 @@
void Heap::willStartCollection()
{
- if (Options::logGC())
- dataLog("=> ");
+ dataLogIf(Options::logGC(), "=> ");
if (shouldDoFullCollection()) {
m_collectionScope = CollectionScope::Full;
m_shouldDoFullCollection = false;
- if (Options::logGC())
- dataLog("FullCollection, ");
- if (false)
- dataLog("Full collection!\n");
+ dataLogIf(Options::logGC(), "FullCollection, ");
} else {
m_collectionScope = CollectionScope::Eden;
- if (Options::logGC())
- dataLog("EdenCollection, ");
- if (false)
- dataLog("Eden collection!\n");
+ dataLogIf(Options::logGC(), "EdenCollection, ");
}
if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) {
m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
@@ -2351,8 +2330,7 @@
dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
m_bytesAllocatedThisCycle = 0;
- if (Options::logGC())
- dataLog("=> ", currentHeapSize / 1024, "kb, ");
+ dataLogIf(Options::logGC(), "=> ", currentHeapSize / 1024, "kb, ");
}
void Heap::didFinishCollection()
@@ -2891,7 +2869,7 @@
void Heap::notifyIsSafeToCollect()
{
MonotonicTime before;
- if (Options::logGC()) {
+ if (UNLIKELY(Options::logGC())) {
before = MonotonicTime::now();
dataLog("[GC<", RawPointer(this), ">: starting ");
}
@@ -2931,8 +2909,7 @@
});
}
- if (Options::logGC())
- dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
+ dataLogIf(Options::logGC(), (MonotonicTime::now() - before).milliseconds(), "ms]\n");
}
void Heap::preventCollection()
diff --git a/Source/JavaScriptCore/heap/MarkStackMergingConstraint.cpp b/Source/JavaScriptCore/heap/MarkStackMergingConstraint.cpp
index 01a1fda..05b546d 100644
--- a/Source/JavaScriptCore/heap/MarkStackMergingConstraint.cpp
+++ b/Source/JavaScriptCore/heap/MarkStackMergingConstraint.cpp
@@ -53,8 +53,7 @@
size_t size = m_heap.m_mutatorMarkStack->size() + m_heap.m_raceMarkStack->size();
visitor.addToVisitCount(size);
- if (Options::logGC())
- dataLog("(", size, ")");
+ dataLogIf(Options::logGC(), "(", size, ")");
}
void MarkStackMergingConstraint::executeImpl(SlotVisitor& visitor)
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp
index 149ce8d..e80b04c 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.cpp
+++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp
@@ -45,15 +45,14 @@
[] {
result = new Vector<size_t>();
- if (Options::dumpSizeClasses()) {
+ if (UNLIKELY(Options::dumpSizeClasses())) {
dataLog("Block size: ", MarkedBlock::blockSize, "\n");
dataLog("Footer size: ", sizeof(MarkedBlock::Footer), "\n");
}
auto add = [&] (size_t sizeClass) {
sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
- if (Options::dumpSizeClasses())
- dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), "Adding JSC MarkedSpace size class: ", sizeClass);
// Perform some validation as we go.
RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
if (result->isEmpty())
@@ -73,19 +72,14 @@
// the wasted space at the tail end of a MarkedBlock) while proceeding roughly in an exponential
// way starting at just above the precise size classes to four cells per block.
- if (Options::dumpSizeClasses())
- dataLog(" Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), " Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload));
for (unsigned i = 0; ; ++i) {
double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);
-
- if (Options::dumpSizeClasses())
- dataLog(" Next size class as a double: ", approximateSize, "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), " Next size class as a double: ", approximateSize);
size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);
-
- if (Options::dumpSizeClasses())
- dataLog(" Next size class as bytes: ", approximateSizeInBytes, "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), " Next size class as bytes: ", approximateSizeInBytes);
// Make sure that the computer did the math correctly.
RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);
@@ -95,25 +89,19 @@
size_t sizeClass =
WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);
-
- if (Options::dumpSizeClasses())
- dataLog(" Size class: ", sizeClass, "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), " Size class: ", sizeClass);
// Optimize the size class so that there isn't any slop at the end of the block's
// payload.
unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);
-
- if (Options::dumpSizeClasses())
- dataLog(" Possibly better size class: ", possiblyBetterSizeClass, "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), " Possibly better size class: ", possiblyBetterSizeClass);
// The size class we just came up with is better than the other one if it reduces
// total wastage assuming we only allocate cells of that size.
size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;
-
- if (Options::dumpSizeClasses())
- dataLog(" Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), " Original wastage: ", originalWastage, ", new wastage: ", newWastage);
size_t betterSizeClass;
if (newWastage > originalWastage)
@@ -121,8 +109,7 @@
else
betterSizeClass = possiblyBetterSizeClass;
- if (Options::dumpSizeClasses())
- dataLog(" Choosing size class: ", betterSizeClass, "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), " Choosing size class: ", betterSizeClass);
if (betterSizeClass == result->last()) {
// Defense for when expStep is small.
@@ -149,8 +136,7 @@
result->shrinkCapacity(it - result->begin());
}
- if (Options::dumpSizeClasses())
- dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");
+ dataLogLnIf(Options::dumpSizeClasses(), "JSC Heap MarkedSpace size class dump: ", listDump(*result));
// We have an optimiation in MarkedSpace::optimalSizeFor() that assumes things about
// the size class table. This checks our results against that function's assumptions.
diff --git a/Source/JavaScriptCore/heap/MarkingConstraint.cpp b/Source/JavaScriptCore/heap/MarkingConstraint.cpp
index 3bab767..6f4dc17 100644
--- a/Source/JavaScriptCore/heap/MarkingConstraint.cpp
+++ b/Source/JavaScriptCore/heap/MarkingConstraint.cpp
@@ -72,8 +72,7 @@
void MarkingConstraint::prepareToExecute(const AbstractLocker& constraintSolvingLocker, SlotVisitor& visitor)
{
- if (Options::logGC())
- dataLog(abbreviatedName());
+ dataLogIf(Options::logGC(), abbreviatedName());
VisitCounter visitCounter(visitor);
prepareToExecuteImpl(constraintSolvingLocker, visitor);
m_lastVisitCount = visitCounter.visitCount();
diff --git a/Source/JavaScriptCore/heap/MarkingConstraintSet.cpp b/Source/JavaScriptCore/heap/MarkingConstraintSet.cpp
index 1780c08..fea8082 100644
--- a/Source/JavaScriptCore/heap/MarkingConstraintSet.cpp
+++ b/Source/JavaScriptCore/heap/MarkingConstraintSet.cpp
@@ -83,8 +83,7 @@
bool MarkingConstraintSet::executeConvergence(SlotVisitor& visitor)
{
bool result = executeConvergenceImpl(visitor);
- if (Options::logGC())
- dataLog(" ");
+ dataLogIf(Options::logGC(), " ");
return result;
}
@@ -104,8 +103,7 @@
unsigned iteration = m_iteration++;
- if (Options::logGC())
- dataLog("i#", iteration, ":");
+ dataLogIf(Options::logGC(), "i#", iteration, ":");
if (iteration == 1) {
// First iteration is before any visitor draining, so it's unlikely to trigger any constraints
@@ -174,8 +172,7 @@
{
for (auto& constraint : m_set)
constraint->execute(visitor);
- if (Options::logGC())
- dataLog(" ");
+ dataLogIf(Options::logGC(), " ");
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkingConstraintSolver.cpp b/Source/JavaScriptCore/heap/MarkingConstraintSolver.cpp
index cbbbd31..3735bc9 100644
--- a/Source/JavaScriptCore/heap/MarkingConstraintSolver.cpp
+++ b/Source/JavaScriptCore/heap/MarkingConstraintSolver.cpp
@@ -61,14 +61,12 @@
RELEASE_ASSERT(!m_numThreadsThatMayProduceWork);
if (Options::useParallelMarkingConstraintSolver()) {
- if (Options::logGC())
- dataLog(preference == ParallelWorkFirst ? "P" : "N", "<");
+ dataLogIf(Options::logGC(), preference == ParallelWorkFirst ? "P" : "N", "<");
m_heap.runFunctionInParallel(
[&] (SlotVisitor& visitor) { runExecutionThread(visitor, preference, pickNext); });
- if (Options::logGC())
- dataLog(">");
+ dataLogIf(Options::logGC(), ">");
} else
runExecutionThread(m_mainVisitor, preference, pickNext);
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
index 1a7164e..f316fde 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.cpp
+++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp
@@ -292,7 +292,7 @@
{
ASSERT(m_heap.isMarked(cell));
#if CPU(X86_64)
- if (Options::dumpZappedCellCrashData()) {
+ if (UNLIKELY(Options::dumpZappedCellCrashData())) {
if (UNLIKELY(cell->isZapped()))
reportZappedCellAndCrash(cell);
}
@@ -397,7 +397,7 @@
// FIXME: This could be so much better.
// https://bugs.webkit.org/show_bug.cgi?id=162462
#if CPU(X86_64)
- if (Options::dumpZappedCellCrashData()) {
+ if (UNLIKELY(Options::dumpZappedCellCrashData())) {
Structure* structure = cell->structure(vm());
if (LIKELY(structure)) {
const MethodTable* methodTable = &structure->classInfo()->methodTable;
@@ -795,8 +795,7 @@
void SlotVisitor::didRace(const VisitRaceKey& race)
{
- if (Options::verboseVisitRace())
- dataLog(toCString("GC visit race: ", race, "\n"));
+ dataLogLnIf(Options::verboseVisitRace(), toCString("GC visit race: ", race));
auto locker = holdLock(heap()->m_raceMarkStackLock);
JSCell* cell = race.cell();
diff --git a/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp b/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp
index 158355a..0177d97 100644
--- a/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp
+++ b/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp
@@ -77,8 +77,7 @@
Options::concurrentGCMaxHeadroom() *
std::max<double>(m_bytesAllocatedThisCycleAtTheBeginning, m_heap.m_maxEdenSize);
- if (Options::logGC())
- dataLog("ca=", m_bytesAllocatedThisCycleAtTheBeginning / 1024, "kb h=", (m_bytesAllocatedThisCycleAtTheEnd - m_bytesAllocatedThisCycleAtTheBeginning) / 1024, "kb ");
+ dataLogIf(Options::logGC(), "ca=", m_bytesAllocatedThisCycleAtTheBeginning / 1024, "kb h=", (m_bytesAllocatedThisCycleAtTheEnd - m_bytesAllocatedThisCycleAtTheBeginning) / 1024, "kb ");
m_beforeConstraints = MonotonicTime::now();
}
@@ -110,8 +109,7 @@
constraintExecutionDuration * m_pauseScale,
m_minimumPause);
- if (Options::logGC())
- dataLog("tp=", m_targetPause.milliseconds(), "ms ");
+ dataLogIf(Options::logGC(), "tp=", m_targetPause.milliseconds(), "ms ");
m_plannedResumeTime = snapshot.now() + m_targetPause;
}
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index d771794..e4248f2 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -906,7 +906,7 @@
MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck);
- if (Options::dumpDisassembly()) {
+ if (UNLIKELY(Options::dumpDisassembly())) {
m_disassembler->dump(patchBuffer);
patchBuffer.didAlreadyDisassemble();
}
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index b7d0ca2..1610dce 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -47,7 +47,7 @@
{
auto scope = DECLARE_CATCH_SCOPE(vm);
CallFrame* topJSCallFrame = vm.topJSCallFrame();
- if (Options::breakOnThrow()) {
+ if (UNLIKELY(Options::breakOnThrow())) {
CodeBlock* codeBlock = topJSCallFrame->codeBlock();
dataLog("In call frame ", RawPointer(topJSCallFrame), " for code block ", codeBlock, "\n");
CRASH();
diff --git a/Source/JavaScriptCore/jit/JITOperations.cpp b/Source/JavaScriptCore/jit/JITOperations.cpp
index 082a783..dba1013 100644
--- a/Source/JavaScriptCore/jit/JITOperations.cpp
+++ b/Source/JavaScriptCore/jit/JITOperations.cpp
@@ -1539,8 +1539,7 @@
if (!codeBlock->checkIfOptimizationThresholdReached()) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("counter = ", codeBlock->jitExecuteCounter()));
codeBlock->updateAllPredictions();
- if (UNLIKELY(Options::verboseOSR()))
- dataLog("Choosing not to optimize ", *codeBlock, " yet, because the threshold hasn't been reached.\n");
+ dataLogLnIf(Options::verboseOSR(), "Choosing not to optimize ", *codeBlock, " yet, because the threshold hasn't been reached.");
return encodeResult(0, 0);
}
@@ -1554,8 +1553,7 @@
if (codeBlock->m_shouldAlwaysBeInlined) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("should always be inlined"));
updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
- if (UNLIKELY(Options::verboseOSR()))
- dataLog("Choosing not to optimize ", *codeBlock, " yet, because m_shouldAlwaysBeInlined == true.\n");
+ dataLogLnIf(Options::verboseOSR(), "Choosing not to optimize ", *codeBlock, " yet, because m_shouldAlwaysBeInlined == true.");
return encodeResult(0, 0);
}
@@ -1610,14 +1608,12 @@
if (!codeBlock->hasOptimizedReplacement()) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("compiled and failed"));
codeBlock->updateAllPredictions();
- if (UNLIKELY(Options::verboseOSR()))
- dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
+ dataLogLnIf(Options::verboseOSR(), "Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.");
return encodeResult(0, 0);
}
} else if (codeBlock->hasOptimizedReplacement()) {
CodeBlock* replacement = codeBlock->replacement();
- if (UNLIKELY(Options::verboseOSR()))
- dataLog("Considering OSR ", codeBlock, " -> ", replacement, ".\n");
+ dataLogLnIf(Options::verboseOSR(), "Considering OSR ", codeBlock, " -> ", replacement, ".");
// If we have an optimized replacement, then it must be the case that we entered
// cti_optimize from a loop. That's because if there's an optimized replacement,
// then all calls to this function will be relinked to the replacement and so
@@ -1633,27 +1629,22 @@
// additional checking anyway, to reduce the amount of recompilation thrashing.
if (replacement->shouldReoptimizeFromLoopNow()) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("should reoptimize from loop now"));
- if (UNLIKELY(Options::verboseOSR())) {
- dataLog(
- "Triggering reoptimization of ", codeBlock,
- "(", replacement, ") (in loop).\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Triggering reoptimization of ", codeBlock,
+ "(", replacement, ") (in loop).");
replacement->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTrigger, CountReoptimization);
return encodeResult(0, 0);
}
} else {
if (!codeBlock->shouldOptimizeNow()) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("insufficient profiling"));
- if (UNLIKELY(Options::verboseOSR())) {
- dataLog(
- "Delaying optimization for ", *codeBlock,
- " because of insufficient profiling.\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Delaying optimization for ", *codeBlock,
+ " because of insufficient profiling.");
return encodeResult(0, 0);
}
- if (UNLIKELY(Options::verboseOSR()))
- dataLog("Triggering optimized compilation of ", *codeBlock, "\n");
+ dataLogLnIf(Options::verboseOSR(), "Triggering optimized compilation of ", *codeBlock);
unsigned numVarsWithValues;
if (bytecodeIndex)
@@ -1685,10 +1676,7 @@
if (void* dataBuffer = DFG::prepareOSREntry(vm, callFrame, optimizedCodeBlock, bytecodeIndex)) {
CODEBLOCK_LOG_EVENT(optimizedCodeBlock, "osrEntry", ("at bc#", bytecodeIndex));
- if (UNLIKELY(Options::verboseOSR())) {
- dataLog(
- "Performing OSR ", codeBlock, " -> ", optimizedCodeBlock, ".\n");
- }
+ dataLogLnIf(Options::verboseOSR(), "Performing OSR ", codeBlock, " -> ", optimizedCodeBlock, ".");
codeBlock->optimizeSoon();
codeBlock->unlinkedCodeBlock()->setDidOptimize(TrueTriState);
@@ -1697,12 +1685,10 @@
return encodeResult(targetPC, dataBuffer);
}
- if (UNLIKELY(Options::verboseOSR())) {
- dataLog(
- "Optimizing ", codeBlock, " -> ", codeBlock->replacement(),
- " succeeded, OSR failed, after a delay of ",
- codeBlock->optimizationDelayCounter(), ".\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Optimizing ", codeBlock, " -> ", codeBlock->replacement(),
+ " succeeded, OSR failed, after a delay of ",
+ codeBlock->optimizationDelayCounter(), ".");
// Count the OSR failure as a speculation failure. If this happens a lot, then
// reoptimize.
@@ -1718,11 +1704,9 @@
// reoptimization trigger.
if (optimizedCodeBlock->shouldReoptimizeNow()) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("should reoptimize now"));
- if (UNLIKELY(Options::verboseOSR())) {
- dataLog(
- "Triggering reoptimization of ", codeBlock, " -> ",
- codeBlock->replacement(), " (after OSR fail).\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ "Triggering reoptimization of ", codeBlock, " -> ",
+ codeBlock->replacement(), " (after OSR fail).");
optimizedCodeBlock->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTriggerOnOSREntryFail, CountReoptimization);
return encodeResult(0, 0);
}
diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
index 9020d35..201ec9b 100644
--- a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
@@ -47,8 +47,7 @@
ASSERT_UNUSED(profiledDFGCodeBlock, !profiledDFGCodeBlock);
ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
- if (Options::verboseOSR())
- dataLog("Optimizing compilation of ", *codeBlock, " did become ready.\n");
+ dataLogLnIf(Options::verboseOSR(), "Optimizing compilation of ", *codeBlock, " did become ready.");
codeBlock->alternative()->forceOptimizationSlowPathConcurrently();
}
@@ -59,8 +58,7 @@
ASSERT(!profiledDFGCodeBlock);
ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
- if (Options::verboseOSR())
- dataLog("Optimizing compilation of ", *codeBlock, " result: ", result, "\n");
+ dataLogLnIf(Options::verboseOSR(), "Optimizing compilation of ", *codeBlock, " result: ", result);
if (result == CompilationSuccessful)
codeBlock->ownerExecutable()->installCode(codeBlock);
diff --git a/Source/JavaScriptCore/jit/JITWorklist.cpp b/Source/JavaScriptCore/jit/JITWorklist.cpp
index c648d46..1980eea 100644
--- a/Source/JavaScriptCore/jit/JITWorklist.cpp
+++ b/Source/JavaScriptCore/jit/JITWorklist.cpp
@@ -57,14 +57,12 @@
switch (result) {
case CompilationFailed:
CODEBLOCK_LOG_EVENT(m_codeBlock, "delayJITCompile", ("compilation failed"));
- if (Options::verboseOSR())
- dataLogF(" JIT compilation failed.\n");
+ dataLogLnIf(Options::verboseOSR(), " JIT compilation failed.");
m_codeBlock->dontJITAnytimeSoon();
m_codeBlock->m_didFailJITCompilation = true;
return;
case CompilationSuccessful:
- if (Options::verboseOSR())
- dataLogF(" JIT compilation successful.\n");
+ dataLogLnIf(Options::verboseOSR(), " JIT compilation successful.");
m_codeBlock->ownerExecutable()->installCode(m_codeBlock);
m_codeBlock->jitSoon();
return;
diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
index cfa6f98..3fe82f8 100644
--- a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
@@ -45,9 +45,7 @@
void PolymorphicCallNode::unlink(VM& vm)
{
if (m_callLinkInfo) {
- if (Options::dumpDisassembly())
- dataLog("Unlinking polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n");
-
+ dataLogLnIf(Options::dumpDisassembly(), "Unlinking polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin());
m_callLinkInfo->unlink(vm);
}
diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp
index 5918883..d13d6d5 100644
--- a/Source/JavaScriptCore/jit/Repatch.cpp
+++ b/Source/JavaScriptCore/jit/Repatch.cpp
@@ -1038,8 +1038,7 @@
void unlinkFor(VM& vm, CallLinkInfo& callLinkInfo)
{
- if (Options::dumpDisassembly())
- dataLog("Unlinking call at ", callLinkInfo.hotPathOther(), "\n");
+ dataLogLnIf(Options::dumpDisassembly(), "Unlinking call at ", callLinkInfo.hotPathOther());
revertCall(vm, callLinkInfo, vm.getCTIStub(linkCallThunkGenerator).retagged<JITStubRoutinePtrTag>());
}
@@ -1049,8 +1048,8 @@
CallFrame* callerFrame = callFrame->callerFrame();
CodeBlock* callerCodeBlock = callerFrame->codeBlock();
- if (shouldDumpDisassemblyFor(callerCodeBlock))
- dataLog("Linking virtual call at ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), "\n");
+ dataLogLnIf(shouldDumpDisassemblyFor(callerCodeBlock),
+ "Linking virtual call at ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()));
MacroAssemblerCodeRef<JITStubRoutinePtrTag> virtualThunk = virtualThunkFor(vm, callLinkInfo);
revertCall(vm, callLinkInfo, virtualThunk);
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index ac72abc..83867d1 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -378,8 +378,7 @@
if (!codeBlock->checkIfJITThresholdReached()) {
CODEBLOCK_LOG_EVENT(codeBlock, "delayJITCompile", ("threshold not reached, counter = ", codeBlock->llintExecuteCounter()));
- if (Options::verboseOSR())
- dataLogF(" JIT threshold should be lifted.\n");
+ dataLogLnIf(Options::verboseOSR(), " JIT threshold should be lifted.");
return false;
}
@@ -387,8 +386,7 @@
switch (codeBlock->jitType()) {
case JITType::BaselineJIT: {
- if (Options::verboseOSR())
- dataLogF(" Code was already compiled.\n");
+ dataLogLnIf(Options::verboseOSR(), " Code was already compiled.");
codeBlock->jitSoon();
return true;
}
@@ -405,11 +403,9 @@
static SlowPathReturnType entryOSR(CodeBlock* codeBlock, const char *name, EntryKind kind)
{
- if (Options::verboseOSR()) {
- dataLog(
- *codeBlock, ": Entered ", name, " with executeCounter = ",
- codeBlock->llintExecuteCounter(), "\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ *codeBlock, ": Entered ", name, " with executeCounter = ",
+ codeBlock->llintExecuteCounter());
if (!shouldJIT(codeBlock)) {
codeBlock->dontJITAnytimeSoon();
@@ -471,11 +467,9 @@
UNUSED_PARAM(globalObject);
#if ENABLE(JIT)
- if (Options::verboseOSR()) {
- dataLog(
+ dataLogLnIf(Options::verboseOSR(),
*codeBlock, ": Entered loop_osr with executeCounter = ",
- codeBlock->llintExecuteCounter(), "\n");
- }
+ codeBlock->llintExecuteCounter());
auto loopOSREntryBytecodeIndex = BytecodeIndex(codeBlock->bytecodeOffset(pc));
@@ -513,11 +507,9 @@
UNUSED_PARAM(globalObject);
#if ENABLE(JIT)
- if (Options::verboseOSR()) {
- dataLog(
- *codeBlock, ": Entered replace with executeCounter = ",
- codeBlock->llintExecuteCounter(), "\n");
- }
+ dataLogLnIf(Options::verboseOSR(),
+ *codeBlock, ": Entered replace with executeCounter = ",
+ codeBlock->llintExecuteCounter());
if (shouldJIT(codeBlock))
jitCompileAndSetHeuristics(vm, codeBlock);
diff --git a/Source/JavaScriptCore/parser/ModuleAnalyzer.cpp b/Source/JavaScriptCore/parser/ModuleAnalyzer.cpp
index 02855d3..0736999 100644
--- a/Source/JavaScriptCore/parser/ModuleAnalyzer.cpp
+++ b/Source/JavaScriptCore/parser/ModuleAnalyzer.cpp
@@ -140,7 +140,7 @@
for (const auto& pair : m_moduleRecord->lexicalVariables())
exportVariable(moduleProgramNode, pair.key, pair.value);
- if (Options::dumpModuleRecord())
+ if (UNLIKELY(Options::dumpModuleRecord()))
m_moduleRecord->dump();
return m_moduleRecord.get();
diff --git a/Source/JavaScriptCore/runtime/JSModuleLoader.cpp b/Source/JavaScriptCore/runtime/JSModuleLoader.cpp
index 64dd3e6..4a1a91f 100644
--- a/Source/JavaScriptCore/runtime/JSModuleLoader.cpp
+++ b/Source/JavaScriptCore/runtime/JSModuleLoader.cpp
@@ -252,8 +252,7 @@
JSInternalPromise* JSModuleLoader::importModule(JSGlobalObject* globalObject, JSString* moduleName, JSValue parameters, const SourceOrigin& referrer)
{
- if (Options::dumpModuleLoadingState())
- dataLog("Loader [import] ", printableModuleKey(globalObject, moduleName), "\n");
+ dataLogLnIf(Options::dumpModuleLoadingState(), "Loader [import] ", printableModuleKey(globalObject, moduleName));
VM& vm = globalObject->vm();
auto throwScope = DECLARE_THROW_SCOPE(vm);
@@ -279,8 +278,7 @@
Identifier JSModuleLoader::resolveSync(JSGlobalObject* globalObject, JSValue name, JSValue referrer, JSValue scriptFetcher)
{
- if (Options::dumpModuleLoadingState())
- dataLog("Loader [resolve] ", printableModuleKey(globalObject, name), "\n");
+ dataLogLnIf(Options::dumpModuleLoadingState(), "Loader [resolve] ", printableModuleKey(globalObject, name));
if (globalObject->globalObjectMethodTable()->moduleLoaderResolve)
return globalObject->globalObjectMethodTable()->moduleLoaderResolve(globalObject, this, name, referrer, scriptFetcher);
@@ -310,8 +308,7 @@
JSInternalPromise* JSModuleLoader::fetch(JSGlobalObject* globalObject, JSValue key, JSValue parameters, JSValue scriptFetcher)
{
- if (Options::dumpModuleLoadingState())
- dataLog("Loader [fetch] ", printableModuleKey(globalObject, key), "\n");
+ dataLogLnIf(Options::dumpModuleLoadingState(), "Loader [fetch] ", printableModuleKey(globalObject, key));
VM& vm = globalObject->vm();
auto throwScope = DECLARE_THROW_SCOPE(vm);
@@ -345,8 +342,7 @@
JSValue JSModuleLoader::evaluate(JSGlobalObject* globalObject, JSValue key, JSValue moduleRecordValue, JSValue scriptFetcher)
{
- if (Options::dumpModuleLoadingState())
- dataLog("Loader [evaluate] ", printableModuleKey(globalObject, key), "\n");
+ dataLogLnIf(Options::dumpModuleLoadingState(), "Loader [evaluate] ", printableModuleKey(globalObject, key));
if (globalObject->globalObjectMethodTable()->moduleLoaderEvaluate)
return globalObject->globalObjectMethodTable()->moduleLoaderEvaluate(globalObject, this, key, moduleRecordValue, scriptFetcher);
@@ -449,8 +445,7 @@
if (!moduleRecord)
return JSValue::encode(jsUndefined());
- if (Options::dumpModuleLoadingState())
- dataLog("Loader [link] ", moduleRecord->moduleKey(), "\n");
+ dataLogLnIf(Options::dumpModuleLoadingState(), "Loader [link] ", moduleRecord->moduleKey());
moduleRecord->link(globalObject, callFrame->argument(1));
RETURN_IF_EXCEPTION(scope, encodedJSValue());
diff --git a/Source/JavaScriptCore/runtime/ScriptExecutable.cpp b/Source/JavaScriptCore/runtime/ScriptExecutable.cpp
index ff84202..7543694 100644
--- a/Source/JavaScriptCore/runtime/ScriptExecutable.cpp
+++ b/Source/JavaScriptCore/runtime/ScriptExecutable.cpp
@@ -196,8 +196,7 @@
RELEASE_ASSERT(genericCodeBlock->ownerExecutable() == this);
RELEASE_ASSERT(JITCode::isExecutableScript(genericCodeBlock->jitType()));
- if (UNLIKELY(Options::verboseOSR()))
- dataLog("Installing ", *genericCodeBlock, "\n");
+ dataLogLnIf(Options::verboseOSR(), "Installing ", *genericCodeBlock);
if (UNLIKELY(vm.m_perBytecodeProfiler))
vm.m_perBytecodeProfiler->ensureBytecodesFor(genericCodeBlock);
diff --git a/Source/JavaScriptCore/runtime/VM.cpp b/Source/JavaScriptCore/runtime/VM.cpp
index a9fe74a..8803a71 100644
--- a/Source/JavaScriptCore/runtime/VM.cpp
+++ b/Source/JavaScriptCore/runtime/VM.cpp
@@ -938,7 +938,7 @@
if (!throwOriginFrame)
throwOriginFrame = globalObject->deprecatedCallFrameForDebugger();
- if (Options::breakOnThrow()) {
+ if (UNLIKELY(Options::breakOnThrow())) {
CodeBlock* codeBlock = throwOriginFrame ? throwOriginFrame->codeBlock() : nullptr;
dataLog("Throwing exception in call frame ", RawPointer(throwOriginFrame), " for code block ", codeBlock, "\n");
CRASH();
diff --git a/Source/JavaScriptCore/tools/CompilerTimingScope.cpp b/Source/JavaScriptCore/tools/CompilerTimingScope.cpp
index 96f0dae..524a85e 100644
--- a/Source/JavaScriptCore/tools/CompilerTimingScope.cpp
+++ b/Source/JavaScriptCore/tools/CompilerTimingScope.cpp
@@ -64,13 +64,13 @@
: m_compilerName(compilerName)
, m_name(name)
{
- if (Options::logPhaseTimes())
+ if (UNLIKELY(Options::logPhaseTimes()))
m_before = MonotonicTime::now();
}
CompilerTimingScope::~CompilerTimingScope()
{
- if (Options::logPhaseTimes()) {
+ if (UNLIKELY(Options::logPhaseTimes())) {
Seconds duration = MonotonicTime::now() - m_before;
dataLog(
"[", m_compilerName, "] ", m_name, " took: ", duration.milliseconds(), " ms ",
diff --git a/Source/JavaScriptCore/wasm/WasmMemory.cpp b/Source/JavaScriptCore/wasm/WasmMemory.cpp
index fdeab94..e407269 100644
--- a/Source/JavaScriptCore/wasm/WasmMemory.cpp
+++ b/Source/JavaScriptCore/wasm/WasmMemory.cpp
@@ -120,8 +120,7 @@
m_fastMemories.size() >= m_maxFastMemoryCount / 2 ? MemoryResult::SuccessAndNotifyMemoryPressure : MemoryResult::Success);
}();
- if (Options::logWebAssemblyMemory())
- dataLog("Allocated virtual: ", result, "; state: ", *this, "\n");
+ dataLogLnIf(Options::logWebAssemblyMemory(), "Allocated virtual: ", result, "; state: ", *this);
return result;
}
@@ -134,8 +133,7 @@
m_fastMemories.removeFirst(basePtr);
}
- if (Options::logWebAssemblyMemory())
- dataLog("Freed virtual; state: ", *this, "\n");
+ dataLogLnIf(Options::logWebAssemblyMemory(), "Freed virtual; state: ", *this);
}
bool isAddressInFastMemory(void* address)
@@ -172,8 +170,7 @@
return MemoryResult::Success;
}();
- if (Options::logWebAssemblyMemory())
- dataLog("Allocated physical: ", bytes, ", ", MemoryResult::toString(result), "; state: ", *this, "\n");
+ dataLogLnIf(Options::logWebAssemblyMemory(), "Allocated physical: ", bytes, ", ", MemoryResult::toString(result), "; state: ", *this);
return result;
}
@@ -185,8 +182,7 @@
m_physicalBytes -= bytes;
}
- if (Options::logWebAssemblyMemory())
- dataLog("Freed physical: ", bytes, "; state: ", *this, "\n");
+ dataLogLnIf(Options::logWebAssemblyMemory(), "Freed physical: ", bytes, "; state: ", *this);
}
void dump(PrintStream& out) const
diff --git a/Source/JavaScriptCore/wasm/js/JSWebAssembly.cpp b/Source/JavaScriptCore/wasm/js/JSWebAssembly.cpp
index 3649e57..d53ab26 100644
--- a/Source/JavaScriptCore/wasm/js/JSWebAssembly.cpp
+++ b/Source/JavaScriptCore/wasm/js/JSWebAssembly.cpp
@@ -190,7 +190,7 @@
promise->resolve(globalObject, instance);
else if (resolveKind == Resolve::WithModuleRecord) {
auto* moduleRecord = instance->moduleNamespaceObject()->moduleRecord();
- if (Options::dumpModuleRecord())
+ if (UNLIKELY(Options::dumpModuleRecord()))
moduleRecord->dump();
promise->resolve(globalObject, moduleRecord);
} else {
diff --git a/Source/JavaScriptCore/yarr/YarrJIT.cpp b/Source/JavaScriptCore/yarr/YarrJIT.cpp
index 412a811..d2f0e8e 100644
--- a/Source/JavaScriptCore/yarr/YarrJIT.cpp
+++ b/Source/JavaScriptCore/yarr/YarrJIT.cpp
@@ -4243,7 +4243,7 @@
YarrGenerator<IncludeSubpatterns>(vm, pattern, patternString, codeBlock, charSize).compile();
if (auto failureReason = codeBlock.failureReason()) {
- if (Options::dumpCompiledRegExpPatterns()) {
+ if (UNLIKELY(Options::dumpCompiledRegExpPatterns())) {
pattern.dumpPatternString(WTF::dataFile(), patternString);
dataLog(" : ");
dumpCompileFailure(*failureReason);
diff --git a/Source/JavaScriptCore/yarr/YarrPattern.cpp b/Source/JavaScriptCore/yarr/YarrPattern.cpp
index eb60dbe..c508208 100644
--- a/Source/JavaScriptCore/yarr/YarrPattern.cpp
+++ b/Source/JavaScriptCore/yarr/YarrPattern.cpp
@@ -1177,7 +1177,7 @@
return error;
}
- if (Options::dumpCompiledRegExpPatterns())
+ if (UNLIKELY(Options::dumpCompiledRegExpPatterns()))
dumpPattern(patternString);
return ErrorCode::NoError;
diff --git a/Source/WTF/ChangeLog b/Source/WTF/ChangeLog
index f36d657..e4183e4 100644
--- a/Source/WTF/ChangeLog
+++ b/Source/WTF/ChangeLog
@@ -1,3 +1,14 @@
+2020-01-16 Robin Morisset <rmorisset@apple.com>
+
+ Use dataLogIf more regularly
+ https://bugs.webkit.org/show_bug.cgi?id=206332
+
+ Reviewed by Keith Miller.
+
+ * wtf/DataLog.h:
+ (WTF::dataLog): Marked NEVER_INLINE, since it should never be perf-sensitive
+
+
2020-01-16 Sam Weinig <weinig@apple.com>
Platform.h is out of control Part 6: Split USE_* macro definitions out of Platform.h and into a new PlatformUse.h
diff --git a/Source/WTF/wtf/DataLog.h b/Source/WTF/wtf/DataLog.h
index 0b844b5..9e3c766 100644
--- a/Source/WTF/wtf/DataLog.h
+++ b/Source/WTF/wtf/DataLog.h
@@ -40,7 +40,7 @@
WTF_EXPORT_PRIVATE void dataLogFString(const char*);
template<typename... Types>
-void dataLog(const Types&... values)
+NEVER_INLINE void dataLog(const Types&... values)
{
dataFile().print(values...);
}