Get rid of ENABLE(VALUE_PROFILER). It's on all the time now.

Rubber stamped by Mark Hahnenberg.
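
The change is mechanical throughout: every #if ENABLE(VALUE_PROFILER) /
#else / #endif block collapses to its enabled branch. Illustrative shape of
the edits (a pattern, not any one hunk):

    // Before:
    #if ENABLE(VALUE_PROFILER)
        m_rareCaseProfiles.shrinkToFit();
    #endif

    // After:
        m_rareCaseProfiles.shrinkToFit();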

Source/JavaScriptCore: 

* bytecode/CallLinkStatus.cpp:
(JSC::CallLinkStatus::computeFor):
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::dumpValueProfiling):
(JSC::CodeBlock::dumpArrayProfiling):
(JSC::CodeBlock::dumpRareCaseProfile):
(JSC::CodeBlock::dumpBytecode):
(JSC::CodeBlock::CodeBlock):
(JSC::CodeBlock::setNumParameters):
(JSC::CodeBlock::shrinkToFit):
(JSC::CodeBlock::shouldOptimizeNow):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::valueProfileForBytecodeOffset):
* bytecode/GetByIdStatus.cpp:
(JSC::GetByIdStatus::computeForChain):
(JSC::GetByIdStatus::computeFor):
* bytecode/LazyOperandValueProfile.cpp:
* bytecode/LazyOperandValueProfile.h:
* bytecode/PutByIdStatus.cpp:
(JSC::PutByIdStatus::computeFor):
* bytecode/ValueProfile.h:
* bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::newArrayProfile):
(JSC::BytecodeGenerator::newArrayAllocationProfile):
(JSC::BytecodeGenerator::emitProfiledOpcode):
* jit/GPRInfo.h:
* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::privateCompileSlowCases):
(JSC::JIT::privateCompile):
* jit/JIT.h:
* jit/JITArithmetic.cpp:
(JSC::JIT::compileBinaryArithOp):
(JSC::JIT::emit_op_mul):
(JSC::JIT::emit_op_div):
* jit/JITArithmetic32_64.cpp:
(JSC::JIT::emitBinaryDoubleOp):
(JSC::JIT::emit_op_mul):
(JSC::JIT::emitSlow_op_mul):
(JSC::JIT::emit_op_div):
* jit/JITCall.cpp:
(JSC::JIT::emitPutCallResult):
* jit/JITCall32_64.cpp:
(JSC::JIT::emitPutCallResult):
* jit/JITInlines.h:
(JSC::JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile):
(JSC::JIT::emitValueProfilingSite):
(JSC::JIT::emitArrayProfilingSiteForBytecodeIndex):
(JSC::JIT::emitArrayProfileStoreToHoleSpecialCase):
(JSC::JIT::emitArrayProfileOutOfBoundsSpecialCase):
(JSC::arrayProfileSaw):
(JSC::JIT::chooseArrayMode):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_get_argument_by_val):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::emit_op_get_argument_by_val):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emit_op_get_by_val):
(JSC::JIT::emitSlow_op_get_by_val):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_get_from_scope):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emit_op_get_by_val):
(JSC::JIT::emitSlow_op_get_by_val):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_get_from_scope):
* llint/LLIntOfflineAsmConfig.h:
* llint/LLIntSlowPaths.cpp:
(JSC::LLInt::LLINT_SLOW_PATH_DECL):
* llint/LowLevelInterpreter.asm:
* llint/LowLevelInterpreter32_64.asm:
* llint/LowLevelInterpreter64.asm:
* profiler/ProfilerBytecodeSequence.cpp:
(JSC::Profiler::BytecodeSequence::BytecodeSequence):
* runtime/CommonSlowPaths.cpp:

Source/WTF: 

* wtf/Platform.h:



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@161364 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index ccbca8a..e2ce2cc 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,5 +1,87 @@
 2014-01-06  Filip Pizlo  <fpizlo@apple.com>
 
+        Get rid of ENABLE(VALUE_PROFILER). It's on all the time now.
+
+        Rubber stamped by Mark Hahnenberg.
+
+        * bytecode/CallLinkStatus.cpp:
+        (JSC::CallLinkStatus::computeFor):
+        * bytecode/CodeBlock.cpp:
+        (JSC::CodeBlock::dumpValueProfiling):
+        (JSC::CodeBlock::dumpArrayProfiling):
+        (JSC::CodeBlock::dumpRareCaseProfile):
+        (JSC::CodeBlock::dumpBytecode):
+        (JSC::CodeBlock::CodeBlock):
+        (JSC::CodeBlock::setNumParameters):
+        (JSC::CodeBlock::shrinkToFit):
+        (JSC::CodeBlock::shouldOptimizeNow):
+        * bytecode/CodeBlock.h:
+        (JSC::CodeBlock::valueProfileForBytecodeOffset):
+        * bytecode/GetByIdStatus.cpp:
+        (JSC::GetByIdStatus::computeForChain):
+        (JSC::GetByIdStatus::computeFor):
+        * bytecode/LazyOperandValueProfile.cpp:
+        * bytecode/LazyOperandValueProfile.h:
+        * bytecode/PutByIdStatus.cpp:
+        (JSC::PutByIdStatus::computeFor):
+        * bytecode/ValueProfile.h:
+        * bytecompiler/BytecodeGenerator.cpp:
+        (JSC::BytecodeGenerator::newArrayProfile):
+        (JSC::BytecodeGenerator::newArrayAllocationProfile):
+        (JSC::BytecodeGenerator::emitProfiledOpcode):
+        * jit/GPRInfo.h:
+        * jit/JIT.cpp:
+        (JSC::JIT::JIT):
+        (JSC::JIT::privateCompileSlowCases):
+        (JSC::JIT::privateCompile):
+        * jit/JIT.h:
+        * jit/JITArithmetic.cpp:
+        (JSC::JIT::compileBinaryArithOp):
+        (JSC::JIT::emit_op_mul):
+        (JSC::JIT::emit_op_div):
+        * jit/JITArithmetic32_64.cpp:
+        (JSC::JIT::emitBinaryDoubleOp):
+        (JSC::JIT::emit_op_mul):
+        (JSC::JIT::emitSlow_op_mul):
+        (JSC::JIT::emit_op_div):
+        * jit/JITCall.cpp:
+        (JSC::JIT::emitPutCallResult):
+        * jit/JITCall32_64.cpp:
+        (JSC::JIT::emitPutCallResult):
+        * jit/JITInlines.h:
+        (JSC::JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile):
+        (JSC::JIT::emitValueProfilingSite):
+        (JSC::JIT::emitArrayProfilingSiteForBytecodeIndex):
+        (JSC::JIT::emitArrayProfileStoreToHoleSpecialCase):
+        (JSC::JIT::emitArrayProfileOutOfBoundsSpecialCase):
+        (JSC::arrayProfileSaw):
+        (JSC::JIT::chooseArrayMode):
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::emit_op_get_argument_by_val):
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::emit_op_get_argument_by_val):
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::emit_op_get_by_val):
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::emit_op_get_by_id):
+        (JSC::JIT::emit_op_get_from_scope):
+        * jit/JITPropertyAccess32_64.cpp:
+        (JSC::JIT::emit_op_get_by_val):
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::emit_op_get_by_id):
+        (JSC::JIT::emit_op_get_from_scope):
+        * llint/LLIntOfflineAsmConfig.h:
+        * llint/LLIntSlowPaths.cpp:
+        (JSC::LLInt::LLINT_SLOW_PATH_DECL):
+        * llint/LowLevelInterpreter.asm:
+        * llint/LowLevelInterpreter32_64.asm:
+        * llint/LowLevelInterpreter64.asm:
+        * profiler/ProfilerBytecodeSequence.cpp:
+        (JSC::Profiler::BytecodeSequence::BytecodeSequence):
+        * runtime/CommonSlowPaths.cpp:
+
+2014-01-06  Filip Pizlo  <fpizlo@apple.com>
+
         LLInt shouldn't check for ENABLE(JIT).
 
         Rubber stamped by Mark Hahnenberg.
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
index 445310b..b64c967 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
@@ -101,7 +101,7 @@
     
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
+#if ENABLE(JIT)
     if (!profiledBlock->hasBaselineJITProfiling())
         return computeFromLLInt(profiledBlock, bytecodeIndex);
     
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 08f3838..0a6050f1 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -622,16 +622,11 @@
     ConcurrentJITLocker locker(m_lock);
     
     ++it;
-#if ENABLE(VALUE_PROFILER)
     CString description = it->u.profile->briefDescription(locker);
     if (!description.length())
         return;
     beginDumpProfiling(out, hasPrintedProfiling);
     out.print(description);
-#else
-    UNUSED_PARAM(out);
-    UNUSED_PARAM(hasPrintedProfiling);
-#endif
 }
 
 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
@@ -639,7 +634,6 @@
     ConcurrentJITLocker locker(m_lock);
     
     ++it;
-#if ENABLE(VALUE_PROFILER)
     if (!it->u.arrayProfile)
         return;
     CString description = it->u.arrayProfile->briefDescription(locker, this);
@@ -647,13 +641,8 @@
         return;
     beginDumpProfiling(out, hasPrintedProfiling);
     out.print(description);
-#else
-    UNUSED_PARAM(out);
-    UNUSED_PARAM(hasPrintedProfiling);
-#endif
 }
 
-#if ENABLE(VALUE_PROFILER)
 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
 {
     if (!profile || !profile->m_counter)
@@ -662,7 +651,6 @@
     beginDumpProfiling(out, hasPrintedProfiling);
     out.print(name, profile->m_counter);
 }
-#endif
 
 void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, const StubInfoMap& map)
 {
@@ -1422,10 +1410,8 @@
 #endif
     }
 
-#if ENABLE(VALUE_PROFILER)
     dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
     dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
-#endif
     
 #if ENABLE(DFG_JIT)
     Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
@@ -1799,12 +1785,10 @@
         }
 
         case op_get_from_scope: {
-#if ENABLE(VALUE_PROFILER)
             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
             ASSERT(profile->m_bytecodeOffset == -1);
             profile->m_bytecodeOffset = i;
             instructions[i + opLength - 1] = profile;
-#endif
 
             // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
             const Identifier& ident = identifier(pc[i + 3].u.operand);
@@ -1924,9 +1908,7 @@
 {
     m_numParameters = newValue;
 
-#if ENABLE(VALUE_PROFILER)
     m_argumentValueProfiles.resizeToFit(newValue);
-#endif
 }
 
 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
@@ -2596,10 +2578,8 @@
 #if ENABLE(JIT)
     m_callLinkInfos.shrinkToFit();
 #endif
-#if ENABLE(VALUE_PROFILER)
     m_rareCaseProfiles.shrinkToFit();
     m_specialFastCaseProfiles.shrinkToFit();
-#endif
     
     if (shrinkMode == EarlyShrink) {
         m_additionalIdentifiers.shrinkToFit();
@@ -3181,7 +3161,6 @@
 }
 #endif
 
-#if ENABLE(VALUE_PROFILER)
 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
 {
     for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
@@ -3254,10 +3233,6 @@
     if (Options::verboseOSR())
         dataLog("Considering optimizing ", *this, "...\n");
 
-#if ENABLE(VERBOSE_VALUE_PROFILE)
-    dumpValueProfiles();
-#endif
-
     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
         return true;
     
@@ -3286,7 +3261,6 @@
     optimizeAfterWarmUp();
     return false;
 }
-#endif
 
 #if ENABLE(DFG_JIT)
 void CodeBlock::tallyFrequentExitSites()
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 7ddf040..2ff11a9 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -277,12 +277,12 @@
         return result;
     }
 
-#if ENABLE(JIT)
     bool hasBaselineJITProfiling() const
     {
         return jitType() == JITCode::BaselineJIT;
     }
     
+#if ENABLE(JIT)
     virtual CodeBlock* replacement() = 0;
 
     virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
@@ -410,7 +410,6 @@
     CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
 #endif
 
-#if ENABLE(VALUE_PROFILER)
     unsigned numberOfArgumentValueProfiles()
     {
         ASSERT(m_numParameters >= 0);
@@ -429,13 +428,12 @@
     ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
     {
         ValueProfile* result = binarySearch<ValueProfile, int>(
-                                                               m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
-                                                               getValueProfileBytecodeOffset<ValueProfile>);
+            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
+            getValueProfileBytecodeOffset<ValueProfile>);
         ASSERT(result->m_bytecodeOffset != -1);
         ASSERT(instructions()[bytecodeOffset + opcodeLength(
-                                                            m_vm->interpreter->getOpcodeID(
-                                                                                           instructions()[
-                                                                                                          bytecodeOffset].u.opcode)) - 1].u.profile == result);
+            m_vm->interpreter->getOpcodeID(
+                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
         return result;
     }
     SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
@@ -543,7 +541,6 @@
     }
     ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
     ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
-#endif
 
     // Exception handling support
 
@@ -884,22 +881,11 @@
     unsigned numberOfDFGCompiles() { return 0; }
 #endif
 
-#if ENABLE(VALUE_PROFILER)
     bool shouldOptimizeNow();
     void updateAllValueProfilePredictions();
     void updateAllArrayPredictions();
     void updateAllPredictions();
-#else
-    bool updateAllPredictionsAndCheckIfShouldOptimizeNow() { return false; }
-    void updateAllValueProfilePredictions() { }
-    void updateAllArrayPredictions() { }
-    void updateAllPredictions() { }
-#endif
 
-#if ENABLE(VERBOSE_VALUE_PROFILE)
-    void dumpValueProfiles();
-#endif
-    
     unsigned frameRegisterCount();
 
     // FIXME: Make these remaining members private.
@@ -959,9 +945,7 @@
     ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
 #endif
         
-#if ENABLE(VALUE_PROFILER)
     void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
-#endif
 
     void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
     {
@@ -996,9 +980,7 @@
     void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
     void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
     void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
-#if ENABLE(VALUE_PROFILER)
     void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
-#endif
         
 #if ENABLE(DFG_JIT)
     bool shouldImmediatelyAssumeLivenessDuringScan()
@@ -1080,14 +1062,12 @@
     DFG::ExitProfile m_exitProfile;
     CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
 #endif
-#if ENABLE(VALUE_PROFILER)
     Vector<ValueProfile> m_argumentValueProfiles;
     Vector<ValueProfile> m_valueProfiles;
     SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
     SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
     Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
     ArrayProfileVector m_arrayProfiles;
-#endif
     Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
 
     // Constant Pool
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index ada7dda..fbb3da1 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -69,7 +69,7 @@
 
 void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, StringImpl* uid)
 {
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
+#if ENABLE(JIT)
     // Validate the chain. If the chain is invalid, then currently the best thing
     // we can do is to assume that TakesSlow is true. In the future, it might be
     // worth exploring reifying the structure chain from the structure we've got
@@ -123,7 +123,7 @@
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
     UNUSED_PARAM(uid);
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
+#if ENABLE(JIT)
     StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
     if (!stubInfo || !stubInfo->seen)
         return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
index 4e90647..a8ad779 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
@@ -26,8 +26,6 @@
 #include "config.h"
 #include "LazyOperandValueProfile.h"
 
-#if ENABLE(VALUE_PROFILER)
-
 #include "Operations.h"
 
 namespace JSC {
@@ -100,5 +98,3 @@
 
 } // namespace JSC
 
-#endif // ENABLE(VALUE_PROFILER)
-
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
index 483e5b5..95ef941 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
@@ -26,10 +26,6 @@
 #ifndef LazyOperandValueProfile_h
 #define LazyOperandValueProfile_h
 
-#include <wtf/Platform.h>
-
-#if ENABLE(VALUE_PROFILER)
-
 #include "ConcurrentJITLock.h"
 #include "ValueProfile.h"
 #include "VirtualRegister.h"
@@ -188,8 +184,6 @@
 
 } // namespace JSC
 
-#endif // ENABLE(VALUE_PROFILER)
-
 #endif // LazyOperandValueProfile_h
 
 
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
index 1b38fbb..17cf708 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
@@ -88,7 +88,7 @@
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
     UNUSED_PARAM(uid);
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
+#if ENABLE(JIT)
     if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
         return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
     
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h
index 0db9166..0790f79 100644
--- a/Source/JavaScriptCore/bytecode/ValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.h
@@ -29,10 +29,6 @@
 #ifndef ValueProfile_h
 #define ValueProfile_h
 
-#include <wtf/Platform.h>
-
-#if ENABLE(VALUE_PROFILER)
-
 #include "ConcurrentJITLock.h"
 #include "Heap.h"
 #include "JSArray.h"
@@ -212,7 +208,5 @@
 
 } // namespace JSC
 
-#endif // ENABLE(VALUE_PROFILER)
-
 #endif // ValueProfile_h
 
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
index 891b5de..68a55f1 100644
--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
@@ -636,20 +636,12 @@
 
 UnlinkedArrayProfile BytecodeGenerator::newArrayProfile()
 {
-#if ENABLE(VALUE_PROFILER)
     return m_codeBlock->addArrayProfile();
-#else
-    return 0;
-#endif
 }
 
 UnlinkedArrayAllocationProfile BytecodeGenerator::newArrayAllocationProfile()
 {
-#if ENABLE(VALUE_PROFILER)
     return m_codeBlock->addArrayAllocationProfile();
-#else
-    return 0;
-#endif
 }
 
 UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile()
@@ -659,11 +651,7 @@
 
 UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)
 {
-#if ENABLE(VALUE_PROFILER)
     UnlinkedValueProfile result = m_codeBlock->addValueProfile();
-#else
-    UnlinkedValueProfile result = 0;
-#endif
     emitOpcode(opcodeID);
     return result;
 }
diff --git a/Source/JavaScriptCore/jit/GPRInfo.h b/Source/JavaScriptCore/jit/GPRInfo.h
index a952c46..393a56b 100644
--- a/Source/JavaScriptCore/jit/GPRInfo.h
+++ b/Source/JavaScriptCore/jit/GPRInfo.h
@@ -574,10 +574,6 @@
     static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1
     static const GPRReg nonPreservedNonReturnGPR = ARM64Registers::x2;
 
-#if ENABLE(VALUE_PROFILER)
-    static const GPRReg bucketCounterRegister = ARM64Registers::x7;
-#endif
-
     // GPRReg mapping is direct, the machine register numbers can
     // be used directly as indices into the GPR RegisterBank.
     COMPILE_ASSERT(ARM64Registers::q0 == 0, q0_is_0);
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index e01e26b..c3508b0 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -81,10 +81,8 @@
     , m_byValInstructionIndex(UINT_MAX)
     , m_callLinkInfoIndex(UINT_MAX)
     , m_randomGenerator(cryptographicallyRandomNumber())
-#if ENABLE(VALUE_PROFILER)
     , m_canBeOptimized(false)
     , m_shouldEmitProfiling(false)
-#endif
 {
 }
 
@@ -329,7 +327,6 @@
     m_byValInstructionIndex = 0;
     m_callLinkInfoIndex = 0;
     
-#if ENABLE(VALUE_PROFILER)
     // Use this to assert that slow-path code associates new profiling sites with existing
     // ValueProfiles rather than creating new ones. This ensures that for a given instruction
     // (say, get_by_id) we get combined statistics for both the fast-path executions of that
@@ -337,7 +334,6 @@
     // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
     // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
     unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
-#endif
 
     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
         m_bytecodeOffset = iter->to;
@@ -346,11 +342,9 @@
 
         Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
         
-#if ENABLE(VALUE_PROFILER)
         RareCaseProfile* rareCaseProfile = 0;
         if (shouldEmitProfiling())
             rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
-#endif
 
 #if ENABLE(JIT_VERBOSE)
         dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
@@ -431,10 +425,8 @@
         RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
         RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
         
-#if ENABLE(VALUE_PROFILER)
         if (shouldEmitProfiling())
             add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
-#endif
 
         emitJumpSlowToHot(jump(), 0);
     }
@@ -442,9 +434,7 @@
     RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
     RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
     RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
-#if ENABLE(VALUE_PROFILER)
     RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
-#endif
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
@@ -454,7 +444,6 @@
 
 CompilationResult JIT::privateCompile(JITCompilationEffort effort)
 {
-#if ENABLE(VALUE_PROFILER)
     DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
     switch (level) {
     case DFG::CannotCompile:
@@ -489,7 +478,6 @@
         m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
         break;
     }
-#endif
     
     if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
         m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
@@ -521,7 +509,6 @@
 
     Jump stackCheck;
     if (m_codeBlock->codeType() == FunctionCode) {
-#if ENABLE(VALUE_PROFILER)
         ASSERT(m_bytecodeOffset == (unsigned)-1);
         if (shouldEmitProfiling()) {
             for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
@@ -536,10 +523,9 @@
                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
 #endif
-                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), regT4);
+                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
             }
         }
-#endif
 
         addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1);
         stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
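
For reference, the assertion above (that numberOfValueProfiles stays constant
across slow-path codegen) guards the sortedness that
CodeBlock::valueProfileForBytecodeOffset() depends on. A minimal sketch of
that lookup, assuming a simplified Profile stand-in for JSC::ValueProfile:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Profile { int m_bytecodeOffset; }; // stand-in for JSC::ValueProfile

    // Profiles are appended in bytecode order at compile time; appending more
    // during slow-path codegen would break the sorted order this search needs.
    Profile* profileForBytecodeOffset(std::vector<Profile>& profiles, int bytecodeOffset)
    {
        auto it = std::lower_bound(profiles.begin(), profiles.end(), bytecodeOffset,
            [](const Profile& p, int offset) { return p.m_bytecodeOffset < offset; });
        assert(it != profiles.end() && it->m_bytecodeOffset == bytecodeOffset);
        return &*it;
    }
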
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 3cd79bb..2980757 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -331,16 +331,11 @@
         template<typename StructureType> // StructureType can be RegisterID or ImmPtr.
         void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch);
         
-#if ENABLE(VALUE_PROFILER)
         // This assumes that the value to profile is in regT0 and that regT3 is available for
         // scratch.
-        void emitValueProfilingSite(ValueProfile*, RegisterID);
-        void emitValueProfilingSite(unsigned bytecodeOffset, RegisterID);
-        void emitValueProfilingSite(RegisterID);
-#else
-        void emitValueProfilingSite(unsigned, RegisterID) { }
-        void emitValueProfilingSite(RegisterID) { }
-#endif
+        void emitValueProfilingSite(ValueProfile*);
+        void emitValueProfilingSite(unsigned bytecodeOffset);
+        void emitValueProfilingSite();
         void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
         void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
         void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
@@ -827,11 +822,9 @@
         WeakRandom m_randomGenerator;
         static CodeRef stringGetByValStubGenerator(VM*);
 
-#if ENABLE(VALUE_PROFILER)
         bool m_canBeOptimized;
         bool m_canBeOptimizedOrInlined;
         bool m_shouldEmitProfiling;
-#endif
     } JIT_CLASS_ALIGNMENT;
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index 438ee4f..b9c7057 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -668,16 +668,13 @@
     emitGetVirtualRegisters(op1, regT0, op2, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT0);
     emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if ENABLE(VALUE_PROFILER)
     RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
     if (opcodeID == op_add)
         addSlowCase(branchAdd32(Overflow, regT1, regT0));
     else if (opcodeID == op_sub)
         addSlowCase(branchSub32(Overflow, regT1, regT0));
     else {
         ASSERT(opcodeID == op_mul);
-#if ENABLE(VALUE_PROFILER)
         if (shouldEmitProfiling()) {
             // We want to be able to measure if this is taking the slow case just
             // because of negative zero. If this produces positive zero, then we
@@ -701,10 +698,6 @@
             addSlowCase(branchMul32(Overflow, regT1, regT0));
             addSlowCase(branchTest32(Zero, regT0));
         }
-#else
-        addSlowCase(branchMul32(Overflow, regT1, regT0));
-        addSlowCase(branchTest32(Zero, regT0));
-#endif
     }
     emitFastArithIntToImmNoCheck(regT0, regT0);
 }
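
A gloss on the negative-zero comment above: int32 arithmetic cannot represent
-0, so a multiply whose result should be -0.0 must take the slow case even
though it neither overflows nor produces a nonzero value. The condition being
counted, restated as plain C++ (a sketch, not the emitted assembly):

    #include <cstdio>

    // True when x * y is -0 in double arithmetic but plain 0 as an int32,
    // which is exactly what the special fast case counter records.
    static bool mulIsNegativeZero(int x, int y)
    {
        return (x == 0 && y < 0) || (y == 0 && x < 0);
    }

    int main()
    {
        std::printf("%d\n", mulIsNegativeZero(-1, 0)); // 1: (-1) * 0 == -0.0
        std::printf("%d\n", mulIsNegativeZero(0, 5));  // 0: 0 * 5 == +0
    }
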
@@ -849,19 +842,15 @@
     // For now, only plant a fast int case if the constant operand is greater than zero.
     int32_t value;
     if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
-#if ENABLE(VALUE_PROFILER)
         // Add a special fast case profile because the DFG JIT will expect one.
         m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
         emitGetVirtualRegister(op2, regT0);
         emitJumpSlowCaseIfNotImmediateInteger(regT0);
         addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
         emitFastArithReTagImmediate(regT1, regT0);
     } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
-#if ENABLE(VALUE_PROFILER)
         // Add a special fast case profile because the DFG JIT will expect one.
         m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
         emitGetVirtualRegister(op1, regT0);
         emitJumpSlowCaseIfNotImmediateInteger(regT0);
         addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
@@ -930,7 +919,6 @@
     }
     divDouble(fpRegT1, fpRegT0);
     
-#if ENABLE(VALUE_PROFILER)
     // Is the result actually an integer? The DFG JIT would really like to know. If it's
     // not an integer, we increment a count. If this together with the slow case counter
     // is below threshold then the DFG JIT will compile this division with a speculation
@@ -957,11 +945,6 @@
     move(tagTypeNumberRegister, regT0);
     trueDouble.link(this);
     isInteger.link(this);
-#else
-    // Double result.
-    moveDoubleTo64(fpRegT0, regT0);
-    sub64(tagTypeNumberRegister, regT0);
-#endif
 
     emitPutVirtualRegister(dst, regT0);
 }
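
The "is the result actually an integer?" test above can be stated in plain
C++. This is a behavioral sketch of what the emitted check decides, not the
exact branchConvertDoubleToInt32 sequence:

    #include <cmath>
    #include <cstdint>

    // A quotient counts as an int32 only if int32 conversion round-trips
    // exactly; fractional results, out-of-range values, NaN, and -0.0 fail.
    static bool quotientIsInt32(double d)
    {
        if (!(d >= INT32_MIN && d <= INT32_MAX)) // NaN fails this comparison too
            return false;
        if (d == 0.0 && std::signbit(d)) // -0.0 is not an int32
            return false;
        return static_cast<double>(static_cast<int32_t>(d)) == d;
    }
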
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 6c62976d..53ac738 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -742,7 +742,6 @@
                 emitLoadDouble(op1, fpRegT1);
                 divDouble(fpRegT0, fpRegT1);
 
-#if ENABLE(VALUE_PROFILER)
                 // Is the result actually an integer? The DFG JIT would really like to know. If it's
                 // not an integer, we increment a count. If this together with the slow case counter
                 // is below threshold then the DFG JIT will compile this division with a speculation
@@ -766,9 +765,6 @@
                 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                 emitStoreDouble(dst, fpRegT1);
                 isInteger.link(this);
-#else
-                emitStoreDouble(dst, fpRegT1);
-#endif
                 break;
             }
             case op_jless:
@@ -846,7 +842,6 @@
             case op_div: {
                 emitLoadDouble(op2, fpRegT2);
                 divDouble(fpRegT2, fpRegT0);
-#if ENABLE(VALUE_PROFILER)
                 // Is the result actually an integer? The DFG JIT would really like to know. If it's
                 // not an integer, we increment a count. If this together with the slow case counter
                 // is below threshold then the DFG JIT will compile this division with a speculation
@@ -870,9 +865,6 @@
                 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                 emitStoreDouble(dst, fpRegT0);
                 isInteger.link(this);
-#else
-                emitStoreDouble(dst, fpRegT0);
-#endif
                 break;
             }
             case op_jless:
@@ -924,9 +916,7 @@
     int op2 = currentInstruction[3].u.operand;
     OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
 
-#if ENABLE(VALUE_PROFILER)
     m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
 
     JumpList notInt32Op1;
     JumpList notInt32Op2;
@@ -969,12 +959,10 @@
     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
 
     negZero.link(this);
-#if ENABLE(VALUE_PROFILER)
     // We only get here if we have a genuine negative zero. Record this,
     // so that the speculative JIT knows that we failed speculation
     // because of a negative zero.
     add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
-#endif
     overflow.link(this);
 
     if (!supportsFloatingPoint()) {
@@ -1005,9 +993,7 @@
     int op2 = currentInstruction[3].u.operand;
     OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
 
-#if ENABLE(VALUE_PROFILER)
     m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
 
     if (!supportsFloatingPoint()) {
         addSlowCase(jump());
@@ -1028,7 +1014,6 @@
     convertInt32ToDouble(regT0, fpRegT0);
     convertInt32ToDouble(regT2, fpRegT1);
     divDouble(fpRegT1, fpRegT0);
-#if ENABLE(VALUE_PROFILER)
     // Is the result actually an integer? The DFG JIT would really like to know. If it's
     // not an integer, we increment a count. If this together with the slow case counter
     // is below threshold then the DFG JIT will compile this division with a speculation
@@ -1051,9 +1036,6 @@
     notInteger.link(this);
     add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
     emitStoreDouble(dst, fpRegT0);
-#else
-    emitStoreDouble(dst, fpRegT0);
-#endif
     end.append(jump());
 
     // Double divide.
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index deb23e7..90c2e4f 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -48,7 +48,7 @@
 void JIT::emitPutCallResult(Instruction* instruction)
 {
     int dst = instruction[1].u.operand;
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitPutVirtualRegister(dst);
 }
 
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index 194cf81..6086038 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -47,7 +47,7 @@
 void JIT::emitPutCallResult(Instruction* instruction)
 {
     int dst = instruction[1].u.operand;
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitStore(dst, regT1, regT0);
 }
 
diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h
index f6a32813..9330e77 100644
--- a/Source/JavaScriptCore/jit/JITInlines.h
+++ b/Source/JavaScriptCore/jit/JITInlines.h
@@ -142,7 +142,7 @@
 ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr& function, int dst)
 {
     MacroAssembler::Call call = appendCallWithExceptionCheck(function);
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
 #if USE(JSVALUE64)
     emitPutVirtualRegister(dst, returnValueGPR);
 #else
@@ -680,8 +680,7 @@
     storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset()));
 }
 
-#if ENABLE(VALUE_PROFILER)
-inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile, RegisterID bucketCounterRegister)
+inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
 {
     ASSERT(shouldEmitProfiling());
     ASSERT(valueProfile);
@@ -690,47 +689,29 @@
 #if USE(JSVALUE32_64)
     const RegisterID valueTag = regT1;
 #endif
-    const RegisterID scratch = regT3;
     
-    if (ValueProfile::numberOfBuckets == 1) {
-        // We're in a simple configuration: only one bucket, so we can just do a direct
-        // store.
+    // We're in a simple configuration: only one bucket, so we can just do a direct
+    // store.
 #if USE(JSVALUE64)
-        store64(value, valueProfile->m_buckets);
+    store64(value, valueProfile->m_buckets);
 #else
-        EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
-        store32(value, &descriptor->asBits.payload);
-        store32(valueTag, &descriptor->asBits.tag);
-#endif
-        return;
-    }
-    
-    if (m_randomGenerator.getUint32() & 1)
-        add32(TrustedImm32(1), bucketCounterRegister);
-    else
-        add32(TrustedImm32(3), bucketCounterRegister);
-    and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
-    move(TrustedImmPtr(valueProfile->m_buckets), scratch);
-#if USE(JSVALUE64)
-    store64(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
-#elif USE(JSVALUE32_64)
-    store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-    store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+    EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
+    store32(value, &descriptor->asBits.payload);
+    store32(valueTag, &descriptor->asBits.tag);
 #endif
 }
 
-inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset, RegisterID bucketCounterRegister)
+inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset)
 {
     if (!shouldEmitProfiling())
         return;
-    emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset), bucketCounterRegister);
+    emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset));
 }
 
-inline void JIT::emitValueProfilingSite(RegisterID bucketCounterRegister)
+inline void JIT::emitValueProfilingSite()
 {
-    emitValueProfilingSite(m_bytecodeOffset, bucketCounterRegister);
+    emitValueProfilingSite(m_bytecodeOffset);
 }
-#endif // ENABLE(VALUE_PROFILER)
 
 inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
 {
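
With ValueProfile::numberOfBuckets fixed at one, the profiling site above
reduces to a single direct store, which is why the bucketCounterRegister
parameter and the random bucket-selection path could go. The essence, with
stand-in types rather than the real JSC ones:

    #include <cstdint>

    using EncodedJSValue = uint64_t; // stand-in for JSC's encoded value

    struct ValueProfile {
        static const int numberOfBuckets = 1;
        EncodedJSValue m_buckets[numberOfBuckets];
    };

    // One bucket means no counter register and no indexing: one store.
    inline void profileValue(ValueProfile* profile, EncodedJSValue value)
    {
        profile->m_buckets[0] = value;
    }
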
@@ -747,46 +728,26 @@
 
 inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
 {
-#if ENABLE(VALUE_PROFILER)
     emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
-#else
-    UNUSED_PARAM(bytecodeIndex);
-    emitArrayProfilingSite(structureAndIndexingType, scratch, 0);
-#endif
 }
 
 inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
 {
-#if ENABLE(VALUE_PROFILER)    
     store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole());
-#else
-    UNUSED_PARAM(arrayProfile);
-#endif
 }
 
 inline void JIT::emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile* arrayProfile)
 {
-#if ENABLE(VALUE_PROFILER)    
     store8(TrustedImm32(1), arrayProfile->addressOfOutOfBounds());
-#else
-    UNUSED_PARAM(arrayProfile);
-#endif
 }
 
 static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capability)
 {
-#if ENABLE(VALUE_PROFILER)
     return arrayModesInclude(arrayModes, capability);
-#else
-    UNUSED_PARAM(arrayModes);
-    UNUSED_PARAM(capability);
-    return false;
-#endif
 }
 
 inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
 {
-#if ENABLE(VALUE_PROFILER)
     ConcurrentJITLocker locker(m_codeBlock->m_lock);
     profile->computeUpdatedPrediction(locker, m_codeBlock);
     ArrayModes arrayModes = profile->observedArrayModes(locker);
@@ -797,10 +758,6 @@
     if (arrayProfileSaw(arrayModes, ArrayStorageShape))
         return JITArrayStorage;
     return JITContiguous;
-#else
-    UNUSED_PARAM(profile);
-    return JITContiguous;
-#endif
 }
 
 #if USE(JSVALUE32_64)
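
chooseArrayMode() now consults the profile unconditionally; the visible tail
of the function is a fallback chain over the observed array modes. A condensed
restatement (the bit value and the earlier, elided shape checks are
illustrative):

    #include <cstdint>

    using ArrayModes = uint32_t;                      // stand-in bitfield
    const ArrayModes ArrayStorageShapeMode = 1u << 0; // illustrative bit

    enum JITArrayMode { JITArrayStorage, JITContiguous };

    static bool arrayModesInclude(ArrayModes observed, ArrayModes capability)
    {
        return (observed & capability) != 0;
    }

    JITArrayMode chooseMode(ArrayModes observed)
    {
        // Narrower shapes (Int32, Double, ...) are checked first in the real
        // function; only the ArrayStorage fallback appears in the hunk above.
        if (arrayModesInclude(observed, ArrayStorageShapeMode))
            return JITArrayStorage;
        return JITContiguous;
    }
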
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index be6cba8..394d81a 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1071,7 +1071,7 @@
 
     signExtend32ToPtr(regT1, regT1);
     load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitPutVirtualRegister(dst, regT0);
 }
 
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 4d2a46a5..fd0c883 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -1170,7 +1170,7 @@
     
     loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
     loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT1);
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitStore(dst, regT1, regT0);
 }
 
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 7231174..4241baf 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -143,7 +143,7 @@
     resultOK.link(this);
 #endif
 
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitPutVirtualRegister(dst);
     
     m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
@@ -232,7 +232,7 @@
     m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
     m_byValInstructionIndex++;
 
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
 }
 
 void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
@@ -525,7 +525,7 @@
     addSlowCase(gen.slowPathJump());
     m_getByIds.append(gen);
 
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitPutVirtualRegister(resultVReg);
 }
 
@@ -743,7 +743,7 @@
         break;
     }
     emitPutVirtualRegister(dst);
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
 }
 
 void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index c724cde..5bc8d1a 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -168,7 +168,7 @@
     resultOK.link(this);
 #endif
 
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitStore(dst, regT1, regT0);
     
     m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
@@ -263,7 +263,7 @@
     m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
     m_byValInstructionIndex++;
 
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
 }
 
 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
@@ -487,7 +487,7 @@
     addSlowCase(gen.slowPathJump());
     m_getByIds.append(gen);
 
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitStore(dst, regT1, regT0);
 }
 
@@ -783,7 +783,7 @@
         addSlowCase(jump());
         break;
     }
-    emitValueProfilingSite(regT4);
+    emitValueProfilingSite();
     emitStore(dst, regT1, regT0);
 }
 
diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
index a4a9788..423069d 100644
--- a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
+++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
@@ -148,10 +148,4 @@
 #define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 0
 #endif
 
-#if ENABLE(VALUE_PROFILER)
-#define OFFLINE_ASM_VALUE_PROFILER 1
-#else
-#define OFFLINE_ASM_VALUE_PROFILER 0
-#endif
-
 #endif // LLIntOfflineAsmConfig_h
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index 4274ef9..4c7f489 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -119,7 +119,6 @@
         LLINT_END_IMPL();                       \
     } while (false)
 
-#if ENABLE(VALUE_PROFILER)
 #define LLINT_RETURN_PROFILED(opcode, value) do {               \
         JSValue __rp_returnValue = (value);                     \
         LLINT_CHECK_EXCEPTION();                                \
@@ -133,13 +132,6 @@
         JSValue::encode(value);                  \
     } while (false)
 
-#else // ENABLE(VALUE_PROFILER)
-#define LLINT_RETURN_PROFILED(opcode, value) LLINT_RETURN(value)
-
-#define LLINT_PROFILE_VALUE(opcode, value) do { } while (false)
-
-#endif // ENABLE(VALUE_PROFILER)
-
 #define LLINT_CALL_END_IMPL(exec, callTarget) LLINT_RETURN_TWO((callTarget), (exec))
 
 #define LLINT_CALL_THROW(exec, exceptionToThrow) do {                   \
@@ -565,16 +557,12 @@
         && isJSArray(baseValue)
         && ident == exec->propertyNames().length) {
         pc[0].u.opcode = LLInt::getOpcode(llint_op_get_array_length);
-#if ENABLE(VALUE_PROFILER)
         ArrayProfile* arrayProfile = codeBlock->getOrAddArrayProfile(pc - codeBlock->instructions().begin());
         arrayProfile->observeStructure(baseValue.asCell()->structure());
         pc[4].u.arrayProfile = arrayProfile;
-#endif
     }
 
-#if ENABLE(VALUE_PROFILER)    
     pc[OPCODE_LENGTH(op_get_by_id) - 1].u.profile->m_buckets[0] = JSValue::encode(result);
-#endif
     LLINT_END();
 }
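
With the #else fallbacks gone, LLINT_RETURN_PROFILED always records the result
before returning it. Its essence, with stand-in types (the real macro also
does the exception check and the interpreter's return protocol):

    #include <cstdint>

    using EncodedJSValue = uint64_t; // stand-in
    struct ValueProfile { EncodedJSValue m_buckets[1]; };

    inline EncodedJSValue returnProfiled(ValueProfile* profile, EncodedJSValue value)
    {
        profile->m_buckets[0] = value; // record the result for the profiler
        return value;                  // then return it as usual
    }
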
 
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index aabd477..5c1612d 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -259,9 +259,7 @@
 macro arrayProfile(structureAndIndexingType, profile, scratch)
     const structure = structureAndIndexingType
     const indexingType = structureAndIndexingType
-    if VALUE_PROFILER
-        storep structure, ArrayProfile::m_lastSeenStructure[profile]
-    end
+    storep structure, ArrayProfile::m_lastSeenStructure[profile]
     loadb Structure::m_indexingType[structure], indexingType
 end
 
@@ -343,37 +341,35 @@
 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
 # Must call dispatch(0) after calling this.
 macro functionInitialization(profileArgSkip)
-    if VALUE_PROFILER
-        # Profile the arguments. Unfortunately, we have no choice but to do this. This
-        # code is pretty horrendous because of the difference in ordering between
-        # arguments and value profiles, the desire to have a simple loop-down-to-zero
-        # loop, and the desire to use only three registers so as to preserve the PC and
-        # the code block. It is likely that this code should be rewritten in a more
-        # optimal way for architectures that have more than five registers available
-        # for arbitrary use in the interpreter.
-        loadi CodeBlock::m_numParameters[t1], t0
-        addp -profileArgSkip, t0 # Use addi because that's what has the peephole
-        assert(macro (ok) bpgteq t0, 0, ok end)
-        btpz t0, .argumentProfileDone
-        loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
-        mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
-        lshiftp 3, t0
-        addp t2, t3
-    .argumentProfileLoop:
-        if JSVALUE64
-            loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
-            subp sizeof ValueProfile, t3
-            storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
-        else
-            loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
-            subp sizeof ValueProfile, t3
-            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
-            loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
-            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
-        end
-        baddpnz -8, t0, .argumentProfileLoop
-    .argumentProfileDone:
+    # Profile the arguments. Unfortunately, we have no choice but to do this. This
+    # code is pretty horrendous because of the difference in ordering between
+    # arguments and value profiles, the desire to have a simple loop-down-to-zero
+    # loop, and the desire to use only three registers so as to preserve the PC and
+    # the code block. It is likely that this code should be rewritten in a more
+    # optimal way for architectures that have more than five registers available
+    # for arbitrary use in the interpreter.
+    loadi CodeBlock::m_numParameters[t1], t0
+    addp -profileArgSkip, t0 # Use addi because that's what has the peephole
+    assert(macro (ok) bpgteq t0, 0, ok end)
+    btpz t0, .argumentProfileDone
+    loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
+    mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
+    lshiftp 3, t0
+    addp t2, t3
+.argumentProfileLoop:
+    if JSVALUE64
+        loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        subp sizeof ValueProfile, t3
+        storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
+    else
+        loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        subp sizeof ValueProfile, t3
+        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
+        loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
     end
+    baddpnz -8, t0, .argumentProfileLoop
+.argumentProfileDone:
         
     # Check stack height.
     loadi CodeBlock::m_numCalleeRegisters[t1], t0
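
The now-unconditional argument-profiling loop above is easier to follow as
pseudo-C++. This restates its logic with simplified types; the real code walks
raw frame slots and stores tag and payload separately on 32-bit:

    #include <cstdint>
    #include <vector>

    struct ValueProfile { uint64_t m_buckets[1]; }; // stand-in

    // Mirrors the loop-down-to-zero structure in functionInitialization():
    // copy each argument value, skipping the first profileArgSkip of them,
    // into the matching argument value profile bucket.
    void profileArguments(const uint64_t* arguments, // this + declared args
                          std::vector<ValueProfile>& argumentValueProfiles,
                          int numParameters, int profileArgSkip)
    {
        for (int i = numParameters - 1; i >= profileArgSkip; --i)
            argumentValueProfiles[i].m_buckets[0] = arguments[i];
    }
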
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index 92ff430..f498448 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -496,11 +496,9 @@
 end
 
 macro valueProfile(tag, payload, operand, scratch)
-    if VALUE_PROFILER
-        loadp operand[PC], scratch
-        storei tag, ValueProfile::m_buckets + TagOffset[scratch]
-        storei payload, ValueProfile::m_buckets + PayloadOffset[scratch]
-    end
+    loadp operand[PC], scratch
+    storei tag, ValueProfile::m_buckets + TagOffset[scratch]
+    storei payload, ValueProfile::m_buckets + PayloadOffset[scratch]
 end
 
 
@@ -1483,10 +1481,8 @@
     dispatch(6)
 
 .opGetByValOutOfBounds:
-    if VALUE_PROFILER
-        loadpFromInstruction(4, t0)
-        storeb 1, ArrayProfile::m_outOfBounds[t0]
-    end
+    loadpFromInstruction(4, t0)
+    storeb 1, ArrayProfile::m_outOfBounds[t0]
 .opGetByValSlow:
     callSlowPath(_llint_slow_path_get_by_val)
     dispatch(6)
@@ -1556,10 +1552,8 @@
 
 .outOfBounds:
     biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
-    if VALUE_PROFILER
-        loadp 16[PC], t2
-        storeb 1, ArrayProfile::m_mayStoreToHole[t2]
-    end
+    loadp 16[PC], t2
+    storeb 1, ArrayProfile::m_mayStoreToHole[t2]
     addi 1, t3, t2
     storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
     jmp .storeResult
@@ -1626,10 +1620,8 @@
     dispatch(5)
 
 .opPutByValArrayStorageEmpty:
-    if VALUE_PROFILER
-        loadp 16[PC], t1
-        storeb 1, ArrayProfile::m_mayStoreToHole[t1]
-    end
+    loadp 16[PC], t1
+    storeb 1, ArrayProfile::m_mayStoreToHole[t1]
     addi 1, ArrayStorage::m_numValuesInVector[t0]
     bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
     addi 1, t3, t1
@@ -1637,10 +1629,8 @@
     jmp .opPutByValArrayStorageStoreResult
 
 .opPutByValOutOfBounds:
-    if VALUE_PROFILER
-        loadpFromInstruction(4, t0)
-        storeb 1, ArrayProfile::m_outOfBounds[t0]
-    end
+    loadpFromInstruction(4, t0)
+    storeb 1, ArrayProfile::m_outOfBounds[t0]
 .opPutByValSlow:
     callSlowPath(slowPath)
     dispatch(5)
@@ -1857,16 +1847,14 @@
 
 
 macro arrayProfileForCall()
-    if VALUE_PROFILER
-        loadi 16[PC], t3
-        negi t3
-        bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
-        loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
-        loadp JSCell::m_structure[t0], t0
-        loadp 24[PC], t1
-        storep t0, ArrayProfile::m_lastSeenStructure[t1]
-    .done:
-    end
+    loadi 16[PC], t3
+    negi t3
+    bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
+    loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
+    loadp JSCell::m_structure[t0], t0
+    loadp 24[PC], t1
+    storep t0, ArrayProfile::m_lastSeenStructure[t1]
+.done:
 end
 
 macro doCall(slowPath)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index e5236f1..c92156c 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -336,10 +336,8 @@
 end
 
 macro valueProfile(value, operand, scratch)
-    if VALUE_PROFILER
-        loadpFromInstruction(operand, scratch)
-        storeq value, ValueProfile::m_buckets[scratch]
-    end
+    loadpFromInstruction(operand, scratch)
+    storeq value, ValueProfile::m_buckets[scratch]
 end
 
 
@@ -1284,10 +1282,8 @@
     dispatch(6)
 
 .opGetByValOutOfBounds:
-    if VALUE_PROFILER
-        loadpFromInstruction(4, t0)
-        storeb 1, ArrayProfile::m_outOfBounds[t0]
-    end
+    loadpFromInstruction(4, t0)
+    storeb 1, ArrayProfile::m_outOfBounds[t0]
 .opGetByValSlow:
     callSlowPath(_llint_slow_path_get_by_val)
     dispatch(6)
@@ -1357,10 +1353,8 @@
 
 .outOfBounds:
     biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
-    if VALUE_PROFILER
-        loadp 32[PB, PC, 8], t2
-        storeb 1, ArrayProfile::m_mayStoreToHole[t2]
-    end
+    loadp 32[PB, PC, 8], t2
+    storeb 1, ArrayProfile::m_mayStoreToHole[t2]
     addi 1, t3, t2
     storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
     jmp .storeResult
@@ -1423,10 +1417,8 @@
     dispatch(5)
 
 .opPutByValArrayStorageEmpty:
-    if VALUE_PROFILER
-        loadpFromInstruction(4, t1)
-        storeb 1, ArrayProfile::m_mayStoreToHole[t1]
-    end
+    loadpFromInstruction(4, t1)
+    storeb 1, ArrayProfile::m_mayStoreToHole[t1]
     addi 1, ArrayStorage::m_numValuesInVector[t0]
     bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
     addi 1, t3, t1
@@ -1434,10 +1426,8 @@
     jmp .opPutByValArrayStorageStoreResult
 
 .opPutByValOutOfBounds:
-    if VALUE_PROFILER
-        loadpFromInstruction(4, t0)
-        storeb 1, ArrayProfile::m_outOfBounds[t0]
-    end
+    loadpFromInstruction(4, t0)
+    storeb 1, ArrayProfile::m_outOfBounds[t0]
 .opPutByValSlow:
     callSlowPath(slowPath)
     dispatch(5)
@@ -1659,16 +1649,14 @@
 
 
 macro arrayProfileForCall()
-    if VALUE_PROFILER
-        loadisFromInstruction(4, t3)
-        negp t3
-        loadq ThisArgumentOffset[cfr, t3, 8], t0
-        btqnz t0, tagMask, .done
-        loadp JSCell::m_structure[t0], t0
-        loadpFromInstruction(6, t1)
-        storep t0, ArrayProfile::m_lastSeenStructure[t1]
-    .done:
-    end
+    loadisFromInstruction(4, t3)
+    negp t3
+    loadq ThisArgumentOffset[cfr, t3, 8], t0
+    btqnz t0, tagMask, .done
+    loadp JSCell::m_structure[t0], t0
+    loadpFromInstruction(6, t1)
+    storep t0, ArrayProfile::m_lastSeenStructure[t1]
+.done:
 end
 
 macro doCall(slowPath)
diff --git a/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp b/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp
index d00e371..838153b 100644
--- a/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp
+++ b/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp
@@ -38,7 +38,6 @@
 {
     StringPrintStream out;
     
-#if ENABLE(VALUE_PROFILER)
     for (unsigned i = 0; i < codeBlock->numberOfArgumentValueProfiles(); ++i) {
         ConcurrentJITLocker locker(codeBlock->m_lock);
         CString description = codeBlock->valueProfileForArgument(i)->briefDescription(locker);
@@ -48,7 +47,6 @@
         out.print("arg", i, " (r", virtualRegisterForArgument(i).offset(), "): ", description);
         m_header.append(out.toCString());
     }
-#endif // ENABLE(VALUE_PROFILER)
     
     for (unsigned bytecodeIndex = 0; bytecodeIndex < codeBlock->instructions().size();) {
         out.reset();
diff --git a/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp b/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp
index a650492..f6a142e 100644
--- a/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp
+++ b/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp
@@ -124,7 +124,6 @@
         END_IMPL();                       \
     } while (false)
 
-#if ENABLE(VALUE_PROFILER)
 #define RETURN_PROFILED(opcode, value) do {                  \
        JSValue rpReturnValue = (value);                     \
         CHECK_EXCEPTION();                                   \
@@ -138,13 +137,6 @@
         JSValue::encode(value);                  \
     } while (false)
 
-#else // ENABLE(VALUE_PROFILER)
-#define RETURN_PROFILED(opcode, value) RETURN(value)
-
-#define PROFILE_VALUE(opcode, value) do { } while (false)
-
-#endif // ENABLE(VALUE_PROFILER)
-
 #define CALL_END_IMPL(exec, callTarget) RETURN_TWO((callTarget), (exec))
 
 #define CALL_THROW(exec, pc, exceptionToThrow) do {                     \