Get rid of the lastResultRegister optimization in the baseline JIT
https://bugs.webkit.org/show_bug.cgi?id=124171

Rubber stamped by Mark Hahnenberg.
        
The baseline JIT no longer needs to deliver peak throughput, and this optimization has
caused far too many OSR exit bugs. It also constrains how much we can do in the DFG/FTL.
So I'm getting rid of it.
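
For the record, this is roughly what the JSVALUE64 form of the optimization looked like,
condensed from the JITInlines.h hunks below (the constant-operand path and the
temporary-local check are omitted here for brevity):

    // emitPutVirtualRegister remembers which virtual register was just written from the
    // cached result register, so the next read of that register can skip the stack load.
    ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
    {
        store64(from, Address(callFrameRegister, dst * sizeof(Register)));
        m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
    }

    ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
    {
        if (src == m_lastResultBytecodeRegister && !atJumpTarget()) {
            // The value is still live in cachedResultRegister; reuse it instead of reloading.
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
        load64(Address(callFrameRegister, src * sizeof(Register)), dst);
        killLastResultRegister();
    }

The cost is that every call, slow path, jump target, and OSR exit has to conservatively
kill or reload the cached result (and the 32-bit JIT keeps an analogous tag/payload map),
which is exactly the bookkeeping this patch deletes.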

* dfg/DFGOSRExit.cpp:
(JSC::DFG::OSRExit::OSRExit):
(JSC::DFG::OSRExit::convertToForward):
* dfg/DFGOSRExit.h:
* dfg/DFGOSRExitCompiler32_64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompiler64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::SpeculativeJIT):
(JSC::DFG::SpeculativeJIT::compileMovHint):
(JSC::DFG::SpeculativeJIT::compileCurrentBlock):
* dfg/DFGSpeculativeJIT.h:
* ftl/FTLLowerDFGToLLVM.cpp:
(JSC::FTL::LowerDFGToLLVM::LowerDFGToLLVM):
(JSC::FTL::LowerDFGToLLVM::compileZombieHint):
(JSC::FTL::LowerDFGToLLVM::compileInvalidationPoint):
(JSC::FTL::LowerDFGToLLVM::appendOSRExit):
(JSC::FTL::LowerDFGToLLVM::observeMovHint):
* ftl/FTLOSRExit.cpp:
(JSC::FTL::OSRExit::OSRExit):
(JSC::FTL::OSRExit::convertToForward):
* ftl/FTLOSRExit.h:
* ftl/FTLOSRExitCompiler.cpp:
(JSC::FTL::compileStub):
* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
* jit/JIT.h:
(JSC::JIT::appendCall):
* jit/JITArithmetic32_64.cpp:
(JSC::JIT::emit_op_lshift):
(JSC::JIT::emitRightShift):
(JSC::JIT::emit_op_bitand):
(JSC::JIT::emit_op_bitor):
(JSC::JIT::emit_op_bitxor):
(JSC::JIT::emit_op_inc):
(JSC::JIT::emit_op_dec):
* jit/JITCall.cpp:
(JSC::JIT::emitPutCallResult):
(JSC::JIT::compileLoadVarargs):
* jit/JITInlines.h:
(JSC::JIT::emitGetFromCallFrameHeaderPtr):
(JSC::JIT::emitGetFromCallFrameHeader32):
(JSC::JIT::emitGetFromCallFrameHeader64):
(JSC::JIT::emitLoadTag):
(JSC::JIT::emitLoadPayload):
(JSC::JIT::emitLoad2):
(JSC::JIT::emitGetVirtualRegister):
(JSC::JIT::emitGetVirtualRegisters):
(JSC::JIT::emitPutVirtualRegister):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_mov):
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_new_func):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::emit_op_mov):
(JSC::JIT::emit_op_to_primitive):
(JSC::JIT::emit_op_to_number):
(JSC::JIT::emit_op_catch):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emit_op_resolve_scope):
(JSC::JIT::emit_op_get_from_scope):
(JSC::JIT::emit_op_put_to_scope):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emit_op_get_by_val):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_get_by_pname):
(JSC::JIT::emitResolveClosure):
(JSC::JIT::emit_op_resolve_scope):
(JSC::JIT::emit_op_get_from_scope):
(JSC::JIT::emit_op_init_global_const):
* jit/SlowPathCall.h:
(JSC::JITSlowPathCall::call):



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@159091 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index 3b7fcf5..10bcbd8 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,5 +1,92 @@
 2013-11-11  Filip Pizlo  <fpizlo@apple.com>
 
+        Get rid of the lastResultRegister optimization in the baseline JIT
+        https://bugs.webkit.org/show_bug.cgi?id=124171
+
+        Rubber stamped by Mark Hahnenberg.
+        
+        The baseline JIT no longer needs to deliver peak throughput, and this optimization has
+        caused far too many OSR exit bugs. It also constrains how much we can do in the DFG/FTL.
+        So I'm getting rid of it.
+
+        * dfg/DFGOSRExit.cpp:
+        (JSC::DFG::OSRExit::OSRExit):
+        (JSC::DFG::OSRExit::convertToForward):
+        * dfg/DFGOSRExit.h:
+        * dfg/DFGOSRExitCompiler32_64.cpp:
+        (JSC::DFG::OSRExitCompiler::compileExit):
+        * dfg/DFGOSRExitCompiler64.cpp:
+        (JSC::DFG::OSRExitCompiler::compileExit):
+        * dfg/DFGSpeculativeJIT.cpp:
+        (JSC::DFG::SpeculativeJIT::SpeculativeJIT):
+        (JSC::DFG::SpeculativeJIT::compileMovHint):
+        (JSC::DFG::SpeculativeJIT::compileCurrentBlock):
+        * dfg/DFGSpeculativeJIT.h:
+        * ftl/FTLLowerDFGToLLVM.cpp:
+        (JSC::FTL::LowerDFGToLLVM::LowerDFGToLLVM):
+        (JSC::FTL::LowerDFGToLLVM::compileZombieHint):
+        (JSC::FTL::LowerDFGToLLVM::compileInvalidationPoint):
+        (JSC::FTL::LowerDFGToLLVM::appendOSRExit):
+        (JSC::FTL::LowerDFGToLLVM::observeMovHint):
+        * ftl/FTLOSRExit.cpp:
+        (JSC::FTL::OSRExit::OSRExit):
+        (JSC::FTL::OSRExit::convertToForward):
+        * ftl/FTLOSRExit.h:
+        * ftl/FTLOSRExitCompiler.cpp:
+        (JSC::FTL::compileStub):
+        * jit/JIT.cpp:
+        (JSC::JIT::JIT):
+        (JSC::JIT::privateCompileMainPass):
+        (JSC::JIT::privateCompileSlowCases):
+        * jit/JIT.h:
+        (JSC::JIT::appendCall):
+        * jit/JITArithmetic32_64.cpp:
+        (JSC::JIT::emit_op_lshift):
+        (JSC::JIT::emitRightShift):
+        (JSC::JIT::emit_op_bitand):
+        (JSC::JIT::emit_op_bitor):
+        (JSC::JIT::emit_op_bitxor):
+        (JSC::JIT::emit_op_inc):
+        (JSC::JIT::emit_op_dec):
+        * jit/JITCall.cpp:
+        (JSC::JIT::emitPutCallResult):
+        (JSC::JIT::compileLoadVarargs):
+        * jit/JITInlines.h:
+        (JSC::JIT::emitGetFromCallFrameHeaderPtr):
+        (JSC::JIT::emitGetFromCallFrameHeader32):
+        (JSC::JIT::emitGetFromCallFrameHeader64):
+        (JSC::JIT::emitLoadTag):
+        (JSC::JIT::emitLoadPayload):
+        (JSC::JIT::emitLoad2):
+        (JSC::JIT::emitGetVirtualRegister):
+        (JSC::JIT::emitGetVirtualRegisters):
+        (JSC::JIT::emitPutVirtualRegister):
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::emit_op_mov):
+        (JSC::JIT::emit_op_catch):
+        (JSC::JIT::emit_op_new_func):
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::emit_op_mov):
+        (JSC::JIT::emit_op_to_primitive):
+        (JSC::JIT::emit_op_to_number):
+        (JSC::JIT::emit_op_catch):
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::emit_op_resolve_scope):
+        (JSC::JIT::emit_op_get_from_scope):
+        (JSC::JIT::emit_op_put_to_scope):
+        * jit/JITPropertyAccess32_64.cpp:
+        (JSC::JIT::emit_op_get_by_val):
+        (JSC::JIT::emit_op_get_by_id):
+        (JSC::JIT::emit_op_get_by_pname):
+        (JSC::JIT::emitResolveClosure):
+        (JSC::JIT::emit_op_resolve_scope):
+        (JSC::JIT::emit_op_get_from_scope):
+        (JSC::JIT::emit_op_init_global_const):
+        * jit/SlowPathCall.h:
+        (JSC::JITSlowPathCall::call):
+
+2013-11-11  Filip Pizlo  <fpizlo@apple.com>
+
         Remove ConstantFoldingPhase's weirdo compile-time optimization
         https://bugs.webkit.org/show_bug.cgi?id=124169
 
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index 9fcb085..2975ed4 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -42,7 +42,6 @@
     , m_patchableCodeOffset(0)
     , m_recoveryIndex(recoveryIndex)
     , m_streamIndex(streamIndex)
-    , m_lastSetOperand(jit->m_lastSetOperand)
 {
     ASSERT(m_codeOrigin.isSet());
 }
@@ -85,7 +84,6 @@
     
     ASSERT(lastMovHint);
     ASSERT(lastMovHint->child1() == currentNode);
-    m_lastSetOperand = lastMovHint->local();
     m_valueRecoveryOverride = adoptRef(
         new ValueRecoveryOverride(lastMovHint->local(), valueRecovery));
 }
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h
index 8fe18d7..716bdd2 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h
@@ -105,7 +105,6 @@
     void convertToForward(BasicBlock*, Node*, unsigned nodeIndex, const ValueRecovery&);
 
     unsigned m_streamIndex;
-    VirtualRegister m_lastSetOperand;
     
     RefPtr<ValueRecoveryOverride> m_valueRecoveryOverride;
 };
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index 1a74f37..ed0fa04 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -493,14 +493,7 @@
         }
     }
     
-    // 11) Load the result of the last bytecode operation into regT0.
-    
-    if (exit.m_lastSetOperand.isValid()) {
-        m_jit.load32(AssemblyHelpers::payloadFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
-        m_jit.load32(AssemblyHelpers::tagFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
-    }
-    
-    // 12) And finish.
+    // 11) And finish.
     
     adjustAndJumpToTarget(m_jit, exit);
 }
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 4ba99b1..92019bf 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -432,12 +432,7 @@
         }
     }
     
-    // 11) Load the result of the last bytecode operation into regT0.
-    
-    if (exit.m_lastSetOperand.isValid())
-        m_jit.load64(AssemblyHelpers::addressFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
-    
-    // 12) And finish.
+    // 11) And finish.
     
     adjustAndJumpToTarget(m_jit, exit);
 }
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 723f551..3166ac8 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -47,7 +47,6 @@
     , m_currentNode(0)
     , m_indexInBlock(0)
     , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
-    , m_lastSetOperand(VirtualRegister())
     , m_state(m_jit.graph())
     , m_interpreter(m_jit.graph(), m_state)
     , m_stream(&jit.jitCode()->variableEventStream)
@@ -1539,8 +1538,6 @@
 {
     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
     
-    m_lastSetOperand = node->local();
-
     Node* child = node->child1().node();
     noticeOSRBirth(child);
     
@@ -1621,7 +1618,6 @@
             VariableEvent::setLocal(virtualRegisterForLocal(i), variable->machineLocal(), format));
     }
     
-    m_lastSetOperand = VirtualRegister();
     m_codeOriginForExitTarget = CodeOrigin();
     m_codeOriginForExitProfile = CodeOrigin();
     
@@ -1667,7 +1663,6 @@
                 break;
                 
             case ZombieHint: {
-                m_lastSetOperand = m_currentNode->local();
                 recordSetLocal(DataFormatDead);
                 break;
             }
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 89a6989..e723da7 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -2254,7 +2254,6 @@
     };
     Vector<BranchRecord, 8> m_branches;
 
-    VirtualRegister m_lastSetOperand;
     CodeOrigin m_codeOriginForExitTarget;
     CodeOrigin m_codeOriginForExitProfile;
     
diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
index 6222964..7f68da7 100644
--- a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
+++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
@@ -71,7 +71,6 @@
         , m_heaps(state.context)
         , m_out(state.context)
         , m_valueSources(OperandsLike, state.graph.block(0)->variablesAtHead)
-        , m_lastSetOperand(VirtualRegister())
         , m_state(state.graph)
         , m_interpreter(state.graph, m_state)
         , m_stackmapIDs(0)
@@ -676,7 +675,6 @@
     void compileZombieHint()
     {
         VariableAccessData* data = m_node->variableAccessData();
-        m_lastSetOperand = data->local();
         m_valueSources.operand(data->local()) = ValueSource(SourceIsDead);
     }
     
@@ -2550,7 +2548,7 @@
         
         m_ftlState.jitCode->osrExit.append(OSRExit(
             UncountableInvalidation, InvalidValueFormat, MethodOfGettingAValueProfile(),
-            m_codeOriginForExitTarget, m_codeOriginForExitProfile, m_lastSetOperand.offset(),
+            m_codeOriginForExitTarget, m_codeOriginForExitProfile,
             m_valueSources.numberOfArguments(), m_valueSources.numberOfLocals()));
         m_ftlState.finalizer->osrExit.append(OSRExitCompilationInfo());
         
@@ -3897,7 +3895,7 @@
         
         m_ftlState.jitCode->osrExit.append(OSRExit(
             kind, lowValue.format(), m_graph.methodOfGettingAValueProfileFor(highValue),
-            m_codeOriginForExitTarget, m_codeOriginForExitProfile, m_lastSetOperand.offset(),
+            m_codeOriginForExitTarget, m_codeOriginForExitProfile,
             m_valueSources.numberOfArguments(), m_valueSources.numberOfLocals()));
         m_ftlState.finalizer->osrExit.append(OSRExitCompilationInfo());
         
@@ -4116,7 +4114,6 @@
         
         VirtualRegister operand = node->local();
         
-        m_lastSetOperand = operand;
         m_valueSources.operand(operand) = ValueSource(node->child1().node());
     }
     
@@ -4280,7 +4277,6 @@
     HashMap<Node*, LValue> m_phis;
     
     Operands<ValueSource> m_valueSources;
-    VirtualRegister m_lastSetOperand;
     
     InPlaceAbstractState m_state;
     AbstractInterpreter<InPlaceAbstractState> m_interpreter;
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExit.cpp b/Source/JavaScriptCore/ftl/FTLOSRExit.cpp
index 65f53a6..3fbe7db 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExit.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOSRExit.cpp
@@ -43,13 +43,12 @@
 OSRExit::OSRExit(
     ExitKind exitKind, ValueFormat profileValueFormat,
     MethodOfGettingAValueProfile valueProfile, CodeOrigin codeOrigin,
-    CodeOrigin originForProfile, int lastSetOperand, unsigned numberOfArguments,
+    CodeOrigin originForProfile, unsigned numberOfArguments,
     unsigned numberOfLocals)
     : OSRExitBase(exitKind, codeOrigin, originForProfile)
     , m_profileValueFormat(profileValueFormat)
     , m_valueProfile(valueProfile)
     , m_patchableCodeOffset(0)
-    , m_lastSetOperand(lastSetOperand)
     , m_values(numberOfArguments, numberOfLocals)
 {
 }
@@ -79,7 +78,6 @@
         return;
     
     VirtualRegister overriddenOperand = lastMovHint->local();
-    m_lastSetOperand = overriddenOperand;
     
     // Is the value for this operand being passed as an argument to the exit, or is
     // it something else? If it's an argument already, then replace that argument;
@@ -94,7 +92,7 @@
     
     unsigned argument = arguments.size();
     arguments.append(value.value());
-    m_values.operand(m_lastSetOperand) = ExitValue::exitArgument(
+    m_values.operand(overriddenOperand) = ExitValue::exitArgument(
         ExitArgument(value.format(), argument));
 }
 
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExit.h b/Source/JavaScriptCore/ftl/FTLOSRExit.h
index 45d2fef..1460f4f 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExit.h
+++ b/Source/JavaScriptCore/ftl/FTLOSRExit.h
@@ -145,7 +145,7 @@
 struct OSRExit : public DFG::OSRExitBase {
     OSRExit(
         ExitKind, ValueFormat profileValueFormat, MethodOfGettingAValueProfile,
-        CodeOrigin, CodeOrigin originForProfile, int lastSetOperand,
+        CodeOrigin, CodeOrigin originForProfile,
         unsigned numberOfArguments, unsigned numberOfLocals);
     
     MacroAssemblerCodeRef m_code;
@@ -161,8 +161,6 @@
     // Offset within the exit stubs of the stub for this exit.
     unsigned m_patchableCodeOffset;
     
-    VirtualRegister m_lastSetOperand;
-    
     Operands<ExitValue> m_values;
     
     uint32_t m_stackmapID;
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
index 1813993..cb8f5f9 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
@@ -164,12 +164,6 @@
     
     handleExitCounts(jit, exit);
     reifyInlinedCallFrames(jit, exit);
-    
-    if (exit.m_lastSetOperand.isValid()) {
-        jit.load64(
-            AssemblyHelpers::addressFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
-    }
-    
     adjustAndJumpToTarget(jit, exit);
     
     LinkBuffer patchBuffer(*vm, &jit, codeBlock);
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 6d253c7..2d3a1df 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -80,16 +80,6 @@
     , m_putByIdIndex(UINT_MAX)
     , m_byValInstructionIndex(UINT_MAX)
     , m_callLinkInfoIndex(UINT_MAX)
-#if USE(JSVALUE32_64)
-    , m_jumpTargetIndex(0)
-    , m_mappedBytecodeOffset((unsigned)-1)
-    , m_mappedVirtualRegisterIndex(UINT_MAX)
-    , m_mappedTag((RegisterID)-1)
-    , m_mappedPayload((RegisterID)-1)
-#else
-    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
-    , m_jumpTargetsPosition(0)
-#endif
     , m_randomGenerator(cryptographicallyRandomNumber())
 #if ENABLE(VALUE_PROFILER)
     , m_canBeOptimized(false)
@@ -181,11 +171,6 @@
             sampleInstruction(currentInstruction);
 #endif
 
-#if USE(JSVALUE64)
-        if (atJumpTarget())
-            killLastResultRegister();
-#endif
-
         m_labels[m_bytecodeOffset] = label();
 
 #if ENABLE(JIT_VERBOSE)
@@ -376,10 +361,6 @@
 #endif
 
     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
-#if USE(JSVALUE64)
-        killLastResultRegister();
-#endif
-
         m_bytecodeOffset = iter->to;
 
         unsigned firstTo = m_bytecodeOffset;
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index f8f309f..a967b15 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -266,11 +266,6 @@
         {
             Call functionCall = call();
             m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value()));
-#if USE(JSVALUE32_64)
-            unmap();
-#else
-            killLastResultRegister();
-#endif
             return functionCall;
         }
 
@@ -390,19 +385,10 @@
         void emitStore(int index, const JSValue constant, RegisterID base = callFrameRegister);
         void emitStoreInt32(int index, RegisterID payload, bool indexIsInt32 = false);
         void emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32 = false);
-        void emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength);
         void emitStoreCell(int index, RegisterID payload, bool indexIsCell = false);
         void emitStoreBool(int index, RegisterID payload, bool indexIsBool = false);
         void emitStoreDouble(int index, FPRegisterID value);
 
-        bool isLabeled(unsigned bytecodeOffset);
-        void map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload);
-        void unmap(RegisterID);
-        void unmap();
-        bool isMapped(int virtualRegisterIndex);
-        bool getMappedPayload(int virtualRegisterIndex, RegisterID& payload);
-        bool getMappedTag(int virtualRegisterIndex, RegisterID& tag);
-        
         void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex);
         void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag);
 
@@ -435,8 +421,6 @@
 
         int32_t getConstantOperandImmediateInt(int src);
 
-        void killLastResultRegister();
-
         Jump emitJumpIfJSCell(RegisterID);
         Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
         void emitJumpSlowCaseIfJSCell(RegisterID);
@@ -645,8 +629,6 @@
         bool isOperandConstantImmediateInt(int src);
         bool isOperandConstantImmediateChar(int src);
 
-        bool atJumpTarget();
-
         Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
         {
             return iter++->from;
@@ -815,17 +797,6 @@
         unsigned m_byValInstructionIndex;
         unsigned m_callLinkInfoIndex;
 
-#if USE(JSVALUE32_64)
-        unsigned m_jumpTargetIndex;
-        unsigned m_mappedBytecodeOffset;
-        int m_mappedVirtualRegisterIndex;
-        RegisterID m_mappedTag;
-        RegisterID m_mappedPayload;
-#else
-        int m_lastResultBytecodeRegister;
-#endif
-        unsigned m_jumpTargetsPosition;
-
         OwnPtr<JITDisassembler> m_disassembler;
         RefPtr<Profiler::Compilation> m_compilation;
         WeakRandom m_randomGenerator;
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 9b8f046..3bc025a 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -175,7 +175,7 @@
         emitLoad(op1, regT1, regT0);
         addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
-        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
+        emitStoreInt32(dst, regT0, dst == op1);
         return;
     }
 
@@ -184,7 +184,7 @@
         addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     lshift32(regT2, regT0);
-    emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
+    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
 }
 
 void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -223,7 +223,7 @@
                 rshift32(Imm32(shift), regT0);
         } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
             addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
-        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
+        emitStoreInt32(dst, regT0, dst == op1);
     } else {
         emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
         if (!isOperandConstantImmediateInt(op1))
@@ -234,7 +234,7 @@
             addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
         } else
             rshift32(regT2, regT0);
-        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
+        emitStoreInt32(dst, regT0, dst == op1);
     }
 }
 
@@ -337,7 +337,7 @@
         emitLoad(op, regT1, regT0);
         addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         and32(Imm32(constant), regT0);
-        emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
+        emitStoreInt32(dst, regT0, dst == op);
         return;
     }
 
@@ -345,7 +345,7 @@
     addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     and32(regT2, regT0);
-    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
+    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
 }
 
 void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -377,7 +377,7 @@
         emitLoad(op, regT1, regT0);
         addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         or32(Imm32(constant), regT0);
-        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
+        emitStoreInt32(dst, regT0, op == dst);
         return;
     }
 
@@ -385,7 +385,7 @@
     addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     or32(regT2, regT0);
-    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
+    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
 }
 
 void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -417,7 +417,7 @@
         emitLoad(op, regT1, regT0);
         addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         xor32(Imm32(constant), regT0);
-        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
+        emitStoreInt32(dst, regT0, op == dst);
         return;
     }
 
@@ -425,7 +425,7 @@
     addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     xor32(regT2, regT0);
-    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
+    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
 }
 
 void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -451,7 +451,7 @@
 
     addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
-    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_inc));
+    emitStoreInt32(srcDst, regT0, true);
 }
 
 void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -474,7 +474,7 @@
 
     addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
-    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_dec));
+    emitStoreInt32(srcDst, regT0, true);
 }
 
 void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 9b68db2..731710f 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -55,13 +55,6 @@
     int dst = instruction[1].u.operand;
     emitValueProfilingSite(regT4);
     emitPutVirtualRegister(dst);
-    if (canBeOptimizedOrInlined()) {
-        // Make lastResultRegister tracking simpler in the DFG. This is needed because
-        // the DFG may have the SetLocal corresponding to this Call's return value in
-        // a different basic block, if inlining happened. The DFG isn't smart enough to
-        // track the baseline JIT's last result register across basic blocks.
-        killLastResultRegister();
-    }
 }
 
 void JIT::compileLoadVarargs(Instruction* instruction)
@@ -70,8 +63,6 @@
     int arguments = instruction[4].u.operand;
     int firstFreeRegister = instruction[5].u.operand;
 
-    killLastResultRegister();
-
     JumpList slowCase;
     JumpList end;
     bool canOptimize = m_codeBlock->usesArguments()
diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h
index 0782357..bc88ecd 100644
--- a/Source/JavaScriptCore/jit/JITInlines.h
+++ b/Source/JavaScriptCore/jit/JITInlines.h
@@ -57,24 +57,17 @@
 ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
 {
     loadPtr(Address(from, entry * sizeof(Register)), to);
-#if USE(JSVALUE64)
-    killLastResultRegister();
-#endif
 }
 
 ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
 {
     load32(Address(from, entry * sizeof(Register)), to);
-#if USE(JSVALUE64)
-    killLastResultRegister();
-#endif
 }
 
 #if USE(JSVALUE64)
 ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
 {
     load64(Address(from, entry * sizeof(Register)), to);
-    killLastResultRegister();
 }
 #endif
 
@@ -106,16 +99,6 @@
     return nakedCall;
 }
 
-ALWAYS_INLINE bool JIT::atJumpTarget()
-{
-    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
-        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
-            return true;
-        ++m_jumpTargetsPosition;
-    }
-    return false;
-}
-
 ALWAYS_INLINE void JIT::updateTopCallFrame()
 {
     ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
@@ -806,40 +789,22 @@
 
 inline void JIT::emitLoadTag(int index, RegisterID tag)
 {
-    RegisterID mappedTag;
-    if (getMappedTag(index, mappedTag)) {
-        move(mappedTag, tag);
-        unmap(tag);
-        return;
-    }
-
     if (m_codeBlock->isConstantRegisterIndex(index)) {
         move(Imm32(getConstantOperand(index).tag()), tag);
-        unmap(tag);
         return;
     }
 
     load32(tagFor(index), tag);
-    unmap(tag);
 }
 
 inline void JIT::emitLoadPayload(int index, RegisterID payload)
 {
-    RegisterID mappedPayload;
-    if (getMappedPayload(index, mappedPayload)) {
-        move(mappedPayload, payload);
-        unmap(payload);
-        return;
-    }
-
     if (m_codeBlock->isConstantRegisterIndex(index)) {
         move(Imm32(getConstantOperand(index).payload()), payload);
-        unmap(payload);
         return;
     }
 
     load32(payloadFor(index), payload);
-    unmap(payload);
 }
 
 inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
@@ -871,11 +836,6 @@
 
 inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
 {
-    if (isMapped(index1)) {
-        emitLoad(index1, tag1, payload1);
-        emitLoad(index2, tag2, payload2);
-        return;
-    }
     emitLoad(index2, tag2, payload2);
     emitLoad(index1, tag1, payload1);
 }
@@ -912,12 +872,6 @@
         store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
 }
 
-inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
-{
-    emitStoreInt32(index, payload, indexIsInt32);
-    map(m_bytecodeOffset + opcodeLength, index, tag, payload);
-}
-
 inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
 {
     store32(payload, payloadFor(index, callFrameRegister));
@@ -955,81 +909,6 @@
     emitStore(dst, jsUndefined());
 }
 
-inline bool JIT::isLabeled(unsigned bytecodeOffset)
-{
-    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
-        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
-        if (jumpTarget == bytecodeOffset)
-            return true;
-        if (jumpTarget > bytecodeOffset)
-            return false;
-    }
-    return false;
-}
-
-inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
-{
-    if (isLabeled(bytecodeOffset))
-        return;
-
-    m_mappedBytecodeOffset = bytecodeOffset;
-    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
-    m_mappedTag = tag;
-    m_mappedPayload = payload;
-    
-    ASSERT(!canBeOptimizedOrInlined() || m_mappedPayload == regT0);
-    ASSERT(!canBeOptimizedOrInlined() || m_mappedTag == regT1);
-}
-
-inline void JIT::unmap(RegisterID registerID)
-{
-    if (m_mappedTag == registerID)
-        m_mappedTag = (RegisterID)-1;
-    else if (m_mappedPayload == registerID)
-        m_mappedPayload = (RegisterID)-1;
-}
-
-inline void JIT::unmap()
-{
-    m_mappedBytecodeOffset = (unsigned)-1;
-    m_mappedVirtualRegisterIndex = UINT_MAX;
-    m_mappedTag = (RegisterID)-1;
-    m_mappedPayload = (RegisterID)-1;
-}
-
-inline bool JIT::isMapped(int virtualRegisterIndex)
-{
-    if (m_mappedBytecodeOffset != m_bytecodeOffset)
-        return false;
-    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
-        return false;
-    return true;
-}
-
-inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
-{
-    if (m_mappedBytecodeOffset != m_bytecodeOffset)
-        return false;
-    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
-        return false;
-    if (m_mappedPayload == (RegisterID)-1)
-        return false;
-    payload = m_mappedPayload;
-    return true;
-}
-
-inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
-{
-    if (m_mappedBytecodeOffset != m_bytecodeOffset)
-        return false;
-    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
-        return false;
-    if (m_mappedTag == (RegisterID)-1)
-        return false;
-    tag = m_mappedTag;
-    return true;
-}
-
 inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
 {
     if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
@@ -1074,11 +953,6 @@
 
 #else // USE(JSVALUE32_64)
 
-ALWAYS_INLINE void JIT::killLastResultRegister()
-{
-    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
-}
-
 // get arg puts an arg from the SF register array into a h/w register
 ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
 {
@@ -1091,20 +965,10 @@
             move(TrustedImm64(JSValue::encode(value)), dst);
         else
             move(Imm64(JSValue::encode(value)), dst);
-        killLastResultRegister();
-        return;
-    }
-
-    if (src == m_lastResultBytecodeRegister && operandIsLocal(src) && m_codeBlock->isTemporaryRegisterIndex(VirtualRegister(src).toLocal()) && !atJumpTarget()) {
-        // The argument we want is already stored in eax
-        if (dst != cachedResultRegister)
-            move(cachedResultRegister, dst);
-        killLastResultRegister();
         return;
     }
 
     load64(Address(callFrameRegister, src * sizeof(Register)), dst);
-    killLastResultRegister();
 }
 
 ALWAYS_INLINE void JIT::emitGetVirtualRegister(VirtualRegister src, RegisterID dst)
@@ -1114,13 +978,8 @@
 
 ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
 {
-    if (src2 == m_lastResultBytecodeRegister) {
-        emitGetVirtualRegister(src2, dst2);
-        emitGetVirtualRegister(src1, dst1);
-    } else {
-        emitGetVirtualRegister(src1, dst1);
-        emitGetVirtualRegister(src2, dst2);
-    }
+    emitGetVirtualRegister(src1, dst1);
+    emitGetVirtualRegister(src2, dst2);
 }
 
 ALWAYS_INLINE void JIT::emitGetVirtualRegisters(VirtualRegister src1, RegisterID dst1, VirtualRegister src2, RegisterID dst2)
@@ -1141,7 +1000,6 @@
 ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
 {
     store64(from, Address(callFrameRegister, dst * sizeof(Register)));
-    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
 }
 
 ALWAYS_INLINE void JIT::emitPutVirtualRegister(VirtualRegister dst, RegisterID from)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index ce167d3..5110ec0 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -55,30 +55,10 @@
     int dst = currentInstruction[1].u.operand;
     int src = currentInstruction[2].u.operand;
 
-    if (canBeOptimizedOrInlined()) {
-        // Use simpler approach, since the DFG thinks that the last result register
-        // is always set to the destination on every operation.
-        emitGetVirtualRegister(src, regT0);
-        emitPutVirtualRegister(dst);
-    } else {
-        if (m_codeBlock->isConstantRegisterIndex(src)) {
-            if (!getConstantOperand(src).isNumber())
-                store64(TrustedImm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
-            else
-                store64(Imm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
-            if (dst == m_lastResultBytecodeRegister)
-                killLastResultRegister();
-        } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
-            // If either the src or dst is the cached register go though
-            // get/put registers to make sure we track this correctly.
-            emitGetVirtualRegister(src, regT0);
-            emitPutVirtualRegister(dst);
-        } else {
-            // Perform the copy via regT1; do not disturb any mapping in regT0.
-            load64(Address(callFrameRegister, src * sizeof(Register)), regT1);
-            store64(regT1, Address(callFrameRegister, dst * sizeof(Register)));
-        }
-    }
+    // Use simpler approach, since the DFG thinks that the last result register
+    // is always set to the destination on every operation.
+    emitGetVirtualRegister(src, regT0);
+    emitPutVirtualRegister(dst);
 }
 
 void JIT::emit_op_end(Instruction* currentInstruction)
@@ -661,7 +641,6 @@
 
 void JIT::emit_op_catch(Instruction* currentInstruction)
 {
-    killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
     move(regT0, callFrameRegister);
     move(TrustedImmPtr(m_vm), regT3);
     load64(Address(regT3, VM::exceptionOffset()), regT0);
@@ -1188,14 +1167,8 @@
     FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[2].u.operand);
     callOperation(operationNewFunction, dst, funcExec);
 
-    if (currentInstruction[3].u.operand) {
-#if USE(JSVALUE32_64)        
-        unmap();
-#else
-        killLastResultRegister();
-#endif
+    if (currentInstruction[3].u.operand)
         lazyJump.link(this);
-    }
 }
 
 void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 266f625..c081e80 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -204,7 +204,6 @@
     else {
         emitLoad(src, regT1, regT0);
         emitStore(dst, regT1, regT0);
-        map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
     }
 }
 
@@ -432,7 +431,6 @@
 
     if (dst != src)
         emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
 }
 
 void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -956,7 +954,6 @@
 
     if (src != dst)
         emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_to_number), dst, regT1, regT0);
 }
 
 void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -990,7 +987,6 @@
 
     unsigned exception = currentInstruction[1].u.operand;
     emitStore(exception, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
 }
 
 void JIT::emit_op_switch_imm(Instruction* currentInstruction)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index ed07903..fa5bf9c 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -675,7 +675,6 @@
         emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
         break;
     case Dynamic:
-        killLastResultRegister();
         addSlowCase(jump());
         break;
     }
@@ -744,7 +743,6 @@
         emitGetClosureVar(scope, *operandSlot);
         break;
     case Dynamic:
-        killLastResultRegister();
         addSlowCase(jump());
         break;
     }
@@ -812,7 +810,6 @@
         emitPutClosureVar(scope, *operandSlot, value);
         break;
     case Dynamic:
-        killLastResultRegister();
         addSlowCase(jump());
         break;
     }
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index e9d7796..5f0dbe8 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -175,7 +175,6 @@
 
     emitValueProfilingSite(regT4);
     emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
     
     m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
 }
@@ -491,7 +490,6 @@
 
     emitValueProfilingSite(regT4);
     emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
 }
 
 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -646,7 +644,6 @@
     compileGetDirectOffset(regT2, regT1, regT0, regT3);    
     
     emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
 }
 
 void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -687,7 +684,6 @@
     for (unsigned i = 0; i < depth; ++i)
         loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
     emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_resolve_scope), dst, regT1, regT0);
 }
 
 void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
@@ -705,7 +701,6 @@
         move(TrustedImm32(JSValue::CellTag), regT1);
         move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
         emitStore(dst, regT1, regT0);
-        map(m_bytecodeOffset + OPCODE_LENGTH(op_resolve_scope), dst, regT1, regT0);
         break;
     case ClosureVar:
     case ClosureVarWithVarInjectionChecks:
@@ -788,7 +783,6 @@
     }
     emitValueProfilingSite(regT4);
     emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_from_scope), dst, regT1, regT0);
 }
 
 void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -887,7 +881,6 @@
 
     store32(regT1, registerPointer->tagPointer());
     store32(regT0, registerPointer->payloadPointer());
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_init_global_const), value, regT1, regT0);
 }
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/jit/SlowPathCall.h b/Source/JavaScriptCore/jit/SlowPathCall.h
index 090a4ad..f0aa28e 100644
--- a/Source/JavaScriptCore/jit/SlowPathCall.h
+++ b/Source/JavaScriptCore/jit/SlowPathCall.h
@@ -77,11 +77,6 @@
             m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
 #endif
         
-#if USE(JSVALUE32_64)
-        m_jit->unmap();
-#else
-        m_jit->killLastResultRegister();
-#endif   
         m_jit->exceptionCheck();
         return call;
     }