fourthTier: LLInt shouldn't store an offset call PC during op_call-like calls
https://bugs.webkit.org/show_bug.cgi?id=117048

Reviewed by Mark Hahnenberg.

This just makes everything consistent in the LLInt: whenever an op calls out,
it stores its own PC, never the next op's PC.
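
For illustration only (not part of the patch), the convention change looks
roughly like this in the C++ slow paths -- setCurrentVPC() and OPCODE_LENGTH()
are the real helpers touched by the diff, the surrounding framing is a sketch:

    // Before: a call-like op stored the *next* op's PC before calling out.
    exec->setCurrentVPC(pc + OPCODE_LENGTH(op_call_varargs));

    // After: the op stores its *own* PC; the return path advances past the
    // (now uniformly 6-slot) call opcode instead, e.g. dispatch(6) in the
    // LLInt's dispatchAfterCall().
    exec->setCurrentVPC(pc);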

* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::dumpBytecode):
(JSC::CodeBlock::linkIncomingCall):
(JSC::CodeBlock::bytecodeOffset):
* bytecode/CodeBlock.h:
* bytecode/Opcode.h:
(JSC::padOpcodeName):
* bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::emitCallVarargs):
* llint/LLIntExceptions.cpp:
(JSC::LLInt::interpreterThrowInCaller):
(JSC::LLInt::returnToThrow):
(JSC::LLInt::callToThrow):
* llint/LLIntSlowPaths.cpp:
(JSC::LLInt::LLINT_SLOW_PATH_DECL):
* llint/LowLevelInterpreter.asm:
* llint/LowLevelInterpreter.cpp:
(JSC::CLoop::execute):
* llint/LowLevelInterpreter32_64.asm:
* llint/LowLevelInterpreter64.asm:

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@153199 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index 14f4bea..619ec86 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,3 +1,34 @@
+2013-05-30  Filip Pizlo  <fpizlo@apple.com>
+
+        fourthTier: LLInt shouldn't store an offset call PC during op_call-like calls
+        https://bugs.webkit.org/show_bug.cgi?id=117048
+
+        Reviewed by Mark Hahnenberg.
+        
+        This just makes everything consistent in the LLInt: whenever an op calls out,
+        it stores its own PC, never the next op's PC.
+        
+        * bytecode/CodeBlock.cpp:
+        (JSC::CodeBlock::dumpBytecode):
+        (JSC::CodeBlock::linkIncomingCall):
+        (JSC::CodeBlock::bytecodeOffset):
+        * bytecode/CodeBlock.h:
+        * bytecode/Opcode.h:
+        (JSC::padOpcodeName):
+        * bytecompiler/BytecodeGenerator.cpp:
+        (JSC::BytecodeGenerator::emitCallVarargs):
+        * llint/LLIntExceptions.cpp:
+        (JSC::LLInt::interpreterThrowInCaller):
+        (JSC::LLInt::returnToThrow):
+        (JSC::LLInt::callToThrow):
+        * llint/LLIntSlowPaths.cpp:
+        (JSC::LLInt::LLINT_SLOW_PATH_DECL):
+        * llint/LowLevelInterpreter.asm:
+        * llint/LowLevelInterpreter.cpp:
+        (JSC::CLoop::execute):
+        * llint/LowLevelInterpreter32_64.asm:
+        * llint/LowLevelInterpreter64.asm:
+
 2013-05-28  Filip Pizlo  <fpizlo@apple.com>
 
         fourthTier: FTL should support ArithAbs
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 9ed8b00..351b48d 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1287,6 +1287,7 @@
             int thisValue = (++it)->u.operand;
             int arguments = (++it)->u.operand;
             int firstFreeRegister = (++it)->u.operand;
+            ++it;
             out.printf("[%4d] call_varargs\t %s, %s, %s, %d", location, registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister);
             break;
         }
@@ -2619,58 +2620,6 @@
     noticeIncomingCall(callerFrame);
     m_incomingLLIntCalls.push(incoming);
 }
-
-Instruction* CodeBlock::adjustPCIfAtCallSite(Instruction* potentialReturnPC)
-{
-    ASSERT(potentialReturnPC);
-
-    unsigned returnPCOffset = potentialReturnPC - instructions().begin();
-    Instruction* adjustedPC;
-    unsigned opcodeLength;
-
-    // If we are at a callsite, the LLInt stores the PC after the call
-    // instruction rather than the PC of the call instruction. This requires
-    // some correcting. If so, we can rely on the fact that the preceding
-    // instruction must be one of the call instructions, so either it's a
-    // call_varargs or it's a call, construct, or eval.
-    //
-    // If we are not at a call site, then we need to guard against the
-    // possibility of peeking past the start of the bytecode range for this
-    // codeBlock. Hence, we do a bounds check before we peek at the
-    // potential "preceding" instruction.
-    //     The bounds check is done by comparing the offset of the potential
-    // returnPC with the length of the opcode. If there is room for a call
-    // instruction before the returnPC, then the offset of the returnPC must
-    // be greater than the size of the call opcode we're looking for.
-
-    // The determination of the call instruction present (if we are at a
-    // callsite) depends on the following assumptions. So, assert that
-    // they are still true:
-    ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call));
-    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
-    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
-
-    // Check for the case of a preceeding op_call_varargs:
-    opcodeLength = OPCODE_LENGTH(op_call_varargs);
-    adjustedPC = potentialReturnPC - opcodeLength;
-    if ((returnPCOffset >= opcodeLength)
-        && (adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call_varargs))) {
-        return adjustedPC;
-    }
-
-    // Check for the case of the other 3 call instructions:
-    opcodeLength = OPCODE_LENGTH(op_call);
-    adjustedPC = potentialReturnPC - opcodeLength;
-    if ((returnPCOffset >= opcodeLength)
-        && (adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call)
-            || adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_construct)
-            || adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call_eval))) {
-        return adjustedPC;
-    }
-
-    // Not a call site. No need to adjust PC. Just return the original.
-    return potentialReturnPC;
-}
 #endif // ENABLE(LLINT)
 
 #if ENABLE(JIT)
@@ -2730,7 +2679,6 @@
         Instruction* instruction = exec->currentVPC();
         RELEASE_ASSERT(instruction);
 
-        instruction = adjustPCIfAtCallSite(instruction);
         return bytecodeOffset(instruction);
     }
 #endif // !ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 45643bd..49030fe 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -197,9 +197,6 @@
     }
 #endif // ENABLE(JIT)
 
-#if ENABLE(LLINT)
-    Instruction* adjustPCIfAtCallSite(Instruction*);
-#endif
     unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
 
 #if ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index ebebbdb..553a823 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -179,7 +179,7 @@
     macro(op_new_func_exp, 3) \
     macro(op_call, 6) \
     macro(op_call_eval, 6) \
-    macro(op_call_varargs, 5) \
+    macro(op_call_varargs, 6) \
     macro(op_tear_off_activation, 2) \
     macro(op_tear_off_arguments, 3) \
     macro(op_ret, 2) \
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
index 2da5eee..eff2846 100644
--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
@@ -1888,6 +1888,7 @@
     instructions().append(thisRegister->index());
     instructions().append(arguments->index());
     instructions().append(firstFreeRegister->index());
+    instructions().append(0); // Pad to make it as big as an op_call.
     if (dst != ignoredResult()) {
         UnlinkedValueProfile profile = emitProfiledOpcode(op_call_put_result);
         instructions().append(kill(dst));
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.cpp b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
index d88c16e..f0046c2 100644
--- a/Source/JavaScriptCore/llint/LLIntExceptions.cpp
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
@@ -38,14 +38,6 @@
 
 namespace JSC { namespace LLInt {
 
-static void fixupPCforExceptionIfNeeded(ExecState* exec)
-{
-    CodeBlock* codeBlock = exec->codeBlock();
-    ASSERT(!!codeBlock);
-    Instruction* pc = exec->currentVPC();
-    exec->setCurrentVPC(codeBlock->adjustPCIfAtCallSite(pc));
-}
-
 void interpreterThrowInCaller(ExecState* exec, ReturnAddressPtr pc)
 {
     VM* vm = &exec->vm();
@@ -53,7 +45,6 @@
 #if LLINT_SLOW_PATH_TRACING
     dataLog("Throwing exception ", vm->exception, ".\n");
 #endif
-    fixupPCforExceptionIfNeeded(exec);
     genericThrow(
         vm, exec, vm->exception,
         exec->codeBlock()->bytecodeOffset(exec, pc));
@@ -69,7 +60,6 @@
 {
     VM* vm = &exec->vm();
     NativeCallFrameTracer tracer(vm, exec);
-    fixupPCforExceptionIfNeeded(exec);
     genericThrow(vm, exec, vm->exception, pc - exec->codeBlock()->instructions().begin());
 }
 
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index cc46fba..b7d4441 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -1500,7 +1500,7 @@
     
     execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
     execCallee->setCallerFrame(exec);
-    exec->setCurrentVPC(pc + OPCODE_LENGTH(op_call_varargs));
+    exec->setCurrentVPC(pc);
     
     return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
 }
@@ -1518,7 +1518,7 @@
     execCallee->setScope(exec->scope());
     execCallee->setReturnPC(LLInt::getCodePtr(llint_generic_return_point));
     execCallee->setCodeBlock(0);
-    exec->setCurrentVPC(pc + OPCODE_LENGTH(op_call_eval));
+    exec->setCurrentVPC(pc);
     
     if (!isHostFunction(calleeAsValue, globalFuncEval))
         return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index e32bae6..208b7ec 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -223,9 +223,8 @@
     end
 end
 
-macro slowPathForCall(advance, slowPath)
+macro slowPathForCall(slowPath)
     callCallSlowPath(
-        advance,
         slowPath,
         macro (callee)
             if C_LOOP
@@ -1090,7 +1089,7 @@
 
 _llint_op_call_varargs:
     traceExecution()
-    slowPathForCall(6, _llint_slow_path_call_varargs)
+    slowPathForCall(_llint_slow_path_call_varargs)
 
 
 _llint_op_call_eval:
@@ -1129,7 +1128,7 @@
     # and a PC to call, and that PC may be a dummy thunk that just
     # returns the JS value that the eval returned.
     
-    slowPathForCall(4, _llint_slow_path_call_eval)
+    slowPathForCall(_llint_slow_path_call_eval)
 
 
 _llint_generic_return_point:
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
index a616ce9..4340ed7 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
@@ -467,7 +467,7 @@
         // So, we need to implement the equivalent of dispatchAfterCall() here
         // before dispatching to the PC.
 
-        vPC = callFrame->currentVPC();
+        vPC = callFrame->currentVPC() + OPCODE_LENGTH(op_call);
 
 #if USE(JSVALUE64)
         // Based on LowLevelInterpreter64.asm's dispatchAfterCall():
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index d4bf2d8..847c3b1 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -101,7 +101,7 @@
 
 macro dispatchAfterCall()
     loadi ArgumentCount + TagOffset[cfr], PC
-    jmp [PC]
+    dispatch(6)
 end
 
 macro cCall2(function, arg1, arg2)
@@ -179,9 +179,8 @@
 end
 
 # Call a slowPath for call opcodes.
-macro callCallSlowPath(advance, slowPath, action)
-    addp advance * 4, PC, t0
-    storep t0, ArgumentCount + TagOffset[cfr]
+macro callCallSlowPath(slowPath, action)
+    storep PC, ArgumentCount + TagOffset[cfr]
     cCall2(slowPath, cfr, PC)
     move t1, cfr
     action(t0)
@@ -1594,13 +1593,12 @@
     loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
     bineq t3, t2, .opCallSlow
     loadi 12[PC], t3
-    addp 24, PC
     lshifti 3, t3
     addp cfr, t3  # t3 contains the new value of cfr
     loadp JSFunction::m_scope[t2], t0
     storei t2, Callee + PayloadOffset[t3]
     storei t0, ScopeChain + PayloadOffset[t3]
-    loadi 8 - 24[PC], t2
+    loadi 8[PC], t2
     storei PC, ArgumentCount + TagOffset[cfr]
     storep cfr, CallerFrame[t3]
     storei t2, ArgumentCount + PayloadOffset[t3]
@@ -1610,7 +1608,7 @@
     callTargetFunction(t1)
 
 .opCallSlow:
-    slowPathForCall(6, slowPath)
+    slowPathForCall(slowPath)
 end
 
 
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index 73d6015..775529c 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -55,7 +55,7 @@
     loadi ArgumentCount + TagOffset[cfr], PC
     loadp CodeBlock[cfr], PB
     loadp CodeBlock::m_instructions[PB], PB
-    jumpToInstruction()
+    dispatch(6)
 end
 
 macro cCall2(function, arg1, arg2)
@@ -120,9 +120,8 @@
 end
 
 # Call a slow path for call call opcodes.
-macro callCallSlowPath(advance, slowPath, action)
-    addi advance, PC, t0
-    storei t0, ArgumentCount + TagOffset[cfr]
+macro callCallSlowPath(slowPath, action)
+    storei PC, ArgumentCount + TagOffset[cfr]
     prepareStateForCCall()
     cCall2(slowPath, cfr, PC)
     move t1, cfr
@@ -1432,13 +1431,12 @@
     loadConstantOrVariable(t0, t3)
     bqneq t3, t2, .opCallSlow
     loadisFromInstruction(3, t3)
-    addi 6, PC
     lshifti 3, t3
     addp cfr, t3
     loadp JSFunction::m_scope[t2], t0
     storeq t2, Callee[t3]
     storeq t0, ScopeChain[t3]
-    loadisFromInstruction(-4, t2)
+    loadisFromInstruction(2, t2)
     storei PC, ArgumentCount + TagOffset[cfr]
     storeq cfr, CallerFrame[t3]
     storei t2, ArgumentCount + PayloadOffset[t3]
@@ -1446,7 +1444,7 @@
     callTargetFunction(t1)
 
 .opCallSlow:
-    slowPathForCall(6, slowPath)
+    slowPathForCall(slowPath)
 end