2008-12-04 Gavin Barraclough <barraclough@apple.com>
Reviewed by Geoff Garen.
Start porting the JIT to use the MacroAssembler.
https://bugs.webkit.org/show_bug.cgi?id=22671
No change in performance.
* assembler/MacroAssembler.h:
(JSC::MacroAssembler::Jump::operator X86Assembler::JmpSrc):
(JSC::MacroAssembler::add32):
(JSC::MacroAssembler::and32):
(JSC::MacroAssembler::lshift32):
(JSC::MacroAssembler::rshift32):
(JSC::MacroAssembler::storePtr):
(JSC::MacroAssembler::store32):
(JSC::MacroAssembler::poke):
(JSC::MacroAssembler::move):
(JSC::MacroAssembler::compareImm32ForBranchEquality):
(JSC::MacroAssembler::jnePtr):
(JSC::MacroAssembler::jnset32):
(JSC::MacroAssembler::jset32):
(JSC::MacroAssembler::jzSub32):
(JSC::MacroAssembler::joAdd32):
(JSC::MacroAssembler::call):
* assembler/X86Assembler.h:
(JSC::X86Assembler::shll_i8r):
* jit/JIT.cpp:
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompile):
(JSC::JIT::privateCompileCTIMachineTrampolines):
* jit/JIT.h:
* jit/JITArithmetic.cpp:
(JSC::JIT::compileBinaryArithOp):
* jit/JITInlineMethods.h:
(JSC::JIT::emitGetVirtualRegister):
(JSC::JIT::emitPutCTIArg):
(JSC::JIT::emitPutCTIArgConstant):
(JSC::JIT::emitGetCTIArg):
(JSC::JIT::emitPutCTIArgFromVirtualRegister):
(JSC::JIT::emitPutCTIParam):
(JSC::JIT::emitGetCTIParam):
(JSC::JIT::emitPutToCallFrameHeader):
(JSC::JIT::emitPutImmediateToCallFrameHeader):
(JSC::JIT::emitGetFromCallFrameHeader):
(JSC::JIT::emitPutVirtualRegister):
(JSC::JIT::emitInitRegister):
(JSC::JIT::emitNakedCall):
(JSC::JIT::restoreArgumentReference):
(JSC::JIT::restoreArgumentReferenceForTrampoline):
(JSC::JIT::emitCTICall):
(JSC::JIT::checkStructure):
(JSC::JIT::emitJumpSlowCaseIfNotJSCell):
(JSC::JIT::emitJumpSlowCaseIfNotImmNum):
(JSC::JIT::emitJumpSlowCaseIfNotImmNums):
(JSC::JIT::emitFastArithDeTagImmediate):
(JSC::JIT::emitFastArithDeTagImmediateJumpIfZero):
(JSC::JIT::emitFastArithReTagImmediate):
(JSC::JIT::emitFastArithPotentiallyReTagImmediate):
(JSC::JIT::emitFastArithImmToInt):
(JSC::JIT::emitFastArithIntToImmOrSlowCase):
(JSC::JIT::emitFastArithIntToImmNoCheck):
(JSC::JIT::emitTagAsBoolImmediate):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::privateCompilePutByIdTransition):
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@39020 268f45cc-cd09-0410-ab3c-d52691b4dbfc
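The recurring pattern in the hunks below: call sites that previously emitted raw x86 through the `__ m_assembler.` macro now go through portable MacroAssembler operations, with the hard-coded X86::edi frame register replaced by the new callFrameRegister constant. A minimal before/after sketch, drawn from the emitGetFromCallFrameHeader hunk (illustrative only, not itself part of the patch):

    // Before: raw x86 mnemonic, frame register hard-coded to edi.
    __ movl_mr(entry * sizeof(Register), X86::edi, to);

    // After: an abstract load through an Address, against the named
    // call-frame register (still edi underneath).
    loadPtr(Address(callFrameRegister, entry * sizeof(Register)), to);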
diff --git a/JavaScriptCore/ChangeLog b/JavaScriptCore/ChangeLog
index 3449b5f..4b5b0f9 100644
--- a/JavaScriptCore/ChangeLog
+++ b/JavaScriptCore/ChangeLog
@@ -1,3 +1,70 @@
+2008-12-04 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Geoff Garen.
+
+ Start porting the JIT to use the MacroAssembler.
+
+ https://bugs.webkit.org/show_bug.cgi?id=22671
+ No change in performance.
+
+ * assembler/MacroAssembler.h:
+ (JSC::MacroAssembler::Jump::operator X86Assembler::JmpSrc):
+ (JSC::MacroAssembler::add32):
+ (JSC::MacroAssembler::and32):
+ (JSC::MacroAssembler::lshift32):
+ (JSC::MacroAssembler::rshift32):
+ (JSC::MacroAssembler::storePtr):
+ (JSC::MacroAssembler::store32):
+ (JSC::MacroAssembler::poke):
+ (JSC::MacroAssembler::move):
+ (JSC::MacroAssembler::compareImm32ForBranchEquality):
+ (JSC::MacroAssembler::jnePtr):
+ (JSC::MacroAssembler::jnset32):
+ (JSC::MacroAssembler::jset32):
+ (JSC::MacroAssembler::jzSub32):
+ (JSC::MacroAssembler::joAdd32):
+ (JSC::MacroAssembler::call):
+ * assembler/X86Assembler.h:
+ (JSC::X86Assembler::shll_i8r):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileMainPass):
+ (JSC::JIT::privateCompile):
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ * jit/JIT.h:
+ * jit/JITArithmetic.cpp:
+ (JSC::JIT::compileBinaryArithOp):
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::emitGetVirtualRegister):
+ (JSC::JIT::emitPutCTIArg):
+ (JSC::JIT::emitPutCTIArgConstant):
+ (JSC::JIT::emitGetCTIArg):
+ (JSC::JIT::emitPutCTIArgFromVirtualRegister):
+ (JSC::JIT::emitPutCTIParam):
+ (JSC::JIT::emitGetCTIParam):
+ (JSC::JIT::emitPutToCallFrameHeader):
+ (JSC::JIT::emitPutImmediateToCallFrameHeader):
+ (JSC::JIT::emitGetFromCallFrameHeader):
+ (JSC::JIT::emitPutVirtualRegister):
+ (JSC::JIT::emitInitRegister):
+ (JSC::JIT::emitNakedCall):
+ (JSC::JIT::restoreArgumentReference):
+ (JSC::JIT::restoreArgumentReferenceForTrampoline):
+ (JSC::JIT::emitCTICall):
+ (JSC::JIT::checkStructure):
+ (JSC::JIT::emitJumpSlowCaseIfNotJSCell):
+ (JSC::JIT::emitJumpSlowCaseIfNotImmNum):
+ (JSC::JIT::emitJumpSlowCaseIfNotImmNums):
+ (JSC::JIT::emitFastArithDeTagImmediate):
+ (JSC::JIT::emitFastArithDeTagImmediateJumpIfZero):
+ (JSC::JIT::emitFastArithReTagImmediate):
+ (JSC::JIT::emitFastArithPotentiallyReTagImmediate):
+ (JSC::JIT::emitFastArithImmToInt):
+ (JSC::JIT::emitFastArithIntToImmOrSlowCase):
+ (JSC::JIT::emitFastArithIntToImmNoCheck):
+ (JSC::JIT::emitTagAsBoolImmediate):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::privateCompilePutByIdTransition):
+
2008-12-04 Geoffrey Garen <ggaren@apple.com>
Reviewed by Oliver Hunt.
diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h
index dfc8a5b..cea05cd 100644
--- a/JavaScriptCore/assembler/MacroAssembler.h
+++ b/JavaScriptCore/assembler/MacroAssembler.h
@@ -188,6 +188,12 @@
ASSERT(m_assembler);
m_assembler->link(m_jmp, label.m_label);
}
+
+ // FIXME: transitional method while we replace JmpSrcs with Jumps.
+ operator X86Assembler::JmpSrc()
+ {
+ return m_jmp;
+ }
private:
X86Assembler* m_assembler;
@@ -268,6 +274,11 @@
#endif
}
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addl_rr(src, dest);
+ }
+
void add32(Imm32 imm, RegisterID dest)
{
if (CAN_SIGN_EXTEND_8_32(imm.m_value))
@@ -281,6 +292,16 @@
m_assembler.addl_mr(src.offset, src.base, dest);
}
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andl_rr(src, dest);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.shll_i8r(imm.m_value, dest);
+ }
+
void or32(Imm32 imm, RegisterID dest)
{
if (CAN_SIGN_EXTEND_8_32(imm.m_value))
@@ -289,6 +310,11 @@
m_assembler.orl_i32r(imm.m_value, dest);
}
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.sarl_i8r(imm.m_value, dest);
+ }
+
void sub32(Imm32 imm, RegisterID dest)
{
if (CAN_SIGN_EXTEND_8_32(imm.m_value))
@@ -364,6 +390,16 @@
#endif
}
+#if !PLATFORM(X86_64)
+ void storePtr(void* value, ImplicitAddress address)
+ {
+ if (address.offset)
+ m_assembler.movl_i32m(reinterpret_cast<unsigned>(value), address.offset, address.base);
+ else
+ m_assembler.movl_i32m(reinterpret_cast<unsigned>(value), address.base);
+ }
+#endif
+
void store32(RegisterID src, ImplicitAddress address)
{
if (address.offset)
@@ -374,9 +410,18 @@
void store32(Imm32 imm, ImplicitAddress address)
{
- // FIXME: add a version that doesn't take an offset
- m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+ if (address.offset)
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+ else
+ m_assembler.movl_i32m(imm.m_value, address.base);
}
+
+#if !PLATFORM(X86_64)
+ void store32(Imm32 imm, void* address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address);
+ }
+#endif
// Stack manipulation operations:
@@ -420,6 +465,17 @@
storePtr(src, Address(X86::esp, (index * sizeof(void *))));
}
+ void poke(Imm32 value, int index = 0)
+ {
+ store32(value, Address(X86::esp, (index * sizeof(void *))));
+ }
+
+#if !PLATFORM(X86_64)
+ void poke(void* value, int index = 0)
+ {
+ storePtr(value, Address(X86::esp, (index * sizeof(void *))));
+ }
+#endif
// Register move operations:
//
@@ -446,6 +502,13 @@
#endif
}
+#if !PLATFORM(X86_64)
+ void move(void* value, RegisterID dest)
+ {
+ m_assembler.movl_i32r(reinterpret_cast<int32_t>(value), dest);
+ }
+#endif
+
// Forwards / external control flow operations:
//
@@ -481,6 +544,21 @@
m_assembler.cmpl_i32r(imm, reg);
}
+ void compareImm32ForBranchEquality(Address address, int32_t imm)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ if (address.offset)
+ m_assembler.cmpl_i8m(imm, address.offset, address.base);
+ else
+ m_assembler.cmpl_i8m(imm, address.base);
+ } else {
+ if (address.offset)
+ m_assembler.cmpl_i32m(imm, address.offset, address.base);
+ else
+ m_assembler.cmpl_i32m(imm, address.base);
+ }
+ }
+
public:
Jump jae32(RegisterID left, Imm32 right)
{
@@ -551,7 +629,15 @@
compareImm32ForBranch(left, right.m_value);
return Jump(m_assembler, m_assembler.jle());
}
-
+
+#if !PLATFORM(X86_64)
+ Jump jnePtr(void* ptr, Address address)
+ {
+ compareImm32ForBranchEquality(address, reinterpret_cast<uint32_t>(ptr));
+ return Jump(m_assembler, m_assembler.jne());
+ }
+#endif
+
Jump jne32(RegisterID op1, RegisterID op2)
{
m_assembler.cmpl_rr(op1, op2);
@@ -564,6 +650,26 @@
return Jump(m_assembler, m_assembler.jne());
}
+ Jump jnset32(Imm32 imm, RegisterID reg)
+ {
+ // If we are only interested in the low seven bits, this can be tested with an 8-bit testb.
+ if ((imm.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(imm.m_value, reg);
+ else
+ m_assembler.testl_i32r(imm.m_value, reg);
+ return Jump(m_assembler, m_assembler.je());
+ }
+
+ Jump jset32(Imm32 imm, RegisterID reg)
+ {
+ // If we are only interested in the low seven bits, this can be tested with an 8-bit testb.
+ if ((imm.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(imm.m_value, reg);
+ else
+ m_assembler.testl_i32r(imm.m_value, reg);
+ return Jump(m_assembler, m_assembler.jne());
+ }
+
Jump jump()
{
return Jump(m_assembler, m_assembler.jmp());
@@ -626,6 +732,32 @@
}
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branches based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
+ Jump jzSub32(Imm32 imm, RegisterID dest)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm.m_value))
+ m_assembler.subl_i8r(imm.m_value, dest);
+ else
+ m_assembler.subl_i32r(imm.m_value, dest);
+ return Jump(m_assembler, m_assembler.je());
+ }
+
+ Jump joAdd32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addl_rr(src, dest);
+ return Jump(m_assembler, m_assembler.jo());
+ }
+
+
// Miscellaneous operations:
void breakpoint()
@@ -633,6 +765,16 @@
m_assembler.int3();
}
+ Jump call()
+ {
+ return Jump(m_assembler, m_assembler.call());
+ }
+
+ Jump call(RegisterID target)
+ {
+ return Jump(m_assembler, m_assembler.call(target));
+ }
+
void ret()
{
m_assembler.ret();
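The jnset32/jset32 and jzSub32/joAdd32 helpers added above fuse a test or arithmetic operation with its conditional jump, returning a Jump the caller links or records. A usage sketch matching the JITInlineMethods.h hunks later in this patch:

    // Slow case if the integer tag bit is not set:
    m_slowCases.append(SlowCaseEntry(jnset32(Imm32(JSImmediate::TagBitTypeInteger), reg), bytecodeIndex));

    // De-tag an immediate integer, branching if the result is zero:
    return jzSub32(Imm32(JSImmediate::TagBitTypeInteger), reg);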
diff --git a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h
index 2f6e00d..5f320a4 100644
--- a/JavaScriptCore/assembler/X86Assembler.h
+++ b/JavaScriptCore/assembler/X86Assembler.h
@@ -114,6 +114,7 @@
OP_JMP_rel32 = 0xE9,
PRE_SSE_F2 = 0xF2,
OP_HLT = 0xF4,
+ OP_GROUP3_EbIb = 0xF6,
OP_GROUP3_Ev = 0xF7,
OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
OP_GROUP5_Ev = 0xFF,
@@ -390,6 +391,20 @@
}
#endif
+ void cmpl_i8m(int imm, RegisterID dst)
+ {
+ m_buffer.putByte(OP_GROUP1_EvIb);
+ modRm_opm(GROUP1_OP_CMP, dst);
+ m_buffer.putByte(imm);
+ }
+
+ void cmpl_i8m(int imm, int offset, RegisterID dst)
+ {
+ m_buffer.putByte(OP_GROUP1_EvIb);
+ modRm_opm(GROUP1_OP_CMP, dst, offset);
+ m_buffer.putByte(imm);
+ }
+
void cmpl_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
m_buffer.putByte(OP_GROUP1_EvIb);
@@ -496,6 +511,14 @@
modRm_rm(dst, base, offset);
}
+ void testb_i8r(int imm, RegisterID dst)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_GROUP3_EbIb);
+ modRm_opr_Unchecked(GROUP3_OP_TEST, dst);
+ m_buffer.putByteUnchecked(imm);
+ }
+
void testl_i32r(int imm, RegisterID dst)
{
m_buffer.ensureSpace(maxInstructionSize);
@@ -555,7 +578,7 @@
modRm_opr(GROUP2_OP_SAR, dst);
}
- void shl_i8r(int imm, RegisterID dst)
+ void shll_i8r(int imm, RegisterID dst)
{
if (imm == 1) {
m_buffer.putByte(OP_GROUP2_Ev1);
@@ -719,6 +742,14 @@
m_buffer.putInt(imm);
}
+ void movl_i32m(int imm, RegisterID base)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_GROUP11_EvIz);
+ modRm_opm_Unchecked(GROUP11_MOV, base);
+ m_buffer.putIntUnchecked(imm);
+ }
+
void movl_i32m(int imm, int offset, RegisterID base)
{
m_buffer.ensureSpace(maxInstructionSize);
@@ -1089,28 +1120,6 @@
return copy;
}
-#if USE(CTI_ARGUMENT)
- void restoreArgumentReference()
- {
-#if USE(FAST_CALL_CTI_ARGUMENT)
- movl_rr(X86::esp, X86::ecx);
-#else
- movl_rm(X86::esp, 0, X86::esp);
-#endif
- }
-
- void restoreArgumentReferenceForTrampoline()
- {
-#if USE(FAST_CALL_CTI_ARGUMENT)
- movl_rr(X86::esp, X86::ecx);
- addl_i32r(4, X86::ecx);
-#endif
- }
-#else
- void restoreArgumentReference() {}
- void restoreArgumentReferenceForTrampoline() {}
-#endif
-
private:
void modRm_rr(RegisterID reg, RegisterID rm)
{
@@ -1131,6 +1140,16 @@
}
#endif
+ void modRm_rm_Unchecked(RegisterID reg, RegisterID base)
+ {
+ if (base == X86::esp) {
+ m_buffer.putByteUnchecked(MODRM(0, reg, X86::hasSib));
+ m_buffer.putByteUnchecked(SIB(0, X86::noScale, X86::esp));
+ } else {
+ m_buffer.putByteUnchecked(MODRM(0, reg, base));
+ }
+ }
+
void modRm_rm(RegisterID reg, RegisterID base)
{
if (base == X86::esp) {
@@ -1212,6 +1231,11 @@
modRm_rm(static_cast<RegisterID>(opcodeID), base);
}
+ void modRm_opm_Unchecked(OpcodeID opcodeID, RegisterID base)
+ {
+ modRm_rm_Unchecked(static_cast<RegisterID>(opcodeID), base);
+ }
+
void modRm_opm_Unchecked(OpcodeID opcodeID, RegisterID base, int offset)
{
modRm_rm_Unchecked(static_cast<RegisterID>(opcodeID), base, offset);
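The new testb_i8r above is what lets jnset32/jset32 shrink the test when the mask fits in the low seven bits: TEST r/m8, imm8 encodes as F6 /0 ib (the new OP_GROUP3_EbIb opcode) with a one-byte immediate, versus the four-byte immediate of testl_i32r. An illustrative call, with the bytes worked out by hand (assuming JSImmediate::TagBitTypeInteger == 0x1, as in JSC of this era):

    // Emits F6 C0 01: F6 = OP_GROUP3_EbIb, C0 = ModRM (mod 11, /0 = TEST,
    // rm = eax), 01 = imm8 -- three bytes where testl_i32r would take six.
    m_assembler.testb_i8r(JSImmediate::TagBitTypeInteger, X86::eax);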
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index a382f51..f3565b5 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -565,7 +565,7 @@
case op_get_scoped_var: {
int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
- emitGetVirtualRegister(RegisterFile::ScopeChain, X86::eax, i);
+ emitGetFromCallFrameHeader(RegisterFile::ScopeChain, X86::eax);
while (skip--)
__ movl_mr(FIELD_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
@@ -578,7 +578,7 @@
case op_put_scoped_var: {
int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
- emitGetVirtualRegister(RegisterFile::ScopeChain, X86::edx, i);
+ emitGetFromCallFrameHeader(RegisterFile::ScopeChain, X86::edx);
emitGetVirtualRegister(instruction[i + 3].u.operand, X86::eax, i);
while (skip--)
__ movl_mr(FIELD_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
@@ -608,10 +608,10 @@
emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);
// Grab the return address.
- emitGetVirtualRegister(RegisterFile::ReturnPC, X86::edx, i);
+ emitGetFromCallFrameHeader(RegisterFile::ReturnPC, X86::edx);
// Restore our caller's "r".
- emitGetVirtualRegister(RegisterFile::CallerFrame, X86::edi, i);
+ emitGetFromCallFrameHeader(RegisterFile::CallerFrame, X86::edi);
// Return.
__ pushl_r(X86::edx);
@@ -947,7 +947,7 @@
}
case op_eq: {
emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
- emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx, i);
__ cmpl_rr(X86::edx, X86::eax);
__ sete_r(X86::eax);
__ movzbl_rr(X86::eax, X86::eax);
@@ -1071,7 +1071,7 @@
CTI_COMPILE_BINARY_OP(op_less)
case op_neq: {
emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
- emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx, i);
__ cmpl_rr(X86::eax, X86::edx);
__ setne_r(X86::eax);
@@ -1098,7 +1098,7 @@
CTI_COMPILE_BINARY_OP(op_urshift)
case op_bitxor: {
emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
- emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx, i);
__ xorl_rr(X86::edx, X86::eax);
emitFastArithReTagImmediate(X86::eax);
emitPutVirtualRegister(instruction[i + 1].u.operand);
@@ -1115,7 +1115,7 @@
}
case op_bitor: {
emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
- emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx, i);
__ orl_rr(X86::edx, X86::eax);
emitPutVirtualRegister(instruction[i + 1].u.operand);
i += 5;
@@ -2005,7 +2005,7 @@
JmpDst afterRegisterFileCheck;
if (m_codeBlock->codeType == FunctionCode) {
// In the case of a fast linked call, we do not set this up in the caller.
- __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edi);
+ emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
emitGetCTIParam(CTI_ARGS_registerFile, X86::eax);
__ leal_mr(m_codeBlock->numCalleeRegisters * sizeof(Register), X86::edi, X86::edx);
@@ -2140,7 +2140,7 @@
__ testl_rr(X86::eax, X86::eax);
JmpSrc hasCodeBlock1 = __ jne();
__ popl_r(X86::ebx);
- __ restoreArgumentReference();
+ restoreArgumentReference();
emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
JmpSrc callJSFunction1 = __ call();
emitGetCTIArg(0, X86::ecx);
@@ -2154,7 +2154,7 @@
__ popl_r(X86::ebx);
emitPutCTIArg(X86::ebx, 4);
emitPutCTIArg(X86::eax, 12);
- __ restoreArgumentReference();
+ restoreArgumentReference();
emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
JmpSrc callArityCheck1 = __ call();
__ movl_rr(X86::edx, X86::edi);
@@ -2167,7 +2167,7 @@
__ popl_r(X86::ebx);
emitPutCTIArg(X86::ebx, 4);
- __ restoreArgumentReference();
+ restoreArgumentReference();
emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
JmpSrc callDontLazyLinkCall = __ call();
__ pushl_r(X86::ebx);
@@ -2182,7 +2182,7 @@
__ testl_rr(X86::eax, X86::eax);
JmpSrc hasCodeBlock2 = __ jne();
__ popl_r(X86::ebx);
- __ restoreArgumentReference();
+ restoreArgumentReference();
emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
JmpSrc callJSFunction2 = __ call();
emitGetCTIArg(0, X86::ecx);
@@ -2196,7 +2196,7 @@
__ popl_r(X86::ebx);
emitPutCTIArg(X86::ebx, 4);
emitPutCTIArg(X86::eax, 12);
- __ restoreArgumentReference();
+ restoreArgumentReference();
emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
JmpSrc callArityCheck2 = __ call();
__ movl_rr(X86::edx, X86::edi);
@@ -2209,7 +2209,7 @@
__ popl_r(X86::ebx);
emitPutCTIArg(X86::ebx, 4);
- __ restoreArgumentReference();
+ restoreArgumentReference();
emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
JmpSrc callLazyLinkCall = __ call();
__ pushl_r(X86::ebx);
@@ -2224,7 +2224,7 @@
__ testl_rr(X86::eax, X86::eax);
JmpSrc hasCodeBlock3 = __ jne();
__ popl_r(X86::ebx);
- __ restoreArgumentReference();
+ restoreArgumentReference();
emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
JmpSrc callJSFunction3 = __ call();
emitGetCTIArg(0, X86::ecx);
@@ -2238,7 +2238,7 @@
__ popl_r(X86::ebx);
emitPutCTIArg(X86::ebx, 4);
emitPutCTIArg(X86::eax, 12);
- __ restoreArgumentReference();
+ restoreArgumentReference();
emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
JmpSrc callArityCheck3 = __ call();
__ movl_rr(X86::edx, X86::edi);
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index 7243b5a..ace7e67 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -35,7 +35,7 @@
#include "Interpreter.h"
#include "Opcode.h"
#include "RegisterFile.h"
-#include "X86Assembler.h"
+#include "MacroAssembler.h"
#include "Profiler.h"
#include <wtf/AlwaysInline.h>
#include <wtf/Vector.h>
@@ -260,12 +260,14 @@
void ctiSetReturnAddress(void** where, void* what);
void ctiRepatchCallByReturnAddress(void* where, void* what);
- class JIT {
+ class JIT : private MacroAssembler {
typedef X86Assembler::RegisterID RegisterID;
typedef X86Assembler::XMMRegisterID XMMRegisterID;
typedef X86Assembler::JmpSrc JmpSrc;
typedef X86Assembler::JmpDst JmpDst;
+ static const RegisterID callFrameRegister = X86::edi;
+
static const int repatchGetByIdDefaultStructure = -1;
// Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler
// will compress the displacement, and we may not be able to fit a repatched offset.
@@ -415,6 +417,7 @@
void emitPutCTIArg(RegisterID src, unsigned offset);
void emitPutCTIArgFromVirtualRegister(unsigned src, unsigned offset, RegisterID scratch);
void emitPutCTIArgConstant(unsigned value, unsigned offset);
+ void emitPutCTIArgConstant(void* value, unsigned offset);
void emitGetCTIArg(unsigned offset, RegisterID dst);
void emitInitRegister(unsigned dst);
@@ -424,6 +427,7 @@
void emitGetCTIParam(unsigned name, RegisterID to);
void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
+ void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
void emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to);
JSValue* getConstantImmediateNumericArg(unsigned src);
@@ -434,7 +438,7 @@
void emitJumpSlowCaseIfNotJSCell(RegisterID, unsigned bytecodeIndex, int VReg);
void emitJumpSlowCaseIfNotImmNum(RegisterID, unsigned bytecodeIndex);
- void emitJumpSlowCaseIfNotImmNums(RegisterID, RegisterID, unsigned bytecodeIndex);
+ void emitJumpSlowCaseIfNotImmNums(RegisterID, RegisterID, RegisterID, unsigned bytecodeIndex);
JmpSrc checkStructure(RegisterID reg, Structure* structure);
@@ -448,6 +452,9 @@
void emitTagAsBoolImmediate(RegisterID reg);
+ void restoreArgumentReference();
+ void restoreArgumentReferenceForTrampoline();
+
JmpSrc emitNakedCall(unsigned bytecodeIndex, RegisterID);
JmpSrc emitNakedCall(unsigned bytecodeIndex, void* function);
JmpSrc emitCTICall(unsigned bytecodeIndex, CTIHelper_j);
@@ -468,7 +475,6 @@
void killLastResultRegister();
- X86Assembler m_assembler;
Interpreter* m_interpreter;
JSGlobalData* m_globalData;
CodeBlock* m_codeBlock;
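Making JIT derive privately from MacroAssembler serves the transition in both directions: ported helpers can call the portable ops unqualified, while unported code keeps compiling because the `__` macro's expansion, m_assembler., now resolves to the X86Assembler member inherited from MacroAssembler (presumably protected) in place of the deleted JIT field. A sketch of the two styles coexisting inside one JIT method:

    // Ported: unqualified MacroAssembler operations.
    move(reg1, scratch);
    and32(reg2, scratch);

    // Not yet ported: raw x86 through the macro, same underlying assembler.
    __ movzbl_rr(X86::eax, X86::eax);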
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index e961bb5..7fe8060 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -254,7 +254,7 @@
__ link(op1imm, __ label());
emitJumpSlowCaseIfNotImmNum(X86::edx, i);
} else
- emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx, i);
if (opcodeID == op_add) {
emitFastArithDeTagImmediate(X86::eax);
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 0741a32..a5526e6 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -30,7 +30,7 @@
#if ENABLE(JIT)
-#define __ m_assembler.
+#define __ m_assembler.
#if PLATFORM(WIN)
#undef FIELD_OFFSET // Fix conflict with winnt.h.
@@ -61,7 +61,7 @@
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue* value = m_codeBlock->getConstant(src);
- __ movl_i32r(asInteger(value), dst);
+ move(value, dst);
killLastResultRegister();
return;
}
@@ -77,13 +77,13 @@
if (!atJumpTarget) {
// The argument we want is already stored in eax
if (dst != X86::eax)
- __ movl_rr(X86::eax, dst);
+ move(X86::eax, dst);
killLastResultRegister();
return;
}
}
- __ movl_mr(src * sizeof(Register), X86::edi, dst);
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
killLastResultRegister();
}
@@ -98,35 +98,25 @@
}
}
-// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutCTIArgFromVirtualRegister(unsigned src, unsigned offset, RegisterID scratch)
-{
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue* value = m_codeBlock->getConstant(src);
- __ movl_i32m(asInteger(value), offset + sizeof(void*), X86::esp);
- } else {
- __ movl_mr(src * sizeof(Register), X86::edi, scratch);
- __ movl_rm(scratch, offset + sizeof(void*), X86::esp);
- }
-
- killLastResultRegister();
-}
-
// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutCTIArg(RegisterID src, unsigned offset)
{
- __ movl_rm(src, offset + sizeof(void*), X86::esp);
+ poke(src, (offset / sizeof(void*)) + 1);
+}
+
+ALWAYS_INLINE void JIT::emitPutCTIArgConstant(unsigned value, unsigned offset)
+{
+ poke(Imm32(value), (offset / sizeof(void*)) + 1);
+}
+
+ALWAYS_INLINE void JIT::emitPutCTIArgConstant(void* value, unsigned offset)
+{
+ poke(value, (offset / sizeof(void*)) + 1);
}
ALWAYS_INLINE void JIT::emitGetCTIArg(unsigned offset, RegisterID dst)
{
- __ movl_mr(offset + sizeof(void*), X86::esp, dst);
-}
-
-
-ALWAYS_INLINE void JIT::emitPutCTIArgConstant(unsigned value, unsigned offset)
-{
- __ movl_i32m(value, offset + sizeof(void*), X86::esp);
+ peek(dst, (offset / sizeof(void*)) + 1);
}
ALWAYS_INLINE JSValue* JIT::getConstantImmediateNumericArg(unsigned src)
@@ -138,190 +128,226 @@
return noValue();
}
+// Puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
+ALWAYS_INLINE void JIT::emitPutCTIArgFromVirtualRegister(unsigned src, unsigned offset, RegisterID scratch)
+{
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue* value = m_codeBlock->getConstant(src);
+ emitPutCTIArgConstant(value, offset);
+ } else {
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
+ emitPutCTIArg(scratch, offset);
+ }
+
+ killLastResultRegister();
+}
+
ALWAYS_INLINE void JIT::emitPutCTIParam(void* value, unsigned name)
{
- __ movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
+ poke(value, name);
}
ALWAYS_INLINE void JIT::emitPutCTIParam(RegisterID from, unsigned name)
{
- __ movl_rm(from, name * sizeof(void*), X86::esp);
+ poke(from, name);
}
ALWAYS_INLINE void JIT::emitGetCTIParam(unsigned name, RegisterID to)
{
- __ movl_mr(name * sizeof(void*), X86::esp, to);
+ peek(to, name);
killLastResultRegister();
}
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
- __ movl_rm(from, entry * sizeof(Register), X86::edi);
+ storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
+}
+
+ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+{
+ storePtr(value, Address(callFrameRegister, entry * sizeof(Register)));
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to)
{
- __ movl_mr(entry * sizeof(Register), X86::edi, to);
+ loadPtr(Address(callFrameRegister, entry * sizeof(Register)), to);
killLastResultRegister();
}
ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
- __ movl_rm(from, dst * sizeof(Register), X86::edi);
+ storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
m_lastResultBytecodeRegister = (from == X86::eax) ? dst : std::numeric_limits<int>::max();
// FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
- __ movl_i32m(asInteger(jsUndefined()), dst * sizeof(Register), X86::edi);
+ storePtr(jsUndefined(), Address(callFrameRegister, dst * sizeof(Register)));
// FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}
ALWAYS_INLINE JmpSrc JIT::emitNakedCall(unsigned bytecodeIndex, X86::RegisterID r)
{
- JmpSrc call = __ call(r);
- m_calls.append(CallRecord(call, bytecodeIndex));
-
- return call;
+ JmpSrc nakedCall = call(r);
+ m_calls.append(CallRecord(nakedCall, bytecodeIndex));
+ return nakedCall;
}
-ALWAYS_INLINE JmpSrc JIT::emitNakedCall(unsigned bytecodeIndex, void* function)
+ALWAYS_INLINE JmpSrc JIT::emitNakedCall(unsigned bytecodeIndex, void* function)
{
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, reinterpret_cast<CTIHelper_v>(function), bytecodeIndex));
- return call;
+ JmpSrc nakedCall = call();
+ m_calls.append(CallRecord(nakedCall, reinterpret_cast<CTIHelper_v>(function), bytecodeIndex));
+ return nakedCall;
}
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+#if USE(CTI_ARGUMENT)
+#if USE(FAST_CALL_CTI_ARGUMENT)
+ movl_rr(X86::esp, X86::ecx);
+#else
+ movl_rm(X86::esp, 0, X86::esp);
+#endif
+#endif
+}
+
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
+{
+#if USE(CTI_ARGUMENT) && USE(FAST_CALL_CTI_ARGUMENT)
+ movl_rr(X86::esp, X86::ecx);
+ addl_i32r(4, X86::ecx);
+#endif
+}
+
+
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_j helper)
{
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true)), m_interpreter->sampler()->sampleSlot());
#endif
- __ restoreArgumentReference();
- emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, helper, bytecodeIndex));
+ restoreArgumentReference();
+ emitPutCTIParam(callFrameRegister, CTI_ARGS_callFrame);
+ JmpSrc ctiCall = call();
+ m_calls.append(CallRecord(ctiCall, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false)), m_interpreter->sampler()->sampleSlot());
#endif
killLastResultRegister();
- return call;
+ return ctiCall;
}
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_o helper)
{
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true)), m_interpreter->sampler()->sampleSlot());
#endif
- __ restoreArgumentReference();
- emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, helper, bytecodeIndex));
+ restoreArgumentReference();
+ emitPutCTIParam(callFrameRegister, CTI_ARGS_callFrame);
+ JmpSrc ctiCall = call();
+ m_calls.append(CallRecord(ctiCall, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false)), m_interpreter->sampler()->sampleSlot());
#endif
killLastResultRegister();
- return call;
+ return ctiCall;
}
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_p helper)
{
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true)), m_interpreter->sampler()->sampleSlot());
#endif
- __ restoreArgumentReference();
- emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, helper, bytecodeIndex));
+ restoreArgumentReference();
+ emitPutCTIParam(callFrameRegister, CTI_ARGS_callFrame);
+ JmpSrc ctiCall = call();
+ m_calls.append(CallRecord(ctiCall, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false)), m_interpreter->sampler()->sampleSlot());
#endif
killLastResultRegister();
- return call;
+ return ctiCall;
}
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_b helper)
{
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true)), m_interpreter->sampler()->sampleSlot());
#endif
- __ restoreArgumentReference();
- emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, helper, bytecodeIndex));
+ restoreArgumentReference();
+ emitPutCTIParam(callFrameRegister, CTI_ARGS_callFrame);
+ JmpSrc ctiCall = call();
+ m_calls.append(CallRecord(ctiCall, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false)), m_interpreter->sampler()->sampleSlot());
#endif
killLastResultRegister();
- return call;
+ return ctiCall;
}
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_v helper)
{
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true)), m_interpreter->sampler()->sampleSlot());
#endif
- __ restoreArgumentReference();
- emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, helper, bytecodeIndex));
+ restoreArgumentReference();
+ emitPutCTIParam(callFrameRegister, CTI_ARGS_callFrame);
+ JmpSrc ctiCall = call();
+ m_calls.append(CallRecord(ctiCall, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false)), m_interpreter->sampler()->sampleSlot());
#endif
killLastResultRegister();
- return call;
+ return ctiCall;
}
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_s helper)
{
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true)), m_interpreter->sampler()->sampleSlot());
#endif
- __ restoreArgumentReference();
- emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, helper, bytecodeIndex));
+ restoreArgumentReference();
+ emitPutCTIParam(callFrameRegister, CTI_ARGS_callFrame);
+ JmpSrc ctiCall = call();
+ m_calls.append(CallRecord(ctiCall, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false)), m_interpreter->sampler()->sampleSlot());
#endif
killLastResultRegister();
- return call;
+ return ctiCall;
}
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_2 helper)
{
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true)), m_interpreter->sampler()->sampleSlot());
#endif
- __ restoreArgumentReference();
- emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, helper, bytecodeIndex));
+ restoreArgumentReference();
+ emitPutCTIParam(callFrameRegister, CTI_ARGS_callFrame);
+ JmpSrc ctiCall = call();
+ m_calls.append(CallRecord(ctiCall, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
- __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false)), m_interpreter->sampler()->sampleSlot());
#endif
killLastResultRegister();
- return call;
+ return ctiCall;
}
ALWAYS_INLINE JmpSrc JIT::checkStructure(RegisterID reg, Structure* structure)
{
- __ cmpl_i32m(reinterpret_cast<uint32_t>(structure), FIELD_OFFSET(JSCell, m_structure), reg);
- return __ jne();
+ return jnePtr(structure, Address(reg, FIELD_OFFSET(JSCell, m_structure)));
}
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, unsigned bytecodeIndex)
{
- __ testl_i32r(JSImmediate::TagMask, reg);
- m_slowCases.append(SlowCaseEntry(__ jne(), bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(jset32(Imm32(JSImmediate::TagMask), reg), bytecodeIndex));
}
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, unsigned bytecodeIndex, int vReg)
@@ -343,15 +369,14 @@
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNum(RegisterID reg, unsigned bytecodeIndex)
{
- __ testl_i32r(JSImmediate::TagBitTypeInteger, reg);
- m_slowCases.append(SlowCaseEntry(__ je(), bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(jnset32(Imm32(JSImmediate::TagBitTypeInteger), reg), bytecodeIndex));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNums(RegisterID reg1, RegisterID reg2, unsigned bytecodeIndex)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNums(RegisterID reg1, RegisterID reg2, RegisterID scratch, unsigned bytecodeIndex)
{
- __ movl_rr(reg1, X86::ecx);
- __ andl_rr(reg2, X86::ecx);
- emitJumpSlowCaseIfNotImmNum(X86::ecx, bytecodeIndex);
+ move(reg1, scratch);
+ and32(reg2, scratch);
+ emitJumpSlowCaseIfNotImmNum(scratch, bytecodeIndex);
}
ALWAYS_INLINE unsigned JIT::getDeTaggedConstantImmediate(JSValue* imm)
@@ -362,47 +387,45 @@
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
- __ subl_i8r(JSImmediate::TagBitTypeInteger, reg);
+ sub32(Imm32(JSImmediate::TagBitTypeInteger), reg);
}
ALWAYS_INLINE JmpSrc JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
- __ subl_i8r(JSImmediate::TagBitTypeInteger, reg);
- return __ je();
+ return jzSub32(Imm32(JSImmediate::TagBitTypeInteger), reg);
}
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID reg)
{
- __ addl_i8r(JSImmediate::TagBitTypeInteger, reg);
+ add32(Imm32(JSImmediate::TagBitTypeInteger), reg);
}
ALWAYS_INLINE void JIT::emitFastArithPotentiallyReTagImmediate(RegisterID reg)
{
- __ orl_i8r(JSImmediate::TagBitTypeInteger, reg);
+ or32(Imm32(JSImmediate::TagBitTypeInteger), reg);
}
ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
- __ sarl_i8r(1, reg);
+ rshift32(Imm32(1), reg);
}
ALWAYS_INLINE void JIT::emitFastArithIntToImmOrSlowCase(RegisterID reg, unsigned bytecodeIndex)
{
- __ addl_rr(reg, reg);
- m_slowCases.append(SlowCaseEntry(__ jo(), bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(joAdd32(reg, reg), bytecodeIndex));
emitFastArithReTagImmediate(reg);
}
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID reg)
{
- __ addl_rr(reg, reg);
+ add32(reg, reg);
emitFastArithReTagImmediate(reg);
}
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
- __ shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
- __ orl_i8r(JSImmediate::FullTagTypeBool, reg);
+ lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
+ or32(Imm32(JSImmediate::FullTagTypeBool), reg);
}
}
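One consequence of the emitJumpSlowCaseIfNotImmNums change above is visible at every call site in JIT.cpp and JITArithmetic.cpp: the scratch register is now an explicit parameter (X86::ecx throughout this patch) rather than a register the helper clobbered silently. A representative call, as it appears in the op_eq hunk:

    emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
    emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx, i);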
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index ffb70e0..a4cba0f 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -281,7 +281,7 @@
if (failureCases.size()) {
for (unsigned i = 0; i < failureCases.size(); ++i)
__ link(failureCases[i], __ label());
- __ restoreArgumentReferenceForTrampoline();
+ restoreArgumentReferenceForTrampoline();
failureJump = __ jmp();
}
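Finally, note what moving restoreArgumentReference()/restoreArgumentReferenceForTrampoline() from X86Assembler into JIT inline methods buys: the #if USE(CTI_ARGUMENT) guards travel with them, so in the default configuration both are empty and each emitCTICall overload reduces to roughly the following common tail (a sketch, per the JITInlineMethods.h hunks above):

    emitPutCTIParam(callFrameRegister, CTI_ARGS_callFrame);
    JmpSrc ctiCall = call();
    m_calls.append(CallRecord(ctiCall, helper, bytecodeIndex));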