# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
2#
3# Redistribution and use in source and binary forms, with or without
4# modification, are permitted provided that the following conditions
5# are met:
6# 1. Redistributions of source code must retain the above copyright
7# notice, this list of conditions and the following disclaimer.
8# 2. Redistributions in binary form must reproduce the above copyright
9# notice, this list of conditions and the following disclaimer in the
10# documentation and/or other materials provided with the distribution.
11#
12# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22# THE POSSIBILITY OF SUCH DAMAGE.
23
24
25# Crash course on the language that this is written in (which I just call
26# "assembly" even though it's more than that):
27#
28# - Mostly gas-style operand ordering. The last operand tends to be the
29# destination. So "a := b" is written as "mov b, a". But unlike gas,
30# comparisons are in-order, so "if (a < b)" is written as
31# "bilt a, b, ...".
32#
33# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
34# Currently this is just 32-bit so "i" and "p" are interchangeable
35# except when an op supports one but not the other.
36#
37# - In general, valid operands for macro invocations and instructions are
38# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
39# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
40# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
41# macros as operands. Instructions cannot take anonymous macros.
42#
43# - Labels must have names that begin with either "_" or ".". A "." label
44# is local and gets renamed before code gen to minimize namespace
45# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
46# may or may not be removed during code gen depending on whether the asm
47# conventions for C name mangling on the target platform mandate a "_"
48# prefix.
49#
50# - A "macro" is a lambda expression, which may be either anonymous or
51# named. But this has caveats. "macro" can take zero or more arguments,
52# which may be macros or any valid operands, but it can only return
53# code. But you can do Turing-complete things via continuation passing
54# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
55# that, since you'll just crash the assembler.
56#
57# - An "if" is a conditional on settings. Any identifier supplied in the
58# predicate of an "if" is assumed to be a #define that is available
59# during code gen. So you can't use "if" for computation in a macro, but
60# you can use it to select different pieces of code for different
61# platforms.
62#
63# - Arguments to macros follow lexical scoping rather than dynamic scoping.
64# Const's also follow lexical scoping and may override (hide) arguments
65# or other consts. All variables (arguments and constants) can be bound
66# to operands. Additionally, arguments (but not constants) can be bound
67# to macros.
68
69
70# Below we have a bunch of constant declarations. Each constant must have
71# a corresponding ASSERT() in LLIntData.cpp.
72
# These declarations must match interpreter/RegisterFile.h.
# Byte offsets of call frame header slots, relative to the call frame
# register (cfr).
const CallFrameHeaderSize = 48
const ArgumentCount = -48
const CallerFrame = -40
const Callee = -32
const ScopeChain = -24
const ReturnPC = -16
const CodeBlock = -8

# The "this" argument occupies the first argument slot below the header.
const ThisArgumentOffset = -CallFrameHeaderSize - 8

# Declare some aliases for the registers we will use.
const PC = t4

# Offsets needed for reasoning about value representation.
# Each JSValue is 8 bytes: a 4-byte tag word and a 4-byte payload word,
# whose in-memory order depends on endianness.
if BIG_ENDIAN
    const TagOffset = 0
    const PayloadOffset = 4
else
    const TagOffset = 4
    const PayloadOffset = 0
end

# Value representation constants.
# Tag values for non-double JSValues. Any tag word numerically below
# LowestTag (as a signed int) is the high word of a double.
const Int32Tag = -1
const BooleanTag = -2
const NullTag = -3
const UndefinedTag = -4
const CellTag = -5
const EmptyValueTag = -6
const DeletedValueTag = -7
const LowestTag = DeletedValueTag

# Type constants.
const StringType = 5
const ObjectType = 13

# Type flags constants.
const MasqueradesAsUndefined = 1
const ImplementsHasInstance = 2
const ImplementsDefaultHasInstance = 8

# Heap allocation constants.
const JSFinalObjectSizeClassIndex = 3

# Bytecode operand constants.
# Operand indices at or above this value denote constant-pool entries
# rather than virtual registers; see loadConstantOrVariable().
const FirstConstantRegisterIndex = 0x40000000

# Code type constants.
const GlobalCode = 0
const EvalCode = 1
const FunctionCode = 2

# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset

# This must match wtf/Vector.h.
const VectorSizeOffset = 0
const VectorBufferOffset = 4

# String flags.
const HashFlags8BitBuffer = 64
135
# Utilities
# Crash deliberately and recognizably: store to the unmapped address
# 0xbbadbeef, then call through a null pointer for good measure.
macro crash()
    storei 0, 0xbbadbeef[]
    move 0, t0
    call t0
end

# Evaluate "assertion", a macro that branches to its label argument when
# the condition holds; fall through to crash() otherwise. Compiled out
# entirely unless ASSERT_ENABLED.
macro assert(assertion)
    if ASSERT_ENABLED
        assertion(.ok)
        crash()
    .ok:
    end
end
150
# Save the return address of the current call into destinationRegister.
# On ARMv7 the return address is in lr; on X86 it is on top of the stack.
macro preserveReturnAddressAfterCall(destinationRegister)
    if ARMv7
        move lr, destinationRegister
    elsif X86
        pop destinationRegister
    else
        error
    end
end

# Inverse of preserveReturnAddressAfterCall(): put the saved return
# address back where the architecture's return convention expects it.
macro restoreReturnAddressBeforeReturn(sourceRegister)
    if ARMv7
        move sourceRegister, lr
    elsif X86
        push sourceRegister
    else
        error
    end
end
170
# Advance PC by "advance" bytecode slots (4 bytes each) and jump to the
# opcode pointer stored at the new PC.
macro dispatch(advance)
    addp advance * 4, PC
    jmp [PC]
end

# Like dispatch(), but the slot count is in a register; it is converted
# to a byte offset in place (clobbering pcOffset).
macro dispatchBranchWithOffset(pcOffset)
    lshifti 2, pcOffset
    addp pcOffset, PC
    jmp [PC]
end

# Dispatch to a branch target whose slot offset is stored in the
# instruction stream at address pcOffset. Clobbers t0.
macro dispatchBranch(pcOffset)
    loadi pcOffset, t0
    dispatchBranchWithOffset(t0)
end

# Resume execution after a call opcode: the PC to return to was stashed
# in the tag word of the ArgumentCount slot by callCallSlowPath().
macro dispatchAfterCall()
    loadi ArgumentCount + TagOffset[cfr], PC
    jmp [PC]
end
191
# Call a C function with two arguments. On ARMv7 arguments are passed in
# t0/t1; on X86 they are poked into the outgoing argument stack slots.
macro cCall2(function, arg1, arg2)
    if ARMv7
        move arg1, t0
        move arg2, t1
    elsif X86
        poke arg1, 0
        poke arg2, 1
    else
        error
    end
    call function
end

# This barely works. arg3 and arg4 should probably be immediates.
# Four-argument variant of cCall2(), same per-architecture conventions.
macro cCall4(function, arg1, arg2, arg3, arg4)
    if ARMv7
        move arg1, t0
        move arg2, t1
        move arg3, t2
        move arg4, t3
    elsif X86
        poke arg1, 0
        poke arg2, 1
        poke arg3, 2
        poke arg4, 3
    else
        error
    end
    call function
end

# Call a slow-path C function with the standard (cfr, PC) protocol: the
# slow path returns the next PC in t0 and the (possibly updated) call
# frame in t1.
macro callSlowPath(slow_path)
    cCall2(slow_path, cfr, PC)
    move t0, PC
    move t1, cfr
end
228
# Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
# should be an immediate integer - any integer you like; use it to identify the place you're
# debugging from. operand should likewise be an immediate, and should identify the operand
# in the instruction stream you'd like to print out.
macro traceOperand(fromWhere, operand)
    cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
    move t0, PC
    move t1, cfr
end

# Debugging operation if you'd like to print the value of an operand in the instruction
# stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
# value.
macro traceValue(fromWhere, operand)
    cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
    move t0, PC
    move t1, cfr
end

# Emit a per-opcode trace call when EXECUTION_TRACING is enabled; a no-op
# otherwise. Placed at the top of every opcode implementation.
macro traceExecution()
    if EXECUTION_TRACING
        callSlowPath(_llint_trace)
    end
end
253
# Call a slow_path for call opcodes.
# Before calling out, stash the return PC (current PC advanced past this
# instruction) in the tag word of the ArgumentCount slot, where
# dispatchAfterCall() expects to find it. The slow path returns the
# callee in t0 and the call frame in t1; "action" decides what to do
# with the callee.
macro callCallSlowPath(advance, slow_path, action)
    addp advance * 4, PC, t0
    storep t0, ArgumentCount + TagOffset[cfr]
    cCall2(slow_path, cfr, PC)
    move t1, cfr
    action(t0)
end

# Standard slow path for call opcodes: invoke the callee machine code
# returned by the slow path, then resume at the stashed return PC.
macro slowPathForCall(advance, slow_path)
    callCallSlowPath(
        advance,
        slow_path,
        macro (callee)
            call callee
            dispatchAfterCall()
        end)
end
272
# Bump the CodeBlock's LLInt execution counter by "increment" and run
# "action" when the counter crosses zero, i.e. when this code block has
# become hot enough to consider tiering up to the JIT. Compiled out
# unless JIT_ENABLED.
macro checkSwitchToJIT(increment, action)
    if JIT_ENABLED
        loadp CodeBlock[cfr], t0
        baddis increment, CodeBlock::m_llintExecuteCounter[t0], .continue
        action()
    .continue:
    end
end

# Loop back-edge tier-up check: ask the OSR slow path for a JIT entry
# point (returned in t0) and jump to it if available; otherwise restore
# the saved PC and continue interpreting.
macro checkSwitchToJITForLoop()
    checkSwitchToJIT(
        1,
        macro ()
            storei PC, ArgumentCount + TagOffset[cfr]
            cCall2(_llint_loop_osr, cfr, PC)
            move t1, cfr
            btpz t0, .recover
            jmp t0
        .recover:
            loadi ArgumentCount + TagOffset[cfr], PC
        end)
end

# Epilogue tier-up check: weight returns more heavily (10) and let the
# slow path replace this code block's entry point with JIT code.
macro checkSwitchToJITForEpilogue()
    checkSwitchToJIT(
        10,
        macro ()
            callSlowPath(_llint_replace)
        end)
end
303
# Assert that "index" names a virtual register, not a constant-pool entry
# (i.e. index < FirstConstantRegisterIndex). No-op unless ASSERT_ENABLED.
macro assertNotConstant(index)
    assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
end
307
# Load the tag and payload of the operand named by "index", which is
# either a virtual register (index < FirstConstantRegisterIndex) or an
# entry in the CodeBlock's constant pool.
# Index, tag, and payload must be different registers. Index is not
# changed.
macro loadConstantOrVariable(index, tag, payload)
    bigteq index, FirstConstantRegisterIndex, .constant
    loadi TagOffset[cfr, index, 8], tag
    loadi PayloadOffset[cfr, index, 8], payload
    jmp .done
.constant:
    loadp CodeBlock[cfr], payload
    loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
    # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
    # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
    loadp TagOffset[payload, index, 8], tag
    loadp PayloadOffset[payload, index, 8], payload
.done:
end
324
# Index and payload may be the same register. Index may be clobbered.
# Two-register variant of loadConstantOrVariable(): uses "tag" as the
# scratch base pointer, so only index and tag need to be distinct.
macro loadConstantOrVariable2Reg(index, tag, payload)
    bigteq index, FirstConstantRegisterIndex, .constant
    loadi TagOffset[cfr, index, 8], tag
    loadi PayloadOffset[cfr, index, 8], payload
    jmp .done
.constant:
    loadp CodeBlock[cfr], tag
    loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
    # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
    # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
    lshifti 3, index
    addp index, tag
    # Load payload first so the base pointer in "tag" survives until the
    # final load overwrites it with the actual tag word.
    loadp PayloadOffset[tag], payload
    loadp TagOffset[tag], tag
.done:
end
342
# Load just the payload of operand "index" (register or constant),
# invoking the tagCheck macro on the address of the tag word so the
# caller can branch on it.
macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
    bigteq index, FirstConstantRegisterIndex, .constant
    tagCheck(TagOffset[cfr, index, 8])
    loadi PayloadOffset[cfr, index, 8], payload
    jmp .done
.constant:
    loadp CodeBlock[cfr], payload
    loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
    # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
    # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
    tagCheck(TagOffset[payload, index, 8])
    loadp PayloadOffset[payload, index, 8], payload
.done:
end

# Index and payload must be different registers. Index is not mutated. Use
# this if you know what the tag of the variable should be. Doing the tag
# test as part of loading the variable reduces register use, but may not
# be faster than doing loadConstantOrVariable followed by a branch on the
# tag.
macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
    loadConstantOrVariablePayloadTagCustom(
        index,
        macro (actualTag) bineq actualTag, expectedTag, slow end,
        payload)
end

# Like loadConstantOrVariablePayload(), but performs no tag check at all.
macro loadConstantOrVariablePayloadUnchecked(index, payload)
    loadConstantOrVariablePayloadTagCustom(
        index,
        macro (actualTag) end,
        payload)
end
376
# Hook for recording heap stores, kept so call sites document where
# barriers would go if the collector ever needs them.
macro writeBarrier(tag, payload)
    # Nothing to do, since we don't have a generational or incremental collector.
end

# Record a (tag, payload) value into the given ValueProfile bucket for
# the optimizing JIT. Compiled out unless VALUE_PROFILER.
macro valueProfile(tag, payload, profile)
    if VALUE_PROFILER
        storei tag, ValueProfile::m_buckets + TagOffset[profile]
        storei payload, ValueProfile::m_buckets + PayloadOffset[profile]
    end
end
387
388
# Indicate the beginning of LLInt.
# This label marks the start of the LLInt code range; it is never meant
# to be executed, so crash if control ever lands here.
_llint_begin:
    crash()
392
393
394# Entrypoints into the interpreter
395
396macro functionForCallCodeBlockGetter(targetRegister)
397 loadp Callee[cfr], targetRegister
398 loadp JSFunction::m_executable[targetRegister], targetRegister
399 loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
400end
401
402macro functionForConstructCodeBlockGetter(targetRegister)
403 loadp Callee[cfr], targetRegister
404 loadp JSFunction::m_executable[targetRegister], targetRegister
405 loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
406end
407
408macro notFunctionCodeBlockGetter(targetRegister)
409 loadp CodeBlock[cfr], targetRegister
410end
411
412macro functionCodeBlockSetter(sourceRegister)
413 storep sourceRegister, CodeBlock[cfr]
414end
415
416macro notFunctionCodeBlockSetter(sourceRegister)
417 # Nothing to do!
418end
419
# Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock*
# in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
    preserveReturnAddressAfterCall(t2)

    # Set up the call frame and check if we should OSR.
    storep t2, ReturnPC[cfr]
    if EXECUTION_TRACING
        callSlowPath(traceSlowPath)
    end
    codeBlockGetter(t1)
    if JIT_ENABLED
        # Count this entry; once the counter crosses zero, ask the OSR
        # slow path for a JIT entry point (returned in t0).
        baddis 5, CodeBlock::m_llintExecuteCounter[t1], .continue
        cCall2(osrSlowPath, cfr, PC)
        move t1, cfr
        btpz t0, .recover
        # OSR succeeded: restore the return address to the machine
        # convention and tail-jump into the JIT code.
        loadp ReturnPC[cfr], t2
        restoreReturnAddressBeforeReturn(t2)
        jmp t0
    .recover:
        # OSR declined; the slow path clobbered t1, so refetch the
        # CodeBlock.
        codeBlockGetter(t1)
    .continue:
    end
    codeBlockSetter(t1)

    # Set up the PC.
    loadp CodeBlock::m_instructions[t1], PC
end
448
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
    if VALUE_PROFILER
        # Profile the arguments. Unfortunately, we have no choice but to do this. This
        # code is pretty horrendous because of the difference in ordering between
        # arguments and value profiles, the desire to have a simple loop-down-to-zero
        # loop, and the desire to use only three registers so as to preserve the PC and
        # the code block. It is likely that this code should be rewritten in a more
        # optimal way for architectures that have more than five registers available
        # for arbitrary use in the interpreter.
        loadi CodeBlock::m_numParameters[t1], t0
        addi -profileArgSkip, t0 # Use addi because that's what has the peephole
        assert(macro (ok) bigteq t0, 0, ok end)
        btiz t0, .argumentProfileDone
        loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
        muli sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
        negi t0
        lshifti 3, t0
        addp t2, t3
    .argumentProfileLoop:
        # t0 counts up from -8 * numProfiledArgs to zero in 8-byte steps
        # (one JSValue per argument); t3 walks the profile buffer
        # downwards in lockstep.
        loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
        loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
        baddinz 8, t0, .argumentProfileLoop
    .argumentProfileDone:
    end

    # Check stack height.
    loadi CodeBlock::m_numCalleeRegisters[t1], t0
    loadp CodeBlock::m_globalData[t1], t2
    loadp JSGlobalData::interpreter[t2], t2 # FIXME: Can get to the RegisterFile from the JITStackFrame
    lshifti 3, t0
    addp t0, cfr, t0
    bpaeq Interpreter::m_registerFile + RegisterFile::m_end[t2], t0, .stackHeightOK

    # Stack height check failed - need to call a slow_path.
    callSlowPath(_llint_register_file_check)
.stackHeightOK:
end
491
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Verifies that the number of arguments passed is at least the number of
# declared parameters; otherwise calls slow_path to fix up the frame. If
# the slow path reports an error, unwinds to the throw target recorded
# in JSGlobalData.
macro functionArityCheck(doneLabel, slow_path)
    loadi PayloadOffset + ArgumentCount[cfr], t0
    biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
    cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
    move t1, cfr
    btiz t0, .continue
    # Error: transfer control to the machine PC recorded for the throw,
    # using the frame recorded in callFrameForThrow.
    loadp JITStackFrame::globalData[sp], t1
    loadp JSGlobalData::callFrameForThrow[t1], t0
    jmp JSGlobalData::targetMachinePCForThrow[t1]
.continue:
    # Reload CodeBlock and PC, since the slow_path clobbered it.
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_instructions[t1], PC
    jmp doneLabel
end
508
# Entry point for global (program) code.
_llint_program_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


# Entry point for eval code.
_llint_eval_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


# Entry point for a function invoked via call, once arity is known good.
_llint_function_for_call_prologue:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
.functionForCallBegin:
    # profileArgSkip = 0: profile every argument slot including "this".
    functionInitialization(0)
    dispatch(0)


# Entry point for a function invoked via construct.
_llint_function_for_construct_prologue:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
.functionForConstructBegin:
    # profileArgSkip = 1: skip the "this" slot when profiling arguments.
    functionInitialization(1)
    dispatch(0)


# Entry points used when the caller's argument count may not match the
# callee's parameter count; fix up the frame, then fall into the normal
# begin labels above.
_llint_function_for_call_arity_check:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
    functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck)


_llint_function_for_construct_arity_check:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
    functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck)
541
# Instruction implementations

# op_enter: initialize all local variables of the frame to undefined.
_llint_op_enter:
    traceExecution()
    loadp CodeBlock[cfr], t2
    loadi CodeBlock::m_numVars[t2], t2
    btiz t2, .opEnterDone
    move UndefinedTag, t0
    move 0, t1
.opEnterLoop:
    # Loop from numVars - 1 down to 0, storing undefined in each slot.
    subi 1, t2
    storei t0, TagOffset[cfr, t2, 8]
    storei t1, PayloadOffset[cfr, t2, 8]
    btinz t2, .opEnterLoop
.opEnterDone:
    dispatch(1)


# op_create_activation: lazily create the activation object in operand 1
# if that register is still empty.
_llint_op_create_activation:
    traceExecution()
    loadi 4[PC], t0
    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
    callSlowPath(_llint_slow_path_create_activation)
.opCreateActivationDone:
    dispatch(2)


# op_init_lazy_reg: mark the register named by operand 1 as empty so a
# later opcode can lazily initialize it.
_llint_op_init_lazy_reg:
    traceExecution()
    loadi 4[PC], t0
    storei EmptyValueTag, TagOffset[cfr, t0, 8]
    storei 0, PayloadOffset[cfr, t0, 8]
    dispatch(2)


# op_create_arguments: lazily create the arguments object in operand 1
# if that register is still empty.
_llint_op_create_arguments:
    traceExecution()
    loadi 4[PC], t0
    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
    callSlowPath(_llint_slow_path_create_arguments)
.opCreateArgumentsDone:
    dispatch(2)
584
585
# Inline allocator for basic JSObjects: pop the next free cell off the
# MarkedAllocator free list for the given size class and initialize its
# class info, structure, inheritorID, and property storage. Branches to
# slowCase when the free list is empty (or always, under
# ALWAYS_ALLOCATE_SLOW).
macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result, scratch1, scratch2, slowCase)
    if ALWAYS_ALLOCATE_SLOW
        jmp slowCase
    else
        # Offset of this size class's MarkedAllocator within JSGlobalData.
        const offsetOfMySizeClass =
            JSGlobalData::heap +
            Heap::m_objectSpace +
            MarkedSpace::m_normalSpace +
            MarkedSpace::Subspace::preciseAllocators +
            sizeClassIndex * sizeof MarkedAllocator

        # FIXME: we can get the global data in one load from the stack.
        loadp CodeBlock[cfr], scratch1
        loadp CodeBlock::m_globalData[scratch1], scratch1

        # Get the object from the free list.
        loadp offsetOfMySizeClass + MarkedAllocator::m_firstFreeCell[scratch1], result
        btpz result, slowCase

        # Remove the object from the free list.
        loadp [result], scratch2
        storep scratch2, offsetOfMySizeClass + MarkedAllocator::m_firstFreeCell[scratch1]

        # Initialize the object.
        loadp classInfoOffset[scratch1], scratch2
        storep scratch2, [result]
        storep structure, JSCell::m_structure[result]
        storep 0, JSObject::m_inheritorID[result]
        # Property storage is placed inline, immediately after the object.
        addp sizeof JSObject, result, scratch1
        storep scratch1, JSObject::m_propertyStorage[result]
    end
end
618
# op_create_this: allocate the "this" object for a construct using the
# callee's cached inheritorID structure. Falls back to the slow path if
# the callee (operand 2) is not an object cell, has no inheritorID yet,
# or inline allocation fails.
_llint_op_create_this:
    traceExecution()
    loadi 8[PC], t0
    assertNotConstant(t0)
    bineq TagOffset[cfr, t0, 8], CellTag, .opCreateThisSlow
    loadi PayloadOffset[cfr, t0, 8], t0
    loadp JSCell::m_structure[t0], t1
    bbb Structure::m_typeInfo + TypeInfo::m_type[t1], ObjectType, .opCreateThisSlow
    loadp JSObject::m_inheritorID[t0], t2
    btpz t2, .opCreateThisSlow
    allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t2, t0, t1, t3, .opCreateThisSlow)
    loadi 4[PC], t1
    storei CellTag, TagOffset[cfr, t1, 8]
    storei t0, PayloadOffset[cfr, t1, 8]
    dispatch(3)

.opCreateThisSlow:
    callSlowPath(_llint_slow_path_create_this)
    dispatch(3)
638
639
# op_get_callee: store the current frame's callee cell into operand 1.
_llint_op_get_callee:
    traceExecution()
    loadi 4[PC], t0
    loadp PayloadOffset + Callee[cfr], t1
    storei CellTag, TagOffset[cfr, t0, 8]
    storei t1, PayloadOffset[cfr, t0, 8]
    dispatch(2)


# op_convert_this: ensure "this" (operand 1, converted in place) is an
# object; any non-cell or non-object cell goes to the slow path.
_llint_op_convert_this:
    traceExecution()
    loadi 4[PC], t0
    bineq TagOffset[cfr, t0, 8], CellTag, .opConvertThisSlow
    loadi PayloadOffset[cfr, t0, 8], t0
    loadp JSCell::m_structure[t0], t0
    bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
    dispatch(2)

.opConvertThisSlow:
    callSlowPath(_llint_slow_path_convert_this)
    dispatch(2)
661
662
# op_new_object: allocate an empty final object with the global object's
# empty-object structure, falling back to the slow path when the inline
# allocator misses.
_llint_op_new_object:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
    allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
    loadi 4[PC], t1
    storei CellTag, TagOffset[cfr, t1, 8]
    storei t0, PayloadOffset[cfr, t1, 8]
    dispatch(2)

.opNewObjectSlow:
    callSlowPath(_llint_slow_path_new_object)
    dispatch(2)


# op_new_array: always implemented via the slow path.
_llint_op_new_array:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array)
    dispatch(4)


# op_new_array_buffer: always implemented via the slow path.
_llint_op_new_array_buffer:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array_buffer)
    dispatch(4)


# op_new_regexp: always implemented via the slow path.
_llint_op_new_regexp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_regexp)
    dispatch(3)
695
696
# op_mov: copy the value of operand 2 (register or constant) into the
# register named by operand 1.
_llint_op_mov:
    traceExecution()
    loadi 8[PC], t1
    loadi 4[PC], t0
    loadConstantOrVariable(t1, t2, t3)
    storei t2, TagOffset[cfr, t0, 8]
    storei t3, PayloadOffset[cfr, t0, 8]
    dispatch(3)


# op_not: boolean negation of operand 2 into operand 1. Only boolean
# inputs are handled inline (flip the payload's low bit); everything
# else takes the slow path.
_llint_op_not:
    traceExecution()
    loadi 8[PC], t0
    loadi 4[PC], t1
    loadConstantOrVariable(t0, t2, t3)
    bineq t2, BooleanTag, .opNotSlow
    xori 1, t3
    storei t2, TagOffset[cfr, t1, 8]
    storei t3, PayloadOffset[cfr, t1, 8]
    dispatch(3)

.opNotSlow:
    callSlowPath(_llint_slow_path_not)
    dispatch(3)
721
722
# op_eq: loose equality of operands 2 and 3 into operand 1. The fast
# path compares payloads when both operands share the same tag and that
# tag is neither CellTag nor a double; cells, doubles, and mixed tags
# take the slow path.
_llint_op_eq:
    traceExecution()
    loadi 12[PC], t2
    loadi 8[PC], t0
    loadConstantOrVariable(t2, t3, t1)
    loadConstantOrVariable2Reg(t0, t2, t0)
    bineq t2, t3, .opEqSlow
    bieq t2, CellTag, .opEqSlow
    bib t2, LowestTag, .opEqSlow
    loadi 4[PC], t2
    cieq t0, t1, t0
    storei BooleanTag, TagOffset[cfr, t2, 8]
    storei t0, PayloadOffset[cfr, t2, 8]
    dispatch(4)

.opEqSlow:
    callSlowPath(_llint_slow_path_eq)
    dispatch(4)


# op_eq_null: compare operand 2 against null/undefined into operand 1.
# A cell counts as equal to null iff its structure carries the
# MasqueradesAsUndefined flag; a non-cell is equal to null iff its tag
# is NullTag or UndefinedTag.
_llint_op_eq_null:
    traceExecution()
    loadi 8[PC], t0
    loadi 4[PC], t3
    assertNotConstant(t0)
    loadi TagOffset[cfr, t0, 8], t1
    loadi PayloadOffset[cfr, t0, 8], t0
    bineq t1, CellTag, .opEqNullImmediate
    tbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
    jmp .opEqNullNotImmediate
.opEqNullImmediate:
    # Result is (tag == NullTag) | (tag == UndefinedTag).
    cieq t1, NullTag, t2
    cieq t1, UndefinedTag, t1
    ori t2, t1
.opEqNullNotImmediate:
    storei BooleanTag, TagOffset[cfr, t3, 8]
    storei t1, PayloadOffset[cfr, t3, 8]
    dispatch(3)


# op_neq: loose inequality; mirror image of op_eq.
_llint_op_neq:
    traceExecution()
    loadi 12[PC], t2
    loadi 8[PC], t0
    loadConstantOrVariable(t2, t3, t1)
    loadConstantOrVariable2Reg(t0, t2, t0)
    bineq t2, t3, .opNeqSlow
    bieq t2, CellTag, .opNeqSlow
    bib t2, LowestTag, .opNeqSlow
    loadi 4[PC], t2
    cineq t0, t1, t0
    storei BooleanTag, TagOffset[cfr, t2, 8]
    storei t0, PayloadOffset[cfr, t2, 8]
    dispatch(4)

.opNeqSlow:
    callSlowPath(_llint_slow_path_neq)
    dispatch(4)


# op_neq_null: mirror image of op_eq_null — note the inverted flag test
# (tbz) and the andi combining the two "not this tag" results.
_llint_op_neq_null:
    traceExecution()
    loadi 8[PC], t0
    loadi 4[PC], t3
    assertNotConstant(t0)
    loadi TagOffset[cfr, t0, 8], t1
    loadi PayloadOffset[cfr, t0, 8], t0
    bineq t1, CellTag, .opNeqNullImmediate
    loadp JSCell::m_structure[t0], t1
    tbz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
    jmp .opNeqNullNotImmediate
.opNeqNullImmediate:
    # Result is (tag != NullTag) & (tag != UndefinedTag).
    cineq t1, NullTag, t2
    cineq t1, UndefinedTag, t1
    andi t2, t1
.opNeqNullNotImmediate:
    storei BooleanTag, TagOffset[cfr, t3, 8]
    storei t1, PayloadOffset[cfr, t3, 8]
    dispatch(3)
803
804
# Shared implementation of op_stricteq/op_nstricteq. The fast path
# requires operands 2 and 3 to share a non-double tag. If both are
# string cells the slow path is taken (content comparison needed);
# otherwise equalityOperation(left, right, result) compares payloads
# (pointer identity for cells) and the boolean result goes in operand 1.
macro strictEq(equalityOperation, slow_path)
    loadi 12[PC], t2
    loadi 8[PC], t0
    loadConstantOrVariable(t2, t3, t1)
    loadConstantOrVariable2Reg(t0, t2, t0)
    bineq t2, t3, .slow
    bib t2, LowestTag, .slow
    bineq t2, CellTag, .notString
    loadp JSCell::m_structure[t0], t2
    loadp JSCell::m_structure[t1], t3
    bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
    bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
.notString:
    loadi 4[PC], t2
    equalityOperation(t0, t1, t0)
    storei BooleanTag, TagOffset[cfr, t2, 8]
    storei t0, PayloadOffset[cfr, t2, 8]
    dispatch(4)

.slow:
    callSlowPath(slow_path)
    dispatch(4)
end

# op_stricteq: strict equality (===) via strictEq().
_llint_op_stricteq:
    traceExecution()
    strictEq(macro (left, right, result) cieq left, right, result end, _llint_slow_path_stricteq)


# op_nstricteq: strict inequality (!==) via strictEq().
_llint_op_nstricteq:
    traceExecution()
    strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
837
838
# The relational comparison opcodes have no inline fast path; each simply
# defers to its slow path.
_llint_op_less:
    traceExecution()
    callSlowPath(_llint_slow_path_less)
    dispatch(4)


_llint_op_lesseq:
    traceExecution()
    callSlowPath(_llint_slow_path_lesseq)
    dispatch(4)


_llint_op_greater:
    traceExecution()
    callSlowPath(_llint_slow_path_greater)
    dispatch(4)


_llint_op_greatereq:
    traceExecution()
    callSlowPath(_llint_slow_path_greatereq)
    dispatch(4)
861
862
# op_pre_inc: ++r, updated in place in operand 1. Fast path requires an
# int32 whose increment does not overflow (baddio branches on overflow).
_llint_op_pre_inc:
    traceExecution()
    loadi 4[PC], t0
    bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreIncSlow
    loadi PayloadOffset[cfr, t0, 8], t1
    baddio 1, t1, .opPreIncSlow
    storei t1, PayloadOffset[cfr, t0, 8]
    dispatch(2)

.opPreIncSlow:
    callSlowPath(_llint_slow_path_pre_inc)
    dispatch(2)


# op_pre_dec: --r; same overflow discipline as op_pre_inc.
_llint_op_pre_dec:
    traceExecution()
    loadi 4[PC], t0
    bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreDecSlow
    loadi PayloadOffset[cfr, t0, 8], t1
    bsubio 1, t1, .opPreDecSlow
    storei t1, PayloadOffset[cfr, t0, 8]
    dispatch(2)

.opPreDecSlow:
    callSlowPath(_llint_slow_path_pre_dec)
    dispatch(2)


# op_post_inc: operand 1 = old value of operand 2, then operand 2 += 1.
# When both operands name the same register, the store of the old value
# would cancel the increment, so the whole operation is a no-op and is
# skipped.
_llint_op_post_inc:
    traceExecution()
    loadi 8[PC], t0
    loadi 4[PC], t1
    bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostIncSlow
    bieq t0, t1, .opPostIncDone
    loadi PayloadOffset[cfr, t0, 8], t2
    move t2, t3
    baddio 1, t3, .opPostIncSlow
    storei Int32Tag, TagOffset[cfr, t1, 8]
    storei t2, PayloadOffset[cfr, t1, 8]
    storei t3, PayloadOffset[cfr, t0, 8]
.opPostIncDone:
    dispatch(3)

.opPostIncSlow:
    callSlowPath(_llint_slow_path_post_inc)
    dispatch(3)


# op_post_dec: mirror image of op_post_inc.
_llint_op_post_dec:
    traceExecution()
    loadi 8[PC], t0
    loadi 4[PC], t1
    bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostDecSlow
    bieq t0, t1, .opPostDecDone
    loadi PayloadOffset[cfr, t0, 8], t2
    move t2, t3
    bsubio 1, t3, .opPostDecSlow
    storei Int32Tag, TagOffset[cfr, t1, 8]
    storei t2, PayloadOffset[cfr, t1, 8]
    storei t3, PayloadOffset[cfr, t0, 8]
.opPostDecDone:
    dispatch(3)

.opPostDecSlow:
    callSlowPath(_llint_slow_path_post_dec)
    dispatch(3)
929
930
# op_to_jsnumber: convert operand 2 to a number, storing into operand 1.
# Int32 and double values pass through unchanged; any other tag
# (boolean, null, undefined, cell, empty) takes the slow path.
_llint_op_to_jsnumber:
    traceExecution()
    loadi 8[PC], t0
    loadi 4[PC], t1
    loadConstantOrVariable(t0, t2, t3)
    bieq t2, Int32Tag, .opToJsnumberIsInt
    # Unsigned tag >= EmptyValueTag means a non-number, non-int tag;
    # anything below is the high word of a double and passes through.
    biaeq t2, EmptyValueTag, .opToJsnumberSlow
.opToJsnumberIsInt:
    storei t2, TagOffset[cfr, t1, 8]
    storei t3, PayloadOffset[cfr, t1, 8]
    dispatch(3)

.opToJsnumberSlow:
    callSlowPath(_llint_slow_path_to_jsnumber)
    dispatch(3)


# op_negate: arithmetic negation of operand 2 into operand 1.
_llint_op_negate:
    traceExecution()
    loadi 8[PC], t0
    loadi 4[PC], t3
    loadConstantOrVariable(t0, t1, t2)
    bineq t1, Int32Tag, .opNegateSrcNotInt
    # Go slow when the payload's low 31 bits are all zero: negating 0
    # must produce -0 (a double), and negating INT_MIN overflows.
    btiz t2, 0x7fffffff, .opNegateSlow
    negi t2
    storei Int32Tag, TagOffset[cfr, t3, 8]
    storei t2, PayloadOffset[cfr, t3, 8]
    dispatch(3)
.opNegateSrcNotInt:
    bia t1, LowestTag, .opNegateSlow
    # Double: negate by flipping the sign bit in the high (tag) word.
    xori 0x80000000, t1
    storei t1, TagOffset[cfr, t3, 8]
    storei t2, PayloadOffset[cfr, t3, 8]
    dispatch(3)

.opNegateSlow:
    callSlowPath(_llint_slow_path_negate)
    dispatch(3)
969
970
# Shared fast path for binary arithmetic. Operands 3 and 2 are loaded as
# (t3 tag, t1 payload) and (t2 tag, t0 payload) respectively. If both
# are int32, integerOperationAndStore(int32Tag, left, right, slow,
# index) computes and stores the result into operand 1. Otherwise both
# operands are materialized as doubles in ft1/ft0 and
# doubleOperation(left, right) leaves the result in ft0, which is stored
# raw over the destination JSValue (a double needs no separate tag).
macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slow_path)
    loadi 12[PC], t2
    loadi 8[PC], t0
    loadConstantOrVariable(t2, t3, t1)
    loadConstantOrVariable2Reg(t0, t2, t0)
    bineq t2, Int32Tag, .op1NotInt
    bineq t3, Int32Tag, .op2NotInt
    loadi 4[PC], t2
    integerOperationAndStore(t3, t1, t0, .slow, t2)
    dispatch(5)

.op1NotInt:
    # First operand is definitely not an int, the second operand could be anything.
    bia t2, LowestTag, .slow
    bib t3, LowestTag, .op1NotIntOp2Double
    bineq t3, Int32Tag, .slow
    ci2d t1, ft1
    jmp .op1NotIntReady
.op1NotIntOp2Double:
    # Reassemble the second operand's double from its two 32-bit words.
    fii2d t1, t3, ft1
.op1NotIntReady:
    loadi 4[PC], t1
    fii2d t0, t2, ft0
    doubleOperation(ft1, ft0)
    stored ft0, [cfr, t1, 8]
    dispatch(5)

.op2NotInt:
    # First operand is definitely an int, the second operand is definitely not.
    loadi 4[PC], t2
    bia t3, LowestTag, .slow
    ci2d t0, ft0
    fii2d t1, t3, ft1
    doubleOperation(ft1, ft0)
    stored ft0, [cfr, t2, 8]
    dispatch(5)

.slow:
    callSlowPath(slow_path)
    dispatch(5)
end

# Convenience wrapper for ops whose integer result is simply stored as
# an int32 JSValue in the destination register.
macro binaryOp(integerOperation, doubleOperation, slow_path)
    binaryOpCustomStore(
        macro (int32Tag, left, right, slow, index)
            integerOperation(left, right, slow)
            storei int32Tag, TagOffset[cfr, index, 8]
            storei right, PayloadOffset[cfr, index, 8]
        end,
        doubleOperation, slow_path)
end
1022
# op_add: integer add with overflow check, or double add.
_llint_op_add:
    traceExecution()
    binaryOp(
        macro (left, right, slow) baddio left, right, slow end,
        macro (left, right) addd left, right end,
        _llint_slow_path_add)


# op_mul: integer multiply with overflow check. A zero result with a
# negative operand goes slow, since that result would be -0, which is
# only representable as a double.
_llint_op_mul:
    traceExecution()
    binaryOpCustomStore(
        macro (int32Tag, left, right, slow, index)
            const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant.
            move right, scratch
            bmulio left, scratch, slow
            btinz scratch, .done
            bilt left, 0, slow
            bilt right, 0, slow
        .done:
            storei Int32Tag, TagOffset[cfr, index, 8]
            storei scratch, PayloadOffset[cfr, index, 8]
        end,
        macro (left, right) muld left, right end,
        _llint_slow_path_mul)


# op_sub: integer subtract with overflow check, or double subtract.
_llint_op_sub:
    traceExecution()
    binaryOp(
        macro (left, right, slow) bsubio left, right, slow end,
        macro (left, right) subd left, right end,
        _llint_slow_path_sub)
1055
1056
1057_llint_op_div:
1058 traceExecution()
1059 binaryOpCustomStore(
1060 macro (int32Tag, left, right, slow, index)
1061 ci2d left, ft0
1062 ci2d right, ft1
1063 divd ft0, ft1
1064 bcd2i ft1, right, .notInt
1065 storei int32Tag, TagOffset[cfr, index, 8]
1066 storei right, PayloadOffset[cfr, index, 8]
1067 jmp .done
1068 .notInt:
1069 stored ft1, [cfr, index, 8]
1070 .done:
1071 end,
1072 macro (left, right) divd left, right end,
1073 _llint_slow_path_div)
1074
1075
1076_llint_op_mod:
1077 traceExecution()
1078 callSlowPath(_llint_slow_path_mod)
1079 dispatch(4)
1080
1081
# Shared fast path for bitwise/shift ops: both operands must already be
# int32, and the int32 result is stored at the destination. 'advance' is
# this opcode's length in instruction words (it differs across the bit ops).
macro bitOp(operation, slow_path, advance)
    loadi 12[PC], t2
    loadi 8[PC], t0
    loadConstantOrVariable(t2, t3, t1)     # second operand: tag t3, payload t1
    loadConstantOrVariable2Reg(t0, t2, t0) # first operand: tag t2, payload t0
    bineq t3, Int32Tag, .slow
    bineq t2, Int32Tag, .slow
    loadi 4[PC], t2
    operation(t1, t0, .slow)
    storei t3, TagOffset[cfr, t2, 8]       # t3 is known to be Int32Tag here
    storei t0, PayloadOffset[cfr, t2, 8]
    dispatch(advance)

.slow:
    callSlowPath(slow_path)
    dispatch(advance)
end
1099
# The shift/bitwise ops below all funnel through bitOp; note the differing
# opcode lengths (4 for shifts, 5 for and/xor/or).
_llint_op_lshift:
    traceExecution()
    bitOp(
        macro (left, right, slow) lshifti left, right end,
        _llint_slow_path_lshift,
        4)


_llint_op_rshift:
    traceExecution()
    bitOp(
        macro (left, right, slow) rshifti left, right end,
        _llint_slow_path_rshift,
        4)


_llint_op_urshift:
    traceExecution()
    bitOp(
        macro (left, right, slow)
            urshifti left, right
            # A negative int32 here means the unsigned result is > INT32_MAX
            # and cannot be boxed as an int32; let the slow path make a double.
            bilt right, 0, slow
        end,
        _llint_slow_path_urshift,
        4)


_llint_op_bitand:
    traceExecution()
    bitOp(
        macro (left, right, slow) andi left, right end,
        _llint_slow_path_bitand,
        5)


_llint_op_bitxor:
    traceExecution()
    bitOp(
        macro (left, right, slow) xori left, right end,
        _llint_slow_path_bitxor,
        5)


_llint_op_bitor:
    traceExecution()
    bitOp(
        macro (left, right, slow) ori left, right end,
        _llint_slow_path_bitor,
        5)
1149
1150
# bitnot: unary, so it loads its single operand inline rather than via bitOp.
# 8[PC] = source, 4[PC] = destination.
_llint_op_bitnot:
    traceExecution()
    loadi 8[PC], t1
    loadi 4[PC], t0
    loadConstantOrVariable(t1, t2, t3)   # tag in t2, payload in t3
    bineq t2, Int32Tag, .opBitnotSlow
    noti t3
    storei t2, TagOffset[cfr, t0, 8]     # t2 is still Int32Tag
    storei t3, PayloadOffset[cfr, t0, 8]
    dispatch(3)

.opBitnotSlow:
    callSlowPath(_llint_slow_path_bitnot)
    dispatch(3)
1165
1166
# check_has_instance: verify 4[PC] is a cell whose structure has the
# ImplementsHasInstance flag; anything else goes to the slow path.
_llint_op_check_has_instance:
    traceExecution()
    loadi 4[PC], t1
    loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
    loadp JSCell::m_structure[t0], t0
    btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsHasInstance, .opCheckHasInstanceSlow
    dispatch(2)

.opCheckHasInstanceSlow:
    callSlowPath(_llint_slow_path_check_has_instance)
    dispatch(2)


_llint_op_instanceof:
    traceExecution()
    # Check that baseVal implements the default HasInstance behavior.
    # FIXME: This should be deprecated.
    loadi 12[PC], t1
    loadConstantOrVariablePayloadUnchecked(t1, t0)
    loadp JSCell::m_structure[t0], t0
    btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opInstanceofSlow

    # Actually do the work.
    loadi 16[PC], t0                 # prototype operand
    loadi 4[PC], t3                  # destination
    loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
    loadp JSCell::m_structure[t1], t2
    bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
    loadi 8[PC], t0                  # value operand
    loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)

    # Register state: t1 = prototype, t2 = value
    move 1, t0                       # optimistically assume true
.opInstanceofLoop:
    # Walk the value's prototype chain looking for 'prototype'.
    loadp JSCell::m_structure[t2], t2
    loadi Structure::m_prototype + PayloadOffset[t2], t2
    bpeq t2, t1, .opInstanceofDone
    btinz t2, .opInstanceofLoop      # a zero payload terminates the chain

    move 0, t0                       # fell off the chain: not an instance
.opInstanceofDone:
    storei BooleanTag, TagOffset[cfr, t3, 8]
    storei t0, PayloadOffset[cfr, t3, 8]
    dispatch(5)

.opInstanceofSlow:
    callSlowPath(_llint_slow_path_instanceof)
    dispatch(5)
1215
1216
# The following opcodes have no LLInt fast path; each simply calls its C++
# slow path and advances PC by the opcode's length.
_llint_op_typeof:
    traceExecution()
    callSlowPath(_llint_slow_path_typeof)
    dispatch(3)


_llint_op_is_undefined:
    traceExecution()
    callSlowPath(_llint_slow_path_is_undefined)
    dispatch(3)


_llint_op_is_boolean:
    traceExecution()
    callSlowPath(_llint_slow_path_is_boolean)
    dispatch(3)


_llint_op_is_number:
    traceExecution()
    callSlowPath(_llint_slow_path_is_number)
    dispatch(3)


_llint_op_is_string:
    traceExecution()
    callSlowPath(_llint_slow_path_is_string)
    dispatch(3)


_llint_op_is_object:
    traceExecution()
    callSlowPath(_llint_slow_path_is_object)
    dispatch(3)


_llint_op_is_function:
    traceExecution()
    callSlowPath(_llint_slow_path_is_function)
    dispatch(3)


_llint_op_in:
    traceExecution()
    callSlowPath(_llint_slow_path_in)
    dispatch(4)


_llint_op_resolve:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve)
    dispatch(4)


_llint_op_resolve_skip:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve_skip)
    dispatch(5)
1275
1276
# Fast path for a cached global lookup: if the global object's structure
# still matches the cached structure at 12[PC], load the value straight out
# of the global's property storage at the cached offset. 'size' is the
# opcode length; the last operand ((size - 1) * 4[PC]) is the value profile.
macro resolveGlobal(size, slow)
    # Operands are as follows:
    # 4[PC]   Destination for the load.
    # 8[PC]   Property identifier index in the code block.
    # 12[PC]  Structure pointer, initialized to 0 by bytecode generator.
    # 16[PC]  Offset in global object, initialized to 0 by bytecode generator.
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadp JSCell::m_structure[t0], t1
    bpneq t1, 12[PC], slow           # cache miss (or never-initialized cache)
    loadi 16[PC], t1
    loadp JSObject::m_propertyStorage[t0], t0
    loadi TagOffset[t0, t1, 8], t2
    loadi PayloadOffset[t0, t1, 8], t3
    loadi 4[PC], t0
    storei t2, TagOffset[cfr, t0, 8]
    storei t3, PayloadOffset[cfr, t0, 8]
    loadi (size - 1) * 4[PC], t0     # value-profile operand is always last
    valueProfile(t2, t3, t0)
end
1297
# resolve_global: 6-word opcode; cache miss falls through to the slow path,
# which also (re)populates the inline cache.
_llint_op_resolve_global:
    traceExecution()
    resolveGlobal(6, .opResolveGlobalSlow)
    dispatch(6)

.opResolveGlobalSlow:
    callSlowPath(_llint_slow_path_resolve_global)
    dispatch(6)
1306
1307
# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
# scopes as they are traversed. scopeCheck() is called with two arguments: the register
# holding the scope, and a register that can be used for scratch. Note that this does not
# use t3, so you can hold stuff in t3 if need be.
macro getScope(deBruijinIndexOperand, scopeCheck)
    loadp ScopeChain + PayloadOffset[cfr], t0
    loadi deBruijinIndexOperand, t2      # number of scope links to skip

    btiz t2, .done                       # index 0: current scope is the answer

    # A function with a full scope chain but an uncreated activation has one
    # fewer scope node than the bytecode's de Bruijn index accounts for, so
    # conditionally consume one index step without advancing past it twice.
    loadp CodeBlock[cfr], t1
    bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
    btbz CodeBlock::m_needsFullScopeChain[t1], .loop

    loadi CodeBlock::m_activationRegister[t1], t1

    # Need to conditionally skip over one scope.
    bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
    scopeCheck(t0, t1)
    loadp ScopeChainNode::next[t0], t0
.noActivation:
    subi 1, t2

    btiz t2, .done
.loop:
    # Walk t2 more links down the scope chain, checking each as we go.
    scopeCheck(t0, t1)
    loadp ScopeChainNode::next[t0], t0
    subi 1, t2
    btinz t2, .loop

.done:
end
1340
# resolve_global_dynamic: like resolve_global, but first walks the scope
# chain (de Bruijn index at 20[PC]) proving every intervening scope is a
# plain activation — any non-activation scope could shadow the global, so
# that case goes to the full dynamic-resolve slow path.
_llint_op_resolve_global_dynamic:
    traceExecution()
    loadp JITStackFrame::globalData[sp], t3
    loadp JSGlobalData::activationStructure[t3], t3   # t3 survives getScope (it avoids t3)
    getScope(
        20[PC],
        macro (scope, scratch)
            loadp ScopeChainNode::object[scope], scratch
            bpneq JSCell::m_structure[scratch], t3, .opResolveGlobalDynamicSuperSlow
        end)
    resolveGlobal(7, .opResolveGlobalDynamicSlow)
    dispatch(7)

.opResolveGlobalDynamicSuperSlow:
    callSlowPath(_llint_slow_path_resolve_for_resolve_global_dynamic)
    dispatch(7)

.opResolveGlobalDynamicSlow:
    callSlowPath(_llint_slow_path_resolve_global_dynamic)
    dispatch(7)
1361
1362
_llint_op_get_scoped_var:
    traceExecution()
    # Operands are as follows:
    # 4[PC]   Destination for the load.
    # 8[PC]   Index of register in the scope.
    # 12[PC]  De Bruijin index.
    getScope(12[PC], macro (scope, scratch) end)   # no per-scope checks needed
    loadi 4[PC], t1
    loadi 8[PC], t2
    loadp ScopeChainNode::object[t0], t0
    loadp JSVariableObject::m_registers[t0], t0
    loadi TagOffset[t0, t2, 8], t3
    loadi PayloadOffset[t0, t2, 8], t0
    storei t3, TagOffset[cfr, t1, 8]
    storei t0, PayloadOffset[cfr, t1, 8]
    loadi 16[PC], t1                               # value-profile operand
    valueProfile(t3, t0, t1)
    dispatch(5)


_llint_op_put_scoped_var:
    traceExecution()
    # 8[PC] = de Bruijn index, 12[PC] = value, 4[PC] = register index in scope.
    getScope(8[PC], macro (scope, scratch) end)
    loadi 12[PC], t1
    loadConstantOrVariable(t1, t3, t2)             # tag t3, payload t2
    loadi 4[PC], t1
    writeBarrier(t3, t2)                           # barrier before the store
    loadp ScopeChainNode::object[t0], t0
    loadp JSVariableObject::m_registers[t0], t0
    storei t3, TagOffset[t0, t1, 8]
    storei t2, PayloadOffset[t0, t1, 8]
    dispatch(4)
1395
1396
# get_global_var: direct indexed load from the global object's register
# array. 8[PC] = index, 4[PC] = destination, 12[PC] = value profile.
_llint_op_get_global_var:
    traceExecution()
    loadi 8[PC], t1
    loadi 4[PC], t3
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadp JSGlobalObject::m_registers[t0], t0
    loadi TagOffset[t0, t1, 8], t2
    loadi PayloadOffset[t0, t1, 8], t1
    storei t2, TagOffset[cfr, t3, 8]
    storei t1, PayloadOffset[cfr, t3, 8]
    loadi 12[PC], t3
    valueProfile(t2, t1, t3)
    dispatch(4)


# put_global_var: indexed store into the global object's register array.
# 8[PC] = value operand, 4[PC] = index.
_llint_op_put_global_var:
    traceExecution()
    loadi 8[PC], t1
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadp JSGlobalObject::m_registers[t0], t0
    loadConstantOrVariable(t1, t2, t3)   # tag t2, payload t3
    loadi 4[PC], t1
    writeBarrier(t2, t3)
    storei t2, TagOffset[t0, t1, 8]
    storei t3, PayloadOffset[t0, t1, 8]
    dispatch(3)
1425
1426
# Resolve-family opcodes with no fast path: delegate to C++ and advance.
_llint_op_resolve_base:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve_base)
    dispatch(5)


_llint_op_ensure_property_exists:
    traceExecution()
    callSlowPath(_llint_slow_path_ensure_property_exists)
    dispatch(3)


_llint_op_resolve_with_base:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve_with_base)
    dispatch(5)


_llint_op_resolve_with_this:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve_with_this)
    dispatch(5)
1449
1450
_llint_op_get_by_id:
    traceExecution()
    # We only do monomorphic get_by_id caching for now, and we do not modify the
    # opcode. We do, however, allow for the cache to change anytime if fails, since
    # ping-ponging is free. At best we get lucky and the get_by_id will continue
    # to take fast path on the new cache. At worst we take slow path, which is what
    # we would have been doing anyway.
    # 8[PC] = base, 16[PC] = cached structure, 20[PC] = cached byte offset,
    # 4[PC] = destination, 32[PC] = value profile; opcode length is 9.
    loadi 8[PC], t0
    loadi 16[PC], t1
    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
    loadi 20[PC], t2
    loadp JSObject::m_propertyStorage[t3], t0
    bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow  # structure check = cache hit test
    loadi 4[PC], t1
    loadi TagOffset[t0, t2], t3        # t2 is a byte offset, so no scale factor here
    loadi PayloadOffset[t0, t2], t2
    storei t3, TagOffset[cfr, t1, 8]
    storei t2, PayloadOffset[cfr, t1, 8]
    loadi 32[PC], t1
    valueProfile(t3, t2, t1)
    dispatch(9)

.opGetByIdSlow:
    callSlowPath(_llint_slow_path_get_by_id)
    dispatch(9)
1476
1477
# get_arguments_length: fast path only while the arguments object has not
# been materialized (its register slot is still empty); then the length is
# simply the frame's argument count minus the 'this' slot.
_llint_op_get_arguments_length:
    traceExecution()
    loadi 8[PC], t0                      # arguments register
    loadi 4[PC], t1                      # destination
    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
    loadi ArgumentCount + PayloadOffset[cfr], t2
    subi 1, t2                           # exclude 'this'
    storei Int32Tag, TagOffset[cfr, t1, 8]
    storei t2, PayloadOffset[cfr, t1, 8]
    dispatch(4)

.opGetArgumentsLengthSlow:
    callSlowPath(_llint_slow_path_get_arguments_length)
    dispatch(4)
1492
1493
# put_by_id: monomorphic cached store (non-transition case). 4[PC] = base,
# 16[PC] = cached structure, 12[PC] = value operand, 20[PC] = cached byte
# offset into property storage; opcode length is 9.
_llint_op_put_by_id:
    traceExecution()
    loadi 4[PC], t3
    loadi 16[PC], t1
    loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
    loadi 12[PC], t2
    loadp JSObject::m_propertyStorage[t0], t3
    bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow  # cache hit test
    loadi 20[PC], t1
    loadConstantOrVariable2Reg(t2, t0, t2)             # value: tag t0, payload t2
    writeBarrier(t0, t2)
    storei t0, TagOffset[t3, t1]                       # t1 is a byte offset
    storei t2, PayloadOffset[t3, t1]
    dispatch(9)

.opPutByIdSlow:
    callSlowPath(_llint_slow_path_put_by_id)
    dispatch(9)
1512
1513
# put_by_id that transitions the base's structure (adds a property).
# additionalChecks(oldStructure, scratch, slow) lets the caller verify the
# cached prototype chain. On success the value is stored at byte offset
# 20[PC] and the structure is swapped to the new one at 24[PC]. Branches to
# .opPutByIdSlow, the shared slow path defined under _llint_op_put_by_id.
macro putByIdTransition(additionalChecks)
    traceExecution()
    loadi 4[PC], t3
    loadi 16[PC], t1                     # expected old structure
    loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
    loadi 12[PC], t2
    bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
    additionalChecks(t1, t3, .opPutByIdSlow)
    loadi 20[PC], t1
    loadp JSObject::m_propertyStorage[t0], t3
    addp t1, t3                          # t3 = address of the new property slot
    loadConstantOrVariable2Reg(t2, t1, t2)
    writeBarrier(t1, t2)
    storei t1, TagOffset[t3]
    loadi 24[PC], t1                     # new structure
    storei t2, PayloadOffset[t3]
    storep t1, JSCell::m_structure[t0]   # publish the transition last
    dispatch(9)
end
1533
# Direct transitions need no prototype-chain validation.
_llint_op_put_by_id_transition_direct:
    putByIdTransition(macro (oldStructure, scratch, slow) end)


# Normal transitions must verify that every structure in the cached
# prototype chain (28[PC]) still matches the live prototype chain.
_llint_op_put_by_id_transition_normal:
    putByIdTransition(
        macro (oldStructure, scratch, slow)
            const protoCell = oldStructure # Reusing the oldStructure register for the proto

            loadp 28[PC], scratch
            assert(macro (ok) btpnz scratch, ok end)
            loadp StructureChain::m_vector[scratch], scratch
            assert(macro (ok) btpnz scratch, ok end)
            bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
        .loop:
            # Compare each prototype's structure against the cached vector entry.
            loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
            loadp JSCell::m_structure[protoCell], oldStructure
            bpneq oldStructure, [scratch], slow
            addp 4, scratch              # advance cached-chain cursor (32-bit pointers)
            bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
        .done:
        end)
1556
1557
# del_by_id: no fast path; property deletion is always handled in C++.
_llint_op_del_by_id:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_id)
    dispatch(4)
1562
1563
# get_by_val: fast path for indexed access on a plain JSArray with an int32
# index within the vector, and a non-hole element. 8[PC] = base, 12[PC] =
# subscript, 4[PC] = destination, 16[PC] = value profile.
_llint_op_get_by_val:
    traceExecution()
    loadp CodeBlock[cfr], t1
    loadi 8[PC], t2
    loadi 12[PC], t3
    loadp CodeBlock::m_globalData[t1], t1
    loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
    loadp JSGlobalData::jsArrayClassInfo[t1], t2
    loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)
    bpneq [t0], t2, .opGetByValSlow          # class-info check: must be a real JSArray
    loadp JSArray::m_storage[t0], t3
    biaeq t1, JSArray::m_vectorLength[t0], .opGetByValSlow  # unsigned bound check
    loadi 4[PC], t0
    loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
    loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
    bieq t2, EmptyValueTag, .opGetByValSlow  # hole: slow path consults prototypes
    storei t2, TagOffset[cfr, t0, 8]
    storei t1, PayloadOffset[cfr, t0, 8]
    loadi 16[PC], t0
    valueProfile(t2, t1, t0)
    dispatch(5)

.opGetByValSlow:
    callSlowPath(_llint_slow_path_get_by_val)
    dispatch(5)
1589
1590
# get_argument_by_val: fast path while the arguments object is unmaterialized;
# reads argument i directly out of the caller-pushed slots above the frame
# (negative offsets from ThisArgumentOffset).
_llint_op_get_argument_by_val:
    traceExecution()
    loadi 8[PC], t0                      # arguments register
    loadi 12[PC], t1                     # subscript
    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
    loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
    addi 1, t2                           # skip the 'this' slot
    loadi ArgumentCount + PayloadOffset[cfr], t1
    biaeq t2, t1, .opGetArgumentByValSlow  # unsigned check also rejects negatives
    negi t2                              # arguments live at negative indices
    loadi 4[PC], t3
    loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
    loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
    storei t0, TagOffset[cfr, t3, 8]
    storei t1, PayloadOffset[cfr, t3, 8]
    dispatch(5)

.opGetArgumentByValSlow:
    callSlowPath(_llint_slow_path_get_argument_by_val)
    dispatch(5)
1611
1612
# get_by_pname: fast property load inside a for-in loop, valid only while
# the base still matches the property-name iterator's cached structure and
# the index is within the iterator's cacheable slots. Operand roles below
# are read off the loads; exact bytecode layout should be confirmed against
# the bytecode generator.
_llint_op_get_by_pname:
    traceExecution()
    loadi 12[PC], t0
    loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
    loadi 16[PC], t0
    bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow   # property name must come from the iterator
    loadi 8[PC], t0
    loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)  # base cell
    loadi 20[PC], t0
    loadi PayloadOffset[cfr, t0, 8], t3                      # the JSPropertyNameIterator
    loadp JSCell::m_structure[t2], t0
    bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
    loadi 24[PC], t0
    loadi [cfr, t0, 8], t0
    subi 1, t0                                               # iterator index is 1-based
    biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
    loadp JSObject::m_propertyStorage[t2], t2
    loadi TagOffset[t2, t0, 8], t1
    loadi PayloadOffset[t2, t0, 8], t3
    loadi 4[PC], t0
    storei t1, TagOffset[cfr, t0, 8]
    storei t3, PayloadOffset[cfr, t0, 8]
    dispatch(7)

.opGetByPnameSlow:
    callSlowPath(_llint_slow_path_get_by_pname)
    dispatch(7)
1640
1641
# put_by_val: fast path for storing into a plain JSArray within its vector.
# 4[PC] = base, 8[PC] = subscript, 12[PC] = value. Writing into a hole
# bumps m_numValuesInVector and possibly m_length before storing.
_llint_op_put_by_val:
    traceExecution()
    loadi 4[PC], t0
    loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
    loadi 8[PC], t0
    loadConstantOrVariablePayload(t0, Int32Tag, t2, .opPutByValSlow)
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalData[t0], t0
    loadp JSGlobalData::jsArrayClassInfo[t0], t0
    bpneq [t1], t0, .opPutByValSlow          # must be a real JSArray
    biaeq t2, JSArray::m_vectorLength[t1], .opPutByValSlow  # unsigned bound check
    loadp JSArray::m_storage[t1], t0
    bieq ArrayStorage::m_vector + TagOffset[t0, t2, 8], EmptyValueTag, .opPutByValEmpty
.opPutByValStoreResult:
    loadi 12[PC], t3
    loadConstantOrVariable2Reg(t3, t1, t3)   # value: tag t1, payload t3
    writeBarrier(t1, t3)
    storei t1, ArrayStorage::m_vector + TagOffset[t0, t2, 8]
    storei t3, ArrayStorage::m_vector + PayloadOffset[t0, t2, 8]
    dispatch(4)

.opPutByValEmpty:
    # Filling a hole: account for the new element, and grow m_length if we
    # stored at or past the current length.
    addi 1, ArrayStorage::m_numValuesInVector[t0]
    bib t2, ArrayStorage::m_length[t0], .opPutByValStoreResult
    addi 1, t2, t1                           # new length = index + 1
    storei t1, ArrayStorage::m_length[t0]
    jmp .opPutByValStoreResult
.opPutByValSlow:
    callSlowPath(_llint_slow_path_put_by_val)
    dispatch(4)
1673
1674
# Slow-path-only property opcodes.
_llint_op_del_by_val:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_val)
    dispatch(4)


_llint_op_put_by_index:
    traceExecution()
    callSlowPath(_llint_slow_path_put_by_index)
    dispatch(4)


_llint_op_put_getter_setter:
    traceExecution()
    callSlowPath(_llint_slow_path_put_getter_setter)
    dispatch(5)
1691
1692
# op_loop is identical to op_jmp in the LLInt; it falls through.
_llint_op_loop:
    nop
_llint_op_jmp:
    traceExecution()
    dispatchBranch(4[PC])                # relative jump target at 4[PC]


_llint_op_jmp_scopes:
    traceExecution()
    callSlowPath(_llint_slow_path_jmp_scopes)
    dispatch(0)                          # slow path sets the new PC itself
1704
1705
# Conditional-branch helper: fast path only for boolean-tagged operands.
# conditionOp(value, target) branches when the condition holds; non-boolean
# operands take the slow path, which performs the jump itself (dispatch(0)).
macro jumpTrueOrFalse(conditionOp, slow)
    loadi 4[PC], t1
    loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)
    conditionOp(t0, .target)
    dispatch(3)

.target:
    dispatchBranch(8[PC])

.slow:
    callSlowPath(slow)
    dispatch(0)
end
1719
# loop_if_true/false are aliases for jtrue/jfalse (fall-through via nop).
_llint_op_loop_if_true:
    nop
_llint_op_jtrue:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btinz value, target end,
        _llint_slow_path_jtrue)


_llint_op_loop_if_false:
    nop
_llint_op_jfalse:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btiz value, target end,
        _llint_slow_path_jfalse)
1736
1737
# Compare-to-null branch helper. Cells are tested via their structure flags
# (cellHandler); immediates are tested against NullTag after or-ing 1 into
# the tag — this appears to fold UndefinedTag onto NullTag so one compare
# catches both (relies on the two tag values differing only in bit 0;
# confirm against JSValue's tag definitions).
macro equalNull(cellHandler, immediateHandler)
    loadi 4[PC], t0
    loadi TagOffset[cfr, t0, 8], t1
    loadi PayloadOffset[cfr, t0, 8], t0
    bineq t1, CellTag, .immediate
    loadp JSCell::m_structure[t0], t2
    cellHandler(Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
    dispatch(3)

.target:
    dispatchBranch(8[PC])

.immediate:
    ori 1, t1
    immediateHandler(t1, .target)
    dispatch(3)
end
1755
# jeq_null/jneq_null: a cell equals null only if its structure masquerades
# as undefined; immediates are compared against the (folded) null tag.
_llint_op_jeq_null:
    traceExecution()
    equalNull(
        macro (value, target) btbnz value, MasqueradesAsUndefined, target end,
        macro (value, target) bieq value, NullTag, target end)


_llint_op_jneq_null:
    traceExecution()
    equalNull(
        macro (value, target) btbz value, MasqueradesAsUndefined, target end,
        macro (value, target) bineq value, NullTag, target end)


# jneq_ptr: branch unless the register holds exactly the cell at 8[PC].
_llint_op_jneq_ptr:
    traceExecution()
    loadi 4[PC], t0
    loadi 8[PC], t1
    bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
    bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
.opJneqPtrBranch:
    dispatchBranch(12[PC])
.opJneqPtrFallThrough:
    dispatch(4)
1780
1781
# Fused compare-and-branch for the jless/jgreater family. Fast paths for
# int32/int32 and any int/double mix; everything else (e.g. strings) goes
# to the slow path, which performs the branch itself. Branch target is at
# 12[PC]; fall-through advances by 4.
macro compare(integerCompare, doubleCompare, slow_path)
    loadi 4[PC], t2
    loadi 8[PC], t3
    loadConstantOrVariable(t2, t0, t1)     # first operand: tag t0, payload t1
    loadConstantOrVariable2Reg(t3, t2, t3) # second operand: tag t2, payload t3
    bineq t0, Int32Tag, .op1NotInt
    bineq t2, Int32Tag, .op2NotInt
    integerCompare(t1, t3, .jumpTarget)
    dispatch(4)

.op1NotInt:
    bia t0, LowestTag, .slow               # op1 is neither int nor double
    bib t2, LowestTag, .op1NotIntOp2Double # op2 is a double
    bineq t2, Int32Tag, .slow
    ci2d t3, ft1                           # promote int32 op2 to double
    jmp .op1NotIntReady
.op1NotIntOp2Double:
    fii2d t3, t2, ft1
.op1NotIntReady:
    fii2d t1, t0, ft0
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.op2NotInt:
    ci2d t1, ft0
    bia t2, LowestTag, .slow
    fii2d t3, t2, ft1
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.jumpTarget:
    dispatchBranch(12[PC])

.slow:
    callSlowPath(slow_path)
    dispatch(0)                            # slow path updates PC itself
end
1819
# Relational branch opcodes, all via the compare macro. The negated forms
# (jnless etc.) use the *unordered* double comparisons (…un) so that NaN
# operands take the branch, matching JS semantics where any comparison
# involving NaN is false.
_llint_op_loop_if_less:
    nop
_llint_op_jless:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdlt left, right, target end,
        _llint_slow_path_jless)


_llint_op_jnless:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgtequn left, right, target end,
        _llint_slow_path_jnless)


_llint_op_loop_if_greater:
    nop
_llint_op_jgreater:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgt left, right, target end,
        _llint_slow_path_jgreater)


_llint_op_jngreater:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdltequn left, right, target end,
        _llint_slow_path_jngreater)


_llint_op_loop_if_lesseq:
    nop
_llint_op_jlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdlteq left, right, target end,
        _llint_slow_path_jlesseq)


_llint_op_jnlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgtun left, right, target end,
        _llint_slow_path_jnlesseq)


_llint_op_loop_if_greatereq:
    nop
_llint_op_jgreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgteq left, right, target end,
        _llint_slow_path_jgreatereq)


_llint_op_jngreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdltun left, right, target end,
        _llint_slow_path_jngreatereq)
1890
1891
# loop_hint: OSR point — may hand the loop over to the JIT.
_llint_op_loop_hint:
    traceExecution()
    checkSwitchToJITForLoop()
    dispatch(1)
1896
1897
# switch_imm: jump-table dispatch on an int32 scrutinee. 12[PC] = scrutinee,
# 4[PC] = index of this opcode's SimpleJumpTable, 8[PC] = default offset.
# Doubles go to the slow path (they may still match after truncation);
# any other non-int takes the default branch.
_llint_op_switch_imm:
    traceExecution()
    loadi 12[PC], t2
    loadi 4[PC], t3
    loadConstantOrVariable(t2, t1, t0)   # tag t1, payload t0
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
    loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2                          # t2 = this opcode's SimpleJumpTable
    bineq t1, Int32Tag, .opSwitchImmNotInt
    subi SimpleJumpTable::min[t2], t0    # rebase scrutinee to table start
    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
    loadi [t3, t0, 4], t1
    btiz t1, .opSwitchImmFallThrough     # zero offset marks an absent case
    dispatchBranchWithOffset(t1)

.opSwitchImmNotInt:
    bib t1, LowestTag, .opSwitchImmSlow  # Go to slow path if it's a double.
.opSwitchImmFallThrough:
    dispatchBranch(8[PC])

.opSwitchImmSlow:
    callSlowPath(_llint_slow_path_switch_imm)
    dispatch(0)
1924
1925
# switch_char: jump-table dispatch on a single-character string. 12[PC] =
# scrutinee, 4[PC] = index of this opcode's character SimpleJumpTable,
# 8[PC] = default offset. Anything that is not a one-character JSString
# takes the default branch. Handles both 8-bit and 16-bit string buffers.
_llint_op_switch_char:
    traceExecution()
    loadi 12[PC], t2
    loadi 4[PC], t3
    loadConstantOrVariable(t2, t1, t0)   # tag t1, payload t0
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3
    loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2                          # t2 = this opcode's SimpleJumpTable
    bineq t1, CellTag, .opSwitchCharFallThrough
    loadp JSCell::m_structure[t0], t1
    bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
    loadp JSString::m_value[t0], t0
    bineq StringImpl::m_length[t0], 1, .opSwitchCharFallThrough
    loadp StringImpl::m_data8[t0], t1
    btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
    loadh [t1], t0                       # 16-bit buffer: load a UChar
    jmp .opSwitchCharReady
.opSwitchChar8Bit:
    loadb [t1], t0                       # 8-bit buffer: load an LChar
.opSwitchCharReady:
    subi SimpleJumpTable::min[t2], t0
    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
    loadi [t2, t0, 4], t1
    # Fixed: this previously branched to .opSwitchImmFallThrough, a local
    # label belonging to _llint_op_switch_imm. Both fall-throughs happen to
    # do dispatchBranch(8[PC]) today, but jumping into another opcode's
    # handler is fragile; use this opcode's own fall-through label.
    btiz t1, .opSwitchCharFallThrough    # zero offset marks an absent case
    dispatchBranchWithOffset(t1)

.opSwitchCharFallThrough:
    dispatchBranch(8[PC])
1957
1958
# switch_string: string tables are always resolved in C++.
_llint_op_switch_string:
    traceExecution()
    callSlowPath(_llint_slow_path_switch_string)
    dispatch(0)                          # slow path sets the new PC


# new_func: 12[PC] is a flag; when nonzero the function is only created if
# the destination register (4[PC]) is still empty (lazy re-entry case).
_llint_op_new_func:
    traceExecution()
    btiz 12[PC], .opNewFuncUnchecked
    loadi 4[PC], t1
    bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
.opNewFuncUnchecked:
    callSlowPath(_llint_slow_path_new_func)
.opNewFuncDone:
    dispatch(4)


_llint_op_new_func_exp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_func_exp)
    dispatch(3)
1980
1981
# Fast call path: if the callee cell matches the one cached in the
# LLIntCallLinkInfo (16[PC]), build the new call frame inline and jump to
# the linked machine code target; otherwise fall back to the generic call
# slow path (which also links the call). 4[PC] = callee operand, 12[PC] =
# register offset of the new frame, 8[PC] = argument count.
macro doCall(slow_path)
    loadi 4[PC], t0
    loadi 16[PC], t1                     # LLIntCallLinkInfo*
    loadp LLIntCallLinkInfo::callee[t1], t2
    loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
    bineq t3, t2, .opCallSlow            # callee must match the linked one
    loadi 12[PC], t3
    addp 24, PC                          # bump PC (6 words) before the call
    lshifti 3, t3                        # register index -> byte offset
    addp cfr, t3 # t3 contains the new value of cfr
    loadp JSFunction::m_scopeChain[t2], t0
    storei t2, Callee + PayloadOffset[t3]
    storei t0, ScopeChain + PayloadOffset[t3]
    loadi 8 - 24[PC], t2                 # argument count; PC already advanced by 24
    storei PC, ArgumentCount + TagOffset[cfr]   # stash return bytecode PC in our frame
    storep cfr, CallerFrame[t3]
    storei t2, ArgumentCount + PayloadOffset[t3]
    storei CellTag, Callee + TagOffset[t3]
    storei CellTag, ScopeChain + TagOffset[t3]
    move t3, cfr
    call LLIntCallLinkInfo::machineCodeTarget[t1]
    dispatchAfterCall()

.opCallSlow:
    slowPathForCall(6, slow_path)
end
2008
# call and construct share the inline fast path; only the slow path differs.
_llint_op_call:
    traceExecution()
    doCall(_llint_slow_path_call)


_llint_op_construct:
    traceExecution()
    doCall(_llint_slow_path_construct)


# call_varargs has no inline fast path: argument marshalling is in C++.
_llint_op_call_varargs:
    traceExecution()
    slowPathForCall(6, _llint_slow_path_call_varargs)
2022
2023
_llint_op_call_eval:
    traceExecution()

    # Eval is executed in one of two modes:
    #
    # 1) We find that we're really invoking eval() in which case the
    #    execution is performed entirely inside the slow_path, and it
    #    returns the PC of a function that just returns the return value
    #    that the eval returned.
    #
    # 2) We find that we're invoking something called eval() that is not
    #    the real eval. Then the slow_path returns the PC of the thing to
    #    call, and we call it.
    #
    # This allows us to handle two cases, which would require a total of
    # up to four pieces of state that cannot be easily packed into two
    # registers (C functions can return up to two registers, easily):
    #
    # - The call frame register. This may or may not have been modified
    #   by the slow_path, but the convention is that it returns it. It's not
    #   totally clear if that's necessary, since the cfr is callee save.
    #   But that's our style in this here interpreter so we stick with it.
    #
    # - A bit to say if the slow_path successfully executed the eval and has
    #   the return value, or did not execute the eval but has a PC for us
    #   to call.
    #
    # - Either:
    #   - The JS return value (two registers), or
    #
    #   - The PC to call.
    #
    # It turns out to be easier to just always have this return the cfr
    # and a PC to call, and that PC may be a dummy thunk that just
    # returns the JS value that the eval returned.

    slowPathForCall(4, _llint_slow_path_call_eval)
2061
2062
# Common landing point after a callee returns to the LLInt.
_llint_generic_return_point:
    dispatchAfterCall()
2065
2066
# tear_off_activation: call into C++ if either the activation (4[PC]) or
# the arguments object (8[PC]) was actually created; otherwise skip.
_llint_op_tear_off_activation:
    traceExecution()
    loadi 4[PC], t0
    loadi 8[PC], t1
    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationCreated
    bieq TagOffset[cfr, t1, 8], EmptyValueTag, .opTearOffActivationNotCreated
.opTearOffActivationCreated:
    callSlowPath(_llint_slow_path_tear_off_activation)
.opTearOffActivationNotCreated:
    dispatch(3)


# tear_off_arguments: only needed if the arguments object was materialized
# (checked via the unmodified-arguments register, one below 4[PC]).
_llint_op_tear_off_arguments:
    traceExecution()
    loadi 4[PC], t0
    subi 1, t0 # Get the unmodifiedArgumentsRegister
    bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
    callSlowPath(_llint_slow_path_tear_off_arguments)
.opTearOffArgumentsNotCreated:
    dispatch(2)
2087
2088
# Pop this frame and return to the caller's machine return address.
# Callers arrange the JS return value in registers beforehand (see op_ret:
# tag in t1, payload in t0, consumed by op_call_put_result).
macro doReturn()
    loadp ReturnPC[cfr], t2
    loadp CallerFrame[cfr], cfr
    restoreReturnAddressBeforeReturn(t2)
    ret
end
2095
# ret: load the return value into the t1 (tag) / t0 (payload) convention
# and unwind. May OSR into the JIT for the epilogue first.
_llint_op_ret:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadi 4[PC], t2
    loadConstantOrVariable(t2, t1, t0)   # return value: tag t1, payload t0
    doReturn()


# call_put_result: executed at the caller right after a call returns; the
# return value is still live in t1/t0 per the convention above.
_llint_op_call_put_result:
    loadi 4[PC], t2
    loadi 8[PC], t3
    storei t1, TagOffset[cfr, t2, 8]
    storei t0, PayloadOffset[cfr, t2, 8]
    valueProfile(t1, t0, t3)
    traceExecution() # Needs to be here because it would clobber t1, t0
    dispatch(3)
2112
2113
# ret_object_or_this (constructor return): return 4[PC] if it is an object;
# otherwise discard it and return 8[PC] ('this') instead.
_llint_op_ret_object_or_this:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadi 4[PC], t2
    loadConstantOrVariable(t2, t1, t0)
    bineq t1, CellTag, .opRetObjectOrThisNotObject
    loadp JSCell::m_structure[t0], t2
    bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
    doReturn()

.opRetObjectOrThisNotObject:
    loadi 8[PC], t2
    loadConstantOrVariable(t2, t1, t0)   # fall back to returning 'this'
    doReturn()
2128
2129
_llint_op_method_check:
    traceExecution()
    # We ignore method checks and use normal get_by_id optimizations.
    dispatch(1)


_llint_op_strcat:
    traceExecution()
    callSlowPath(_llint_slow_path_strcat)
    dispatch(4)
2140
2141
# to_primitive: immediates and strings are already primitive and are copied
# through unchanged; any other cell needs the full C++ conversion.
_llint_op_to_primitive:
    traceExecution()
    loadi 8[PC], t2
    loadi 4[PC], t3
    loadConstantOrVariable(t2, t1, t0)   # tag t1, payload t0
    bineq t1, CellTag, .opToPrimitiveIsImm
    loadp JSCell::m_structure[t0], t2
    bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
    storei t1, TagOffset[cfr, t3, 8]
    storei t0, PayloadOffset[cfr, t3, 8]
    dispatch(3)

.opToPrimitiveSlowCase:
    callSlowPath(_llint_slow_path_to_primitive)
    dispatch(3)
2158
2159
# get_pnames: sets up a property name iterator; entirely slow-pathed.
_llint_op_get_pnames:
    traceExecution()
    callSlowPath(_llint_slow_path_get_pnames)
    dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
2165
# next_pname: fetch the next property name from the iterator, store it into
# the destination register, and branch back to the loop body; falls through
# past the loop when the index reaches the end. Before taking the fast path
# it verifies that neither the base object's structure nor its prototype
# chain has changed since the iterator was cached.
_llint_op_next_pname:
    traceExecution()
    loadi 12[PC], t1                   # operand 3: current index register
    loadi 16[PC], t2                   # operand 4: end-index register
    loadi PayloadOffset[cfr, t1, 8], t0
    bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd   # index == end => iteration done
    loadi 20[PC], t2                   # operand 5: iterator register
    loadi PayloadOffset[cfr, t2, 8], t2
    loadp JSPropertyNameIterator::m_jsStrings[t2], t3
    loadi [t3, t0, 8], t3              # 8-byte stride: m_jsStrings holds 64-bit JSValues
    addi 1, t0
    storei t0, PayloadOffset[cfr, t1, 8]   # write back the incremented index
    loadi 4[PC], t1                    # operand 1: destination register for the name
    storei CellTag, TagOffset[cfr, t1, 8]
    storei t3, PayloadOffset[cfr, t1, 8]
    # Guard: the base object must still have the structure cached in the iterator.
    loadi 8[PC], t3                    # operand 2: base object register
    loadi PayloadOffset[cfr, t3, 8], t3
    loadp JSCell::m_structure[t3], t1
    bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
    # Guard: walk the cached prototype chain (null-terminated structure vector)
    # and check each prototype's structure still matches.
    loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
    loadp StructureChain::m_vector[t0], t0
    btpz [t0], .opNextPnameTarget      # empty chain: nothing to verify
.opNextPnameCheckPrototypeLoop:
    bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
    loadp Structure::m_prototype + PayloadOffset[t1], t2
    loadp JSCell::m_structure[t2], t1
    bpneq t1, [t0], .opNextPnameSlow
    addp 4, t0                         # advance to the next cached Structure* (32-bit pointers)
    btpnz [t0], .opNextPnameCheckPrototypeLoop
.opNextPnameTarget:
    dispatchBranch(24[PC])             # operand 6: loop-body branch target
.opNextPnameEnd:
    dispatch(7)

.opNextPnameSlow:
    callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
    dispatch(0)
2205
# push_scope: entirely slow-pathed.
_llint_op_push_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_scope)
    dispatch(2)
2211
# pop_scope: entirely slow-pathed.
_llint_op_pop_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_pop_scope)
    dispatch(1)
2217
# push_new_scope: entirely slow-pathed.
_llint_op_push_new_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_new_scope)
    dispatch(4)
2223
_llint_op_catch:
    # This is where we end up from the JIT's throw trampoline (because the
    # machine code return address will be set to _llint_op_catch), and from
    # the interpreter's throw trampoline (see _llint_throw_trampoline).
    # The JIT throwing protocol calls for the cfr to be in t0. The throwing
    # code must have known that we were throwing to the interpreter, and have
    # set JSGlobalData::targetInterpreterPCForThrow.
    move t0, cfr
    loadp JITStackFrame::globalData[sp], t3
    loadi JSGlobalData::targetInterpreterPCForThrow[t3], PC
    # Move the pending exception into the handler's destination register and
    # clear it from the global data (payload = 0, tag = EmptyValueTag).
    loadi JSGlobalData::exception + PayloadOffset[t3], t0
    loadi JSGlobalData::exception + TagOffset[t3], t1
    storei 0, JSGlobalData::exception + PayloadOffset[t3]
    storei EmptyValueTag, JSGlobalData::exception + TagOffset[t3]
    loadi 4[PC], t2                    # operand 1: register that receives the caught exception
    storei t0, PayloadOffset[cfr, t2, 8]
    storei t1, TagOffset[cfr, t2, 8]
    traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
    dispatch(2)
2244
# throw: entirely slow-pathed; the slow path unwinds to the handler.
_llint_op_throw:
    traceExecution()
    callSlowPath(_llint_slow_path_throw)
    dispatch(2)
2250
# throw_reference_error: entirely slow-pathed.
_llint_op_throw_reference_error:
    traceExecution()
    callSlowPath(_llint_slow_path_throw_reference_error)
    dispatch(2)
2256
# jsr <retAddrDst> <target>: subroutine call for finally blocks. Stores the
# interpreter PC of the instruction after this jsr (PC + 3 operands * 4
# bytes) into the destination register, then branches to the target.
_llint_op_jsr:
    traceExecution()
    loadi 4[PC], t0                    # operand 1: register receiving the return PC
    addi 3 * 4, PC, t1                 # return PC = address of the next instruction
    storei t1, [cfr, t0, 8]
    dispatchBranch(8[PC])              # operand 2: branch target
2264
# sret <retAddrSrc>: return from a jsr subroutine — reload the interpreter PC
# saved by jsr and continue there.
_llint_op_sret:
    traceExecution()
    loadi 4[PC], t0                    # operand 1: register holding the saved return PC
    loadp [cfr, t0, 8], PC
    dispatch(0)                        # PC already points at the next instruction
2271
# debug: debugger hook; entirely slow-pathed.
_llint_op_debug:
    traceExecution()
    callSlowPath(_llint_slow_path_debug)
    dispatch(4)
2277
# profile_will_call: notify the profiler, if one is enabled, that a call is
# about to happen. Fast path skips the slow call when no profiler is set.
_llint_op_profile_will_call:
    traceExecution()
    loadp JITStackFrame::enabledProfilerReference[sp], t0
    btpz [t0], .opProfileWillCallDone  # no profiler enabled: nothing to do
    callSlowPath(_llint_slow_path_profile_will_call)
.opProfileWillCallDone:
    dispatch(2)
2286
# profile_did_call: notify the profiler, if one is enabled, that a call just
# returned. Fast path skips the slow call when no profiler is set.
_llint_op_profile_did_call:
    traceExecution()
    loadp JITStackFrame::enabledProfilerReference[sp], t0
    # Fix: this previously branched to .opProfileWillCallDone (a label inside
    # _llint_op_profile_will_call), leaving .opProfileDidCallDone unreferenced.
    # It only worked by accident because both paths end in dispatch(2).
    btpz [t0], .opProfileDidCallDone   # no profiler enabled: nothing to do
    callSlowPath(_llint_slow_path_profile_did_call)
.opProfileDidCallDone:
    dispatch(2)
2295
# end <value>: terminate the program/eval body, returning <value>.
_llint_op_end:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadi 4[PC], t0                    # operand 1: virtual register of the final value
    loadi TagOffset[cfr, t0, 8], t1    # return value into t1 (tag) / t0 (payload)
    loadi PayloadOffset[cfr, t0, 8], t0
    doReturn()
2304
_llint_throw_from_slow_path_trampoline:
    # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
    # the throw target is not necessarily interpreted code, we come to here.
    # This essentially emulates the JIT's throwing protocol.
    loadp JITStackFrame::globalData[sp], t1
    loadp JSGlobalData::callFrameForThrow[t1], t0   # JIT protocol: cfr of the handler goes in t0
    jmp JSGlobalData::targetMachinePCForThrow[t1]
2313
# Entered when a throw happens during a call; pops the machine return address
# back off the stack first, then follows the JIT throwing protocol (handler
# frame in t0, jump to the target machine PC).
_llint_throw_during_call_trampoline:
    preserveReturnAddressAfterCall(t2)
    loadp JITStackFrame::globalData[sp], t1
    loadp JSGlobalData::callFrameForThrow[t1], t0
    jmp JSGlobalData::targetMachinePCForThrow[t1]
2320
2321# Lastly, make sure that we can link even though we don't support all opcodes.
2322# These opcodes should never arise when using LLInt or either JIT. We assert
2323# as much.
2324
# Body for opcodes the LLInt never executes: crash loudly in debug builds,
# and emit the smallest halting instruction otherwise (see below).
macro notSupported()
    if ASSERT_ENABLED
        crash()
    else
        # We should use whatever the smallest possible instruction is, just to
        # ensure that there is a gap between instruction labels. If multiple
        # smallest instructions exist, we should pick the one that is most
        # likely result in execution being halted. Currently that is the break
        # instruction on all architectures we're interested in. (Break is int3
        # on Intel, which is 1 byte, and bkpt on ARMv7, which is 2 bytes.)
        break
    end
end
2338
# Stubs for the specialized get_by_id/put_by_id opcode variants, which are
# link targets only and must never execute under the LLInt (see notSupported).
_llint_op_get_array_length:
    notSupported()

_llint_op_get_by_id_chain:
    notSupported()

_llint_op_get_by_id_custom_chain:
    notSupported()

_llint_op_get_by_id_custom_proto:
    notSupported()

_llint_op_get_by_id_custom_self:
    notSupported()

_llint_op_get_by_id_generic:
    notSupported()

_llint_op_get_by_id_getter_chain:
    notSupported()

_llint_op_get_by_id_getter_proto:
    notSupported()

_llint_op_get_by_id_getter_self:
    notSupported()

_llint_op_get_by_id_proto:
    notSupported()

_llint_op_get_by_id_self:
    notSupported()

_llint_op_get_string_length:
    notSupported()

_llint_op_put_by_id_generic:
    notSupported()

_llint_op_put_by_id_replace:
    notSupported()

_llint_op_put_by_id_transition:
    notSupported()

2384
# Indicate the end of LLInt. Must never be reached; crash if it is.
_llint_end:
    crash()
