blob: cfbd8cec56f7b44cc5347007d76aaf972597bd86 [file] [log] [blame]
dbates@webkit.org98f0de02013-10-15 22:16:39 +00001/*
2 * Copyright (C) 2012 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef ARM64Assembler_h
27#define ARM64Assembler_h
28
29#if ENABLE(ASSEMBLER) && CPU(ARM64)
30
31#include "AssemblerBuffer.h"
32#include <wtf/Assertions.h>
33#include <wtf/Vector.h>
34#include <stdint.h>
35
36#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64)
37#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
38#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
39#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
40#define DATASIZE DATASIZE_OF(datasize)
41#define MEMOPSIZE MEMOPSIZE_OF(datasize)
42#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
43
44namespace JSC {
45
46ALWAYS_INLINE bool isInt9(int32_t value)
47{
48 return value == ((value << 23) >> 23);
49}
50
51ALWAYS_INLINE bool isUInt5(int32_t value)
52{
53 return !(value & ~0x1f);
54}
55
56ALWAYS_INLINE bool isUInt12(int32_t value)
57{
58 return !(value & ~0xfff);
59}
60
61ALWAYS_INLINE bool isUInt12(intptr_t value)
62{
63 return !(value & ~0xfffL);
64}
65
66class UInt5 {
67public:
68 explicit UInt5(int value)
69 : m_value(value)
70 {
71 ASSERT(isUInt5(value));
72 }
73
74 operator int() { return m_value; }
75
76private:
77 int m_value;
78};
79
80class UInt12 {
81public:
82 explicit UInt12(int value)
83 : m_value(value)
84 {
85 ASSERT(isUInt12(value));
86 }
87
88 operator int() { return m_value; }
89
90private:
91 int m_value;
92};
93
94class PostIndex {
95public:
96 explicit PostIndex(int value)
97 : m_value(value)
98 {
99 ASSERT(isInt9(value));
100 }
101
102 operator int() { return m_value; }
103
104private:
105 int m_value;
106};
107
108class PreIndex {
109public:
110 explicit PreIndex(int value)
111 : m_value(value)
112 {
113 ASSERT(isInt9(value));
114 }
115
116 operator int() { return m_value; }
117
118private:
119 int m_value;
120};
121
122class LogicalImmediate {
123public:
124 static LogicalImmediate create32(uint32_t value)
125 {
126 // Check for 0, -1 - these cannot be encoded.
127 if (!value || !~value)
128 return InvalidLogicalImmediate;
129
130 // First look for a 32-bit pattern, then for repeating 16-bit
131 // patterns, 8-bit, 4-bit, and finally 2-bit.
132
133 unsigned hsb, lsb;
134 bool inverted;
135 if (findBitRange<32>(value, hsb, lsb, inverted))
136 return encodeLogicalImmediate<32>(hsb, lsb, inverted);
137
138 if ((value & 0xffff) != (value >> 16))
139 return InvalidLogicalImmediate;
140 value &= 0xffff;
141
142 if (findBitRange<16>(value, hsb, lsb, inverted))
143 return encodeLogicalImmediate<16>(hsb, lsb, inverted);
144
145 if ((value & 0xff) != (value >> 8))
146 return InvalidLogicalImmediate;
147 value &= 0xff;
148
149 if (findBitRange<8>(value, hsb, lsb, inverted))
150 return encodeLogicalImmediate<8>(hsb, lsb, inverted);
151
152 if ((value & 0xf) != (value >> 4))
153 return InvalidLogicalImmediate;
154 value &= 0xf;
155
156 if (findBitRange<4>(value, hsb, lsb, inverted))
157 return encodeLogicalImmediate<4>(hsb, lsb, inverted);
158
159 if ((value & 0x3) != (value >> 2))
160 return InvalidLogicalImmediate;
161 value &= 0x3;
162
163 if (findBitRange<2>(value, hsb, lsb, inverted))
164 return encodeLogicalImmediate<2>(hsb, lsb, inverted);
165
166 return InvalidLogicalImmediate;
167 }
168
169 static LogicalImmediate create64(uint64_t value)
170 {
171 // Check for 0, -1 - these cannot be encoded.
172 if (!value || !~value)
173 return InvalidLogicalImmediate;
174
175 // Look for a contiguous bit range.
176 unsigned hsb, lsb;
177 bool inverted;
178 if (findBitRange<64>(value, hsb, lsb, inverted))
179 return encodeLogicalImmediate<64>(hsb, lsb, inverted);
180
181 // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
182 if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
183 return create32(static_cast<uint32_t>(value));
184 return InvalidLogicalImmediate;
185 }
186
187 int value() const
188 {
189 ASSERT(isValid());
190 return m_value;
191 }
192
193 bool isValid() const
194 {
195 return m_value != InvalidLogicalImmediate;
196 }
197
198 bool is64bit() const
199 {
200 return m_value & (1 << 12);
201 }
202
203private:
204 LogicalImmediate(int value)
205 : m_value(value)
206 {
207 }
208
209 // Generate a mask with bits in the range hsb..0 set, for example:
210 // hsb:63 = 0xffffffffffffffff
211 // hsb:42 = 0x000007ffffffffff
212 // hsb: 0 = 0x0000000000000001
213 static uint64_t mask(unsigned hsb)
214 {
215 ASSERT(hsb < 64);
216 return 0xffffffffffffffffull >> (63 - hsb);
217 }
218
219 template<unsigned N>
220 static void partialHSB(uint64_t& value, unsigned&result)
221 {
222 if (value & (0xffffffffffffffffull << N)) {
223 result += N;
224 value >>= N;
225 }
226 }
227
228 // Find the bit number of the highest bit set in a non-zero value, for example:
229 // 0x8080808080808080 = hsb:63
230 // 0x0000000000000001 = hsb: 0
231 // 0x000007ffffe00000 = hsb:42
232 static unsigned highestSetBit(uint64_t value)
233 {
234 ASSERT(value);
235 unsigned hsb = 0;
236 partialHSB<32>(value, hsb);
237 partialHSB<16>(value, hsb);
238 partialHSB<8>(value, hsb);
239 partialHSB<4>(value, hsb);
240 partialHSB<2>(value, hsb);
241 partialHSB<1>(value, hsb);
242 return hsb;
243 }
244
245 // This function takes a value and a bit width, where value obeys the following constraints:
246 // * bits outside of the width of the value must be zero.
247 // * bits within the width of value must neither be all clear or all set.
248 // The input is inspected to detect values that consist of either two or three contiguous
249 // ranges of bits. The output range hsb..lsb will describe the second range of the value.
250 // if the range is set, inverted will be false, and if the range is clear, inverted will
251 // be true. For example (with width 8):
252 // 00001111 = hsb:3, lsb:0, inverted:false
253 // 11110000 = hsb:3, lsb:0, inverted:true
254 // 00111100 = hsb:5, lsb:2, inverted:false
255 // 11000011 = hsb:5, lsb:2, inverted:true
256 template<unsigned width>
257 static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
258 {
259 ASSERT(value & mask(width - 1));
260 ASSERT(value != mask(width - 1));
261 ASSERT(!(value & ~mask(width - 1)));
262
263 // Detect cases where the top bit is set; if so, flip all the bits & set invert.
264 // This halves the number of patterns we need to look for.
265 const uint64_t msb = 1ull << (width - 1);
266 if ((inverted = (value & msb)))
267 value ^= mask(width - 1);
268
269 // Find the highest set bit in value, generate a corresponding mask & flip all
270 // bits under it.
271 hsb = highestSetBit(value);
272 value ^= mask(hsb);
273 if (!value) {
274 // If this cleared the value, then the range hsb..0 was all set.
275 lsb = 0;
276 return true;
277 }
278
279 // Try making one more mask, and flipping the bits!
280 lsb = highestSetBit(value);
281 value ^= mask(lsb);
282 if (!value) {
283 // Success - but lsb actually points to the hsb of a third range - add one
284 // to get to the lsb of the mid range.
285 ++lsb;
286 return true;
287 }
288
289 return false;
290 }
291
292 // Encodes the set of immN:immr:imms fields found in a logical immediate.
293 template<unsigned width>
294 static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
295 {
296 // Check width is a power of 2!
297 ASSERT(!(width & (width -1)));
298 ASSERT(width <= 64 && width >= 2);
299 ASSERT(hsb >= lsb);
300 ASSERT(hsb < width);
301
302 int immN = 0;
303 int imms = 0;
304 int immr = 0;
305
306 // For 64-bit values this is easy - just set immN to true, and imms just
307 // contains the bit number of the highest set bit of the set range. For
308 // values with narrower widths, these are encoded by a leading set of
309 // one bits, followed by a zero bit, followed by the remaining set of bits
310 // being the high bit of the range. For a 32-bit immediate there are no
311 // leading one bits, just a zero followed by a five bit number. For a
312 // 16-bit immediate there is one one bit, a zero bit, and then a four bit
313 // bit-position, etc.
314 if (width == 64)
315 immN = 1;
316 else
317 imms = 63 & ~(width + width - 1);
318
319 if (inverted) {
320 // if width is 64 & hsb is 62, then we have a value something like:
321 // 0x80000000ffffffff (in this case with lsb 32).
322 // The ror should be by 1, imms (effectively set width minus 1) is
323 // 32. Set width is full width minus cleared width.
324 immr = (width - 1) - hsb;
325 imms |= (width - ((hsb - lsb) + 1)) - 1;
326 } else {
327 // if width is 64 & hsb is 62, then we have a value something like:
328 // 0x7fffffff00000000 (in this case with lsb 32).
329 // The value is effectively rol'ed by lsb, which is equivalent to
330 // a ror by width - lsb (or 0, in the case where lsb is 0). imms
331 // is hsb - lsb.
332 immr = (width - lsb) & (width - 1);
333 imms |= hsb - lsb;
334 }
335
336 return immN << 12 | immr << 6 | imms;
337 }
338
339 static const int InvalidLogicalImmediate = -1;
340
341 int m_value;
342};
343
// Extracts 16-bit halfword number `which` (0 = least significant) from a
// 64-bit value; used when materializing constants with MOVZ/MOVK.
inline uint16_t getHalfword(uint64_t value, int which)
{
    return value >> (which << 4);
}
348
// ARM64 register numbering per the AAPCS64 calling convention.
// Note: sp and zr share encoding 31 in instructions; here zr is given the
// out-of-range value 0x3f so the two can be told apart, and instruction
// encoders mask with 0x1f (via isSp/isZr checks) when emitting.
namespace ARM64Registers {
    typedef enum {
        // Parameter/result registers
        x0,
        x1,
        x2,
        x3,
        x4,
        x5,
        x6,
        x7,
        // Indirect result location register
        x8,
        // Temporary registers
        x9,
        x10,
        x11,
        x12,
        x13,
        x14,
        x15,
        // Intra-procedure-call scratch registers (temporary)
        x16, ip0 = x16,
        x17, ip1 = x17,
        // Platform Register (temporary)
        x18,
        // Callee-saved
        x19,
        x20,
        x21,
        x22,
        x23,
        x24,
        x25,
        x26,
        x27,
        x28,
        // Special
        x29, fp = x29,
        x30, lr = x30,
        sp,
        zr = 0x3f,
    } RegisterID;

    typedef enum {
        // Parameter/result registers
        q0,
        q1,
        q2,
        q3,
        q4,
        q5,
        q6,
        q7,
        // Callee-saved (up to 64-bits only!)
        q8,
        q9,
        q10,
        q11,
        q12,
        q13,
        q14,
        q15,
        // Temporary registers
        q16,
        q17,
        q18,
        q19,
        q20,
        q21,
        q22,
        q23,
        q24,
        q25,
        q26,
        q27,
        q28,
        q29,
        q30,
        q31,
    } FPRegisterID;

    static bool isSp(RegisterID reg) { return reg == sp; }
    static bool isZr(RegisterID reg) { return reg == zr; }
}
434
435class ARM64Assembler {
436public:
437 typedef ARM64Registers::RegisterID RegisterID;
438 typedef ARM64Registers::FPRegisterID FPRegisterID;
fpizlo@apple.com7a214582013-10-18 02:43:44 +0000439
440 static RegisterID firstRegister() { return ARM64Registers::x0; }
441 static RegisterID lastRegister() { return ARM64Registers::x28; }
442
443 static FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
444 static FPRegisterID lastFPRegister() { return ARM64Registers::q31; }
dbates@webkit.org98f0de02013-10-15 22:16:39 +0000445
446private:
447 static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
448 static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }
449
450public:
451 ARM64Assembler()
452 : m_indexOfLastWatchpoint(INT_MIN)
453 , m_indexOfTailOfLastWatchpoint(INT_MIN)
454 {
455 }
fpizlo@apple.coma26c9042013-10-20 02:07:39 +0000456
457 AssemblerBuffer& buffer() { return m_buffer; }
dbates@webkit.org98f0de02013-10-15 22:16:39 +0000458
459 // (HS, LO, HI, LS) -> (AE, B, A, BE)
460 // (VS, VC) -> (O, NO)
461 typedef enum {
462 ConditionEQ,
463 ConditionNE,
464 ConditionHS, ConditionCS = ConditionHS,
465 ConditionLO, ConditionCC = ConditionLO,
466 ConditionMI,
467 ConditionPL,
468 ConditionVS,
469 ConditionVC,
470 ConditionHI,
471 ConditionLS,
472 ConditionGE,
473 ConditionLT,
474 ConditionGT,
475 ConditionLE,
476 ConditionAL,
477 ConditionInvalid
478 } Condition;
479
480 static Condition invert(Condition cond)
481 {
482 return static_cast<Condition>(cond ^ 1);
483 }
484
485 typedef enum {
486 LSL,
487 LSR,
488 ASR,
489 ROR
490 } ShiftType;
491
492 typedef enum {
493 UXTB,
494 UXTH,
495 UXTW,
496 UXTX,
497 SXTB,
498 SXTH,
499 SXTW,
500 SXTX
501 } ExtendType;
502
503 enum SetFlags {
504 DontSetFlags,
505 S
506 };
507
// Jump/link-type enums pack an index in the low 4 bits and the maximum
// encoded size in bytes in the upper bits, so the size is recoverable
// without a lookup table.
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
        JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
        JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
        JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
    };
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
        LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
    };
530
531 class LinkRecord {
532 public:
533 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
534 {
535 data.realTypes.m_from = from;
536 data.realTypes.m_to = to;
537 data.realTypes.m_type = type;
538 data.realTypes.m_linkType = LinkInvalid;
539 data.realTypes.m_condition = condition;
540 }
541 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
542 {
543 data.realTypes.m_from = from;
544 data.realTypes.m_to = to;
545 data.realTypes.m_type = type;
546 data.realTypes.m_linkType = LinkInvalid;
547 data.realTypes.m_condition = condition;
548 data.realTypes.m_is64Bit = is64Bit;
549 data.realTypes.m_compareRegister = compareRegister;
550 }
551 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
552 {
553 data.realTypes.m_from = from;
554 data.realTypes.m_to = to;
555 data.realTypes.m_type = type;
556 data.realTypes.m_linkType = LinkInvalid;
557 data.realTypes.m_condition = condition;
558 data.realTypes.m_bitNumber = bitNumber;
559 data.realTypes.m_compareRegister = compareRegister;
560 }
561 void operator=(const LinkRecord& other)
562 {
563 data.copyTypes.content[0] = other.data.copyTypes.content[0];
564 data.copyTypes.content[1] = other.data.copyTypes.content[1];
565 data.copyTypes.content[2] = other.data.copyTypes.content[2];
566 }
567 intptr_t from() const { return data.realTypes.m_from; }
568 void setFrom(intptr_t from) { data.realTypes.m_from = from; }
569 intptr_t to() const { return data.realTypes.m_to; }
570 JumpType type() const { return data.realTypes.m_type; }
571 JumpLinkType linkType() const { return data.realTypes.m_linkType; }
572 void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
573 Condition condition() const { return data.realTypes.m_condition; }
574 bool is64Bit() const { return data.realTypes.m_is64Bit; }
575 unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
576 RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }
577
578 private:
579 union {
580 struct RealTypes {
581 intptr_t m_from : 48;
582 intptr_t m_to : 48;
583 JumpType m_type : 8;
584 JumpLinkType m_linkType : 8;
585 Condition m_condition : 4;
586 bool m_is64Bit : 1;
587 unsigned m_bitNumber : 6;
588 RegisterID m_compareRegister : 5;
589 } realTypes;
590 struct CopyTypes {
591 uint64_t content[3];
592 } copyTypes;
593 COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
594 } data;
595 };
596
597 // bits(N) VFPExpandImm(bits(8) imm8);
598 //
599 // Encoding of floating point immediates is a litte complicated. Here's a
600 // high level description:
601 // +/-m*2-n where m and n are integers, 16 <= m <= 31, 0 <= n <= 7
602 // and the algirithm for expanding to a single precision float:
603 // return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
604 //
605 // The trickiest bit is how the exponent is handled. The following table
606 // may help clarify things a little:
607 // 654
608 // 100 01111100 124 -3 1020 01111111100
609 // 101 01111101 125 -2 1021 01111111101
610 // 110 01111110 126 -1 1022 01111111110
611 // 111 01111111 127 0 1023 01111111111
612 // 000 10000000 128 1 1024 10000000000
613 // 001 10000001 129 2 1025 10000000001
614 // 010 10000010 130 3 1026 10000000010
615 // 011 10000011 131 4 1027 10000000011
616 // The first column shows the bit pattern stored in bits 6-4 of the arm
617 // encoded immediate. The second column shows the 8-bit IEEE 754 single
618 // -precision exponent in binary, the third column shows the raw decimal
619 // value. IEEE 754 single-precision numbers are stored with a bias of 127
620 // to the exponent, so the fourth column shows the resulting exponent.
621 // From this was can see that the exponent can be in the range -3..4,
622 // which agrees with the high level description given above. The fifth
623 // and sixth columns shows the value stored in a IEEE 754 double-precision
624 // number to represent these exponents in decimal and binary, given the
625 // bias of 1023.
626 //
627 // Ultimately, detecting doubles that can be encoded as immediates on arm
628 // and encoding doubles is actually not too bad. A floating point value can
629 // be encoded by retaining the sign bit, the low three bits of the exponent
630 // and the high 4 bits of the mantissa. To validly be able to encode an
631 // immediate the remainder of the mantissa must be zero, and the high part
632 // of the exponent must match the top bit retained, bar the highest bit
633 // which must be its inverse.
634 static bool canEncodeFPImm(double d)
635 {
636 // Discard the sign bit, the low two bits of the exponent & the highest
637 // four bits of the mantissa.
638 uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
639 return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
640 }
641
642 template<int datasize>
643 static bool canEncodePImmOffset(int32_t offset)
644 {
645 int32_t maxPImm = 4095 * (datasize / 8);
646 if (offset < 0)
647 return false;
648 if (offset > maxPImm)
649 return false;
650 if (offset & ((datasize / 8 ) - 1))
651 return false;
652 return true;
653 }
654
655 static bool canEncodeSImmOffset(int32_t offset)
656 {
657 return isInt9(offset);
658 }
659
660private:
661 int encodeFPImm(double d)
662 {
663 ASSERT(canEncodeFPImm(d));
664 uint64_t u64 = bitwise_cast<uint64_t>(d);
665 return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
666 }
667
668 template<int datasize>
669 int encodeShiftAmount(int amount)
670 {
671 ASSERT(!amount || datasize == (8 << amount));
672 return amount;
673 }
674
675 template<int datasize>
676 static int encodePositiveImmediate(unsigned pimm)
677 {
678 ASSERT(!(pimm & ((datasize / 8) - 1)));
679 return pimm / (datasize / 8);
680 }
681
682 enum Datasize {
683 Datasize_32,
684 Datasize_64,
685 Datasize_64_top,
686 Datasize_16
687 };
688
689 enum MemOpSize {
690 MemOpSize_8_or_128,
691 MemOpSize_16,
692 MemOpSize_32,
693 MemOpSize_64,
694 };
695
696 enum BranchType {
697 BranchType_JMP,
698 BranchType_CALL,
699 BranchType_RET
700 };
701
702 enum AddOp {
703 AddOp_ADD,
704 AddOp_SUB
705 };
706
707 enum BitfieldOp {
708 BitfieldOp_SBFM,
709 BitfieldOp_BFM,
710 BitfieldOp_UBFM
711 };
712
713 enum DataOp1Source {
714 DataOp_RBIT,
715 DataOp_REV16,
716 DataOp_REV32,
717 DataOp_REV64,
718 DataOp_CLZ,
719 DataOp_CLS
720 };
721
722 enum DataOp2Source {
723 DataOp_UDIV = 2,
724 DataOp_SDIV = 3,
725 DataOp_LSLV = 8,
726 DataOp_LSRV = 9,
727 DataOp_ASRV = 10,
728 DataOp_RORV = 11
729 };
730
731 enum DataOp3Source {
732 DataOp_MADD = 0,
733 DataOp_MSUB = 1,
734 DataOp_SMADDL = 2,
735 DataOp_SMSUBL = 3,
736 DataOp_SMULH = 4,
737 DataOp_UMADDL = 10,
738 DataOp_UMSUBL = 11,
739 DataOp_UMULH = 12
740 };
741
742 enum ExcepnOp {
743 ExcepnOp_EXCEPTION = 0,
744 ExcepnOp_BREAKPOINT = 1,
745 ExcepnOp_HALT = 2,
746 ExcepnOp_DCPS = 5
747 };
748
749 enum FPCmpOp {
750 FPCmpOp_FCMP = 0x00,
751 FPCmpOp_FCMP0 = 0x08,
752 FPCmpOp_FCMPE = 0x10,
753 FPCmpOp_FCMPE0 = 0x18
754 };
755
756 enum FPCondCmpOp {
757 FPCondCmpOp_FCMP,
758 FPCondCmpOp_FCMPE
759 };
760
761 enum FPDataOp1Source {
762 FPDataOp_FMOV = 0,
763 FPDataOp_FABS = 1,
764 FPDataOp_FNEG = 2,
765 FPDataOp_FSQRT = 3,
766 FPDataOp_FCVT_toSingle = 4,
767 FPDataOp_FCVT_toDouble = 5,
768 FPDataOp_FCVT_toHalf = 7,
769 FPDataOp_FRINTN = 8,
770 FPDataOp_FRINTP = 9,
771 FPDataOp_FRINTM = 10,
772 FPDataOp_FRINTZ = 11,
773 FPDataOp_FRINTA = 12,
774 FPDataOp_FRINTX = 14,
775 FPDataOp_FRINTI = 15
776 };
777
778 enum FPDataOp2Source {
779 FPDataOp_FMUL,
780 FPDataOp_FDIV,
781 FPDataOp_FADD,
782 FPDataOp_FSUB,
783 FPDataOp_FMAX,
784 FPDataOp_FMIN,
785 FPDataOp_FMAXNM,
786 FPDataOp_FMINNM,
787 FPDataOp_FNMUL
788 };
789
790 enum FPIntConvOp {
791 FPIntConvOp_FCVTNS = 0x00,
792 FPIntConvOp_FCVTNU = 0x01,
793 FPIntConvOp_SCVTF = 0x02,
794 FPIntConvOp_UCVTF = 0x03,
795 FPIntConvOp_FCVTAS = 0x04,
796 FPIntConvOp_FCVTAU = 0x05,
797 FPIntConvOp_FMOV_QtoX = 0x06,
798 FPIntConvOp_FMOV_XtoQ = 0x07,
799 FPIntConvOp_FCVTPS = 0x08,
800 FPIntConvOp_FCVTPU = 0x09,
801 FPIntConvOp_FMOV_QtoX_top = 0x0e,
802 FPIntConvOp_FMOV_XtoQ_top = 0x0f,
803 FPIntConvOp_FCVTMS = 0x10,
804 FPIntConvOp_FCVTMU = 0x11,
805 FPIntConvOp_FCVTZS = 0x18,
806 FPIntConvOp_FCVTZU = 0x19,
807 };
808
809 enum LogicalOp {
810 LogicalOp_AND,
811 LogicalOp_ORR,
812 LogicalOp_EOR,
813 LogicalOp_ANDS
814 };
815
816 enum MemOp {
817 MemOp_STORE,
818 MemOp_LOAD,
819 MemOp_STORE_V128,
820 MemOp_LOAD_V128,
821 MemOp_PREFETCH = 2, // size must be 3
822 MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
823 MemOp_LOAD_signed32 = 3 // size may be 0 or 1
824 };
825
826 enum MoveWideOp {
827 MoveWideOp_N = 0,
828 MoveWideOp_Z = 2,
829 MoveWideOp_K = 3
830 };
831
832 enum LdrLiteralOp {
833 LdrLiteralOp_32BIT = 0,
834 LdrLiteralOp_64BIT = 1,
835 LdrLiteralOp_LDRSW = 2,
836 LdrLiteralOp_128BIT = 2
837 };
838
839public:
840 // Integer Instructions:
841
842 template<int datasize, SetFlags setFlags = DontSetFlags>
843 ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
844 {
845 CHECK_DATASIZE();
846 insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
847 }
848
849 template<int datasize, SetFlags setFlags = DontSetFlags>
850 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
851 {
852 CHECK_DATASIZE();
853 ASSERT(!shift || shift == 12);
854 insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
855 }
856
857 template<int datasize, SetFlags setFlags = DontSetFlags>
858 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
859 {
860 add<datasize, setFlags>(rd, rn, rm, LSL, 0);
861 }
862
863 template<int datasize, SetFlags setFlags = DontSetFlags>
864 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
865 {
866 CHECK_DATASIZE();
867 insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
868 }
869
870 template<int datasize, SetFlags setFlags = DontSetFlags>
871 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
872 {
873 CHECK_DATASIZE();
874 if (isSp(rn)) {
875 ASSERT(shift == LSL);
876 add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
877 } else
878 insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
879 }
880
881 ALWAYS_INLINE void adr(RegisterID rd, int offset)
882 {
883 insn(pcRelative(false, offset, rd));
884 }
885
886 ALWAYS_INLINE void adrp(RegisterID rd, int offset)
887 {
888 ASSERT(!(offset & 0xfff));
889 insn(pcRelative(true, offset >> 12, rd));
890 }
891
892 template<int datasize, SetFlags setFlags = DontSetFlags>
893 ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
894 {
895 and_<datasize, setFlags>(rd, rn, rm, LSL, 0);
896 }
897
898 template<int datasize, SetFlags setFlags = DontSetFlags>
899 ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
900 {
901 CHECK_DATASIZE();
902 insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
903 }
904
905 template<int datasize, SetFlags setFlags = DontSetFlags>
906 ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
907 {
908 CHECK_DATASIZE();
909 insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
910 }
911
912 template<int datasize>
913 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
914 {
915 ASSERT(shift < datasize);
916 sbfm<datasize>(rd, rn, shift, datasize - 1);
917 }
918
919 template<int datasize>
920 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
921 {
922 asrv<datasize>(rd, rn, rm);
923 }
924
925 template<int datasize>
926 ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
927 {
928 CHECK_DATASIZE();
929 insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
930 }
931
932 ALWAYS_INLINE void b(int32_t offset = 0)
933 {
934 ASSERT(!(offset & 3));
935 offset >>= 2;
936 ASSERT(offset == (offset << 6) >> 6);
937 insn(unconditionalBranchImmediate(false, offset));
938 }
939
940 ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0)
941 {
942 ASSERT(!(offset & 3));
943 offset >>= 2;
944 ASSERT(offset == (offset << 13) >> 13);
945 insn(conditionalBranchImmediate(offset, cond));
946 }
947
948 template<int datasize>
949 ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width)
950 {
951 bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
952 }
953
954 template<int datasize>
955 ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms)
956 {
957 CHECK_DATASIZE();
958 insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd));
959 }
960
961 template<int datasize>
962 ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width)
963 {
964 bfm<datasize>(rd, rn, lsb, lsb + width - 1);
965 }
966
967 template<int datasize, SetFlags setFlags = DontSetFlags>
968 ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm)
969 {
970 bic<datasize, setFlags>(rd, rn, rm, LSL, 0);
971 }
972
973 template<int datasize, SetFlags setFlags = DontSetFlags>
974 ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
975 {
976 CHECK_DATASIZE();
977 insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd));
978 }
979
980 ALWAYS_INLINE void bl(int32_t offset = 0)
981 {
982 ASSERT(!(offset & 3));
983 offset >>= 2;
984 insn(unconditionalBranchImmediate(true, offset));
985 }
986
987 ALWAYS_INLINE void blr(RegisterID rn)
988 {
989 insn(unconditionalBranchRegister(BranchType_CALL, rn));
990 }
991
992 ALWAYS_INLINE void br(RegisterID rn)
993 {
994 insn(unconditionalBranchRegister(BranchType_JMP, rn));
995 }
996
997 ALWAYS_INLINE void brk(uint16_t imm)
998 {
999 insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0));
1000 }
1001
1002 template<int datasize>
1003 ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0)
1004 {
1005 CHECK_DATASIZE();
1006 ASSERT(!(offset & 3));
1007 offset >>= 2;
1008 insn(compareAndBranchImmediate(DATASIZE, true, offset, rt));
1009 }
1010
1011 template<int datasize>
1012 ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0)
1013 {
1014 CHECK_DATASIZE();
1015 ASSERT(!(offset & 3));
1016 offset >>= 2;
1017 insn(compareAndBranchImmediate(DATASIZE, false, offset, rt));
1018 }
1019
1020 template<int datasize>
1021 ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
1022 {
1023 CHECK_DATASIZE();
1024 insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv));
1025 }
1026
1027 template<int datasize>
1028 ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
1029 {
1030 CHECK_DATASIZE();
1031 insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv));
1032 }
1033
1034 template<int datasize>
1035 ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
1036 {
1037 CHECK_DATASIZE();
1038 insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv));
1039 }
1040
1041 template<int datasize>
1042 ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
1043 {
1044 CHECK_DATASIZE();
1045 insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv));
1046 }
1047
1048 template<int datasize>
1049 ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond)
1050 {
1051 csinc<datasize>(rd, rn, rn, invert(cond));
1052 }
1053
1054 template<int datasize>
1055 ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond)
1056 {
1057 csinv<datasize>(rd, rn, rn, invert(cond));
1058 }
1059
1060 template<int datasize>
1061 ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn)
1062 {
1063 CHECK_DATASIZE();
1064 insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd));
1065 }
1066
1067 template<int datasize>
1068 ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn)
1069 {
1070 CHECK_DATASIZE();
1071 insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd));
1072 }
1073
    // CMN: alias of ADDS with the zero register as destination — adds and sets
    // flags, discarding the result. One overload per ADD addressing form.
    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        add<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }
1097
    // CMP: alias of SUBS with the zero register as destination — subtracts and
    // sets flags, discarding the result. One overload per SUB addressing form.
    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }
1121
    // CNEG: alias of CSNEG with rn for both sources and the condition inverted.
    template<int datasize>
    ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond)
    {
        csneg<datasize>(rd, rn, rn, invert(cond));
    }
1127
    // CSEL: rd = cond ? rn : rm (op=false, op2=false in the conditional-select group).
    template<int datasize>
    ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd));
    }
1134
    // CSET: alias of CSINC with zr for both sources and the condition inverted;
    // rd becomes 1 if cond holds, 0 otherwise.
    template<int datasize>
    ALWAYS_INLINE void cset(RegisterID rd, Condition cond)
    {
        csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }
1140
    // CSETM: alias of CSINV with zr for both sources and the condition inverted;
    // rd becomes all-ones if cond holds, 0 otherwise.
    template<int datasize>
    ALWAYS_INLINE void csetm(RegisterID rd, Condition cond)
    {
        csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }
1146
    // CSINC: rd = cond ? rn : rm + 1 (op=false, op2=true).
    template<int datasize>
    ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd));
    }
1153
    // CSINV: rd = cond ? rn : ~rm (op=true, op2=false).
    template<int datasize>
    ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd));
    }
1160
    // CSNEG: rd = cond ? rn : -rm (op=true, op2=true).
    template<int datasize>
    ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd));
    }
1167
    // EON: rd = rn ^ ~(rm shifted). The 3-argument form defaults to LSL #0.
    // Encoded as EOR shifted-register with the invert (N) bit set.
    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eon<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd));
    }
1180
    // EOR: rd = rn ^ operand. Shifted-register form (N bit clear) plus a
    // bitmask-immediate form; the 3-argument form defaults to LSL #0.
    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eor<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd));
    }
1200
    // EXTR: extract a register-width result from the rn:rm pair starting at bit lsb.
    template<int datasize>
    ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb)
    {
        CHECK_DATASIZE();
        insn(extract(DATASIZE, rm, lsb, rn, rd));
    }
1207
    // HINT: emit a hint instruction with the given immediate (e.g. 0 is NOP).
    ALWAYS_INLINE void hint(int imm)
    {
        insn(hintPseudo(imm));
    }
1212
    // HLT: halting breakpoint with a 16-bit immediate payload.
    ALWAYS_INLINE void hlt(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
    }
1217
    // LDR (32/64-bit GPR load), one overload per addressing mode:
    // register offset (the 3-argument form defaults to UXTX #0), scaled unsigned
    // immediate, post-index, and pre-index.
    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldr<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    // pimm is an unsigned byte offset; it must be a multiple of the access size.
    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }
1251
    // LDR (literal): PC-relative load. offset is in bytes, must be word-aligned,
    // and is encoded as a word count (offset >> 2).
    template<int datasize>
    ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt));
    }
1259
    // LDRB: zero-extending byte load, one overload per addressing mode.
    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of ldrb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt));
    }

    // For byte accesses the shift amount must be 0; S=true here encodes an
    // explicit (but zero) scaling.
    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }
1286
    // LDRH: zero-extending halfword load, one overload per addressing mode.
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrh(rt, rn, rm, UXTX, 0);
    }

    // Halfword register-offset: the only legal shift amounts are 0 and 1;
    // S encodes whether the index is scaled by 2.
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }
1312
    // LDRSB: sign-extending byte load; datasize selects sign-extension to 32 or
    // 64 bits. One overload per addressing mode.
    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        // Not calling the 5 argument form of ldrsb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }
1349
    // LDRSH: sign-extending halfword load; datasize selects sign-extension to
    // 32 or 64 bits. One overload per addressing mode.
    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsh<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }
1384
    // LDRSW: load a word and sign-extend to 64 bits. One overload per addressing mode.
    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsw(rt, rn, rm, UXTX, 0);
    }

    // Word register-offset: the only legal shift amounts are 0 and 2;
    // S encodes whether the index is scaled by 4.
    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 2);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }
1410
    // LDRSW (literal): PC-relative sign-extending word load. offset is in bytes,
    // must be word-aligned, and is encoded as a word count.
    ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0)
    {
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt));
    }
1416
    // LDUR: load with an unscaled signed 9-bit byte offset.
    template<int datasize>
    ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }
1423
    // LDURB: unscaled-offset byte load (zero-extending).
    ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }
1428
    // LDURH: unscaled-offset halfword load (zero-extending).
    ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }
1433
    // LDURSB: unscaled-offset byte load, sign-extended to datasize bits.
    template<int datasize>
    ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }
1440
    // LDURSH: unscaled-offset halfword load, sign-extended to datasize bits.
    template<int datasize>
    ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }
1447
    // LDURSW: unscaled-offset word load, sign-extended to 64 bits.
    ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }
1452
    // LSL (immediate): alias of UBFM with immr = (datasize - shift) mod datasize
    // and imms = datasize - 1 - shift.
    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift);
    }

    // LSL (register): alias of LSLV.
    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lslv<datasize>(rd, rn, rm);
    }
1465
    // LSLV: logical shift left by the amount in rm.
    template<int datasize>
    ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd));
    }
1472
    // LSR (immediate): alias of UBFM with immr = shift, imms = datasize - 1.
    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, shift, datasize - 1);
    }

    // LSR (register): alias of LSRV.
    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lsrv<datasize>(rd, rn, rm);
    }
1485
    // LSRV: logical shift right by the amount in rm.
    template<int datasize>
    ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd));
    }
1492
    // MADD: rd = ra + rn * rm.
    template<int datasize>
    ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
    }
1499
    // MNEG: alias of MSUB with ra = zr, i.e. rd = -(rn * rm).
    template<int datasize>
    ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        msub<datasize>(rd, rn, rm, ARM64Registers::zr);
    }
1505
    // MOV: register-to-register move. When SP is involved, ORR with zr cannot
    // encode SP, so an ADD-immediate #0 is emitted instead.
    template<int datasize>
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        if (isSp(rd) || isSp(rm))
            add<datasize>(rd, rm, UInt12(0));
        else
            orr<datasize>(rd, ARM64Registers::zr, rm);
    }
1514
    // MOVI pseudo: materialize a logical (bitmask) immediate via ORR with zr.
    template<int datasize>
    ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm)
    {
        orr<datasize>(rd, ARM64Registers::zr, imm);
    }
1520
    // MOVK: insert a 16-bit immediate at bit position `shift` (must be a
    // multiple of 16), keeping the other bits of rd.
    template<int datasize>
    ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd));
    }
1528
    // MOVN: move the bitwise NOT of a shifted 16-bit immediate into rd
    // (shift must be a multiple of 16).
    template<int datasize>
    ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd));
    }
1536
    // MOVZ: move a shifted 16-bit immediate into rd, zeroing the other bits
    // (shift must be a multiple of 16).
    template<int datasize>
    ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd));
    }
1544
    // MSUB: rd = ra - rn * rm.
    template<int datasize>
    ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
    }
1551
    // MUL: alias of MADD with ra = zr.
    template<int datasize>
    ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        madd<datasize>(rd, rn, rm, ARM64Registers::zr);
    }
1557
    // MVN: bitwise NOT; alias of ORN with rn = zr (plain and shifted forms).
    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount);
    }
1569
    // NEG: alias of SUB from zr (plain and shifted forms); setFlags selects NEGS.
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }
1581
    // NGC: negate with carry; alias of SBC from zr (plain and shifted forms).
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }
1593
    // NOP.
    ALWAYS_INLINE void nop()
    {
        insn(nopPseudo());
    }
fpizlo@apple.com33961712013-11-20 05:49:05 +00001598
    // DMB SY: full-system data memory barrier, emitted as a pre-encoded word.
    ALWAYS_INLINE void dmbSY()
    {
        insn(0xd5033fbf);
    }
dbates@webkit.org98f0de02013-10-15 22:16:39 +00001603
    // ORN: rd = rn | ~(rm shifted); ORR shifted-register with the invert (N) bit
    // set. The 3-argument form defaults to LSL #0.
    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orn<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd));
    }
1616
    // ORR: rd = rn | operand. Shifted-register form (N bit clear) plus a
    // bitmask-immediate form; the 3-argument form defaults to LSL #0.
    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orr<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd));
    }
1636
    // RBIT: reverse the bit order of rn into rd.
    template<int datasize>
    ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd));
    }
1643
    // RET: return via the given register (defaults to the link register).
    ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr)
    {
        insn(unconditionalBranchRegister(BranchType_RET, rn));
    }
1648
    // REV: reverse the byte order of the whole operand.
    template<int datasize>
    ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width.
            insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd));
        else
            insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd));
    }
1658
    // REV16: reverse the byte order within each 16-bit halfword.
    template<int datasize>
    ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd));
    }
1665
    // REV32: reverse the byte order within each 32-bit word of a 64-bit operand.
    template<int datasize>
    ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn)
    {
        ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands.
        insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd));
    }
1672
    // ROR (register): alias of RORV.
    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        rorv<datasize>(rd, rn, rm);
    }

    // ROR (immediate): alias of EXTR with the same register for both sources.
    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift)
    {
        extr<datasize>(rd, rs, rs, shift);
    }
1684
    // RORV: rotate right by the amount in rm.
    template<int datasize>
    ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd));
    }
1691
    // SBC: subtract with carry (rd = rn - rm - NOT(C)); setFlags selects SBCS.
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd));
    }
1698
    // SBFIZ: signed bitfield insert in zeroes; alias of SBFM with
    // immr = (datasize - lsb) mod datasize, imms = width - 1.
    template<int datasize>
    ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }
1704
    // SBFM: signed bitfield move with raw immr/imms encoding fields.
    template<int datasize>
    ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd));
    }
1711
    // SBFX: signed bitfield extract; alias of SBFM with immr = lsb,
    // imms = lsb + width - 1.
    template<int datasize>
    ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }
1717
    // SDIV: signed divide rn by rm.
    template<int datasize>
    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd));
    }
1724
    // SMADDL: rd = ra + signed(rn32 * rm32), 64-bit result.
    ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
    }
1729
    // SMNEGL: alias of SMSUBL with ra = zr.
    ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smsubl(rd, rn, rm, ARM64Registers::zr);
    }
1734
    // SMSUBL: rd = ra - signed(rn32 * rm32), 64-bit result.
    ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
    }
1739
    // SMULH: high 64 bits of the signed 64x64 product.
    ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd));
    }
1744
    // SMULL: alias of SMADDL with ra = zr.
    ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smaddl(rd, rn, rm, ARM64Registers::zr);
    }
1749
    // STR (32/64-bit GPR store), one overload per addressing mode:
    // register offset (the 3-argument form defaults to UXTX #0), scaled unsigned
    // immediate, post-index, and pre-index.
    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        str<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    // pimm is an unsigned byte offset; it must be a multiple of the access size.
    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }
1783
    // STRB: byte store, one overload per addressing mode.
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of strb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
    }

    // For byte accesses the shift amount must be 0; S=true here encodes an
    // explicit (but zero) scaling.
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }
1810
    // STRH: halfword store, one overload per addressing mode.
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        strh(rt, rn, rm, UXTX, 0);
    }

    // Halfword register-offset: the only legal shift amounts are 0 and 1;
    // S encodes whether the index is scaled by 2.
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }
1836
    // STUR: store with an unscaled signed 9-bit byte offset.
    template<int datasize>
    ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }
1843
    // STURB: unscaled-offset byte store.
    ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }
1848
    // STURH: unscaled-offset halfword store.
    ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }
1853
    // SUB: immediate (optionally LSL #12), plain-register, extended-register, and
    // shifted-register forms; setFlags selects SUBS.
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
    }

    // The shifted-register encoding cannot take SP as rn; when rn is SP (and the
    // shift is LSL) fall back to the extended-register form with UXTX.
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        if (isSp(rn)) {
            ASSERT(shift == LSL);
            sub<datasize, setFlags>(rd, rn, rm, UXTX, amount);
        } else
            insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
    }
1885
    // SXTB: sign-extend the low byte; alias of SBFM with immr=0, imms=7.
    template<int datasize>
    ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 7);
    }
1891
    // SXTH: sign-extend the low halfword; alias of SBFM with immr=0, imms=15.
    template<int datasize>
    ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 15);
    }
1897
    // SXTW: sign-extend the low word to 64 bits; alias of 64-bit SBFM with
    // immr=0, imms=31.
    ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
    {
        sbfm<64>(rd, rn, 0, 31);
    }
1902
    // TBZ: branch if bit `imm` of rt is zero. offset is in bytes, must be
    // word-aligned, and is encoded as an instruction count.
    ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(false, imm, offset, rt));
    }
1909
    // TBNZ: branch if bit `imm` of rt is non-zero. offset is in bytes, must be
    // word-aligned, and is encoded as an instruction count.
    ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(true, imm, offset, rt));
    }
1916
    // TST: alias of ANDS with the zero register as destination — set flags from
    // rn & operand, discarding the result.
    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, imm);
    }
1934
    // UBFIZ: unsigned bitfield insert in zeroes; alias of UBFM with
    // immr = (datasize - lsb) mod datasize, imms = width - 1.
    template<int datasize>
    ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }
1940
    // UBFM: unsigned bitfield move with raw immr/imms encoding fields.
    template<int datasize>
    ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
    }
1947
    // UBFX: unsigned bitfield extract; alias of UBFM with immr = lsb,
    // imms = lsb + width - 1.
    template<int datasize>
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }
1953
    // UDIV: unsigned divide rn by rm.
    template<int datasize>
    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
    }
1960
    // UMADDL: rd = ra + unsigned(rn32 * rm32), 64-bit result.
    ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
    }
1965
    // UMNEGL: alias of UMSUBL with ra = zr.
    ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umsubl(rd, rn, rm, ARM64Registers::zr);
    }
1970
    // UMSUBL: rd = ra - unsigned(rn32 * rm32), 64-bit result.
    ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
    }
1975
    // UMULH: high 64 bits of the unsigned 64x64 product.
    ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
    }
1980
    // UMULL: alias of UMADDL with ra = zr.
    ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umaddl(rd, rn, rm, ARM64Registers::zr);
    }
1985
    // UXTB: zero-extend the low byte; alias of UBFM with immr=0, imms=7.
    template<int datasize>
    ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 7);
    }
1991
    // UXTH: zero-extend the low halfword; alias of UBFM with immr=0, imms=15.
    template<int datasize>
    ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 15);
    }
1997
    // UXTW: zero-extend the low word to 64 bits; alias of 64-bit UBFM with
    // immr=0, imms=31.
    ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
    {
        ubfm<64>(rd, rn, 0, 31);
    }
2002
2003 // Floating Point Instructions:
2004
    // FABS: floating-point absolute value.
    template<int datasize>
    ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
    }
2011
    // FADD: floating-point add.
    template<int datasize>
    ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
    }
2018
    // FCCMP: conditional floating-point compare; when cond fails, the flags are
    // set directly to nzcv.
    template<int datasize>
    ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
    }
2025
    // FCCMPE: as fccmp, but the signaling variant (FCMPE comparison).
    template<int datasize>
    ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
    }
2032
    // FCMP: floating-point compare of vn against vm.
    template<int datasize>
    ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
    }
2039
    // FCMP (zero form): compare vn against +0.0; the vm field must encode 0.
    template<int datasize>
    ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0));
    }
2046
2047 template<int datasize>
2048 ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
2049 {
2050 CHECK_DATASIZE();
2051 insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
2052 }
2053
2054 template<int datasize>
2055 ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
2056 {
2057 CHECK_DATASIZE();
2058 insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0));
2059 }
2060
2061 template<int datasize>
2062 ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
2063 {
2064 CHECK_DATASIZE();
2065 insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
2066 }
2067
    // FCVT: convert between FP precisions (half/single/double). The encoding's
    // `type` field carries the source size, the opcode selects the destination.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn)
    {
        ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64);
        ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64);
        ASSERT(dstsize != srcsize); // Same-size "conversion" has no encoding; use fmov.
        Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16;
        FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf;
        insn(floatingPointDataProcessing1Source(type, opcode, vn, vd));
    }
2078
    // FCVT* family: convert the FP value in vn (srcsize bits) to an integer in
    // rd (dstsize bits). The mnemonic suffix names the ARM rounding mode and
    // signedness: A = to nearest, ties away; M = toward -inf; N = to nearest,
    // ties even; P = toward +inf; Z = toward zero; trailing S/U = signed/unsigned.

    // FCVTAS: FP -> signed int, round to nearest with ties away from zero.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
    }

    // FCVTAU: FP -> unsigned int, round to nearest with ties away from zero.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
    }

    // FCVTMS: FP -> signed int, round toward minus infinity.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
    }

    // FCVTMU: FP -> unsigned int, round toward minus infinity.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
    }

    // FCVTNS: FP -> signed int, round to nearest with ties to even.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
    }

    // FCVTNU: FP -> unsigned int, round to nearest with ties to even.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
    }

    // FCVTPS: FP -> signed int, round toward plus infinity.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
    }

    // FCVTPU: FP -> unsigned int, round toward plus infinity.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
    }

    // FCVTZS: FP -> signed int, round toward zero (C-style truncation).
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
    }

    // FCVTZU: FP -> unsigned int, round toward zero.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
    }
2158
    // FDIV: vd = vn / vm.
    template<int datasize>
    ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
    }

    // FMADD: fused multiply-add (non-negated form, AddOp_ADD).
    template<int datasize>
    ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
    }

    // FMAX: vd = max(vn, vm).
    template<int datasize>
    ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
    }

    // FMAXNM: max with IEEE maxNum NaN handling.
    template<int datasize>
    ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
    }

    // FMIN: vd = min(vn, vm).
    template<int datasize>
    ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
    }

    // FMINNM: min with IEEE minNum NaN handling.
    template<int datasize>
    ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
    }

    // FMOV (register): vd = vn.
    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
    }

    // FMOV (general -> FP): bitwise copy of rn into vd.
    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
    }

    // FMOV (FP -> general): bitwise copy of vn into rd.
    template<int datasize>
    ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
    }

    // FMOV (immediate): load imm into vd. The value must be representable in
    // the encoding's 8-bit FP immediate form — presumably validated/encoded by
    // encodeFPImm (declared elsewhere); confirm against its implementation.
    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
    {
        CHECK_DATASIZE();
        insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
    }

    // FMOV to the top 64 bits of the 128-bit vector register vd.
    ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
    {
        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
    }

    // FMOV from the top 64 bits of the 128-bit vector register vn.
    ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
    {
        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
    }
2238
    // FMSUB: fused multiply-subtract (non-negated form, AddOp_SUB).
    template<int datasize>
    ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
    }

    // FMUL: vd = vn * vm.
    template<int datasize>
    ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
    }

    // FNEG: vd = -vn.
    template<int datasize>
    ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
    }

    // FNMADD: negated fused multiply-add (o1 = true, AddOp_ADD).
    template<int datasize>
    ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
    }

    // FNMSUB: negated fused multiply-subtract (o1 = true, AddOp_SUB).
    template<int datasize>
    ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
    }

    // FNMUL: vd = -(vn * vm).
    template<int datasize>
    ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
    }

    // FRINT* family: round vn to an integral FP value. Suffix = rounding mode:
    // A = ties away; I = per FPCR; M = toward -inf; N = ties even;
    // P = toward +inf; X = per FPCR, signalling inexact; Z = toward zero.

    template<int datasize>
    ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
    }

    // FSQRT: vd = sqrt(vn).
    template<int datasize>
    ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
    }

    // FSUB: vd = vn - vm.
    template<int datasize>
    ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
    }
2343
    // LDR (FP/SIMD, register offset): rt = [rn + rm], no extend/shift.
    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldr<datasize>(rt, rn, rm, UXTX, 0);
    }

    // LDR (FP/SIMD, extended register offset): rt = [rn + extend(rm) << amount].
    // 128-bit loads use the MemOp_LOAD_V128 opcode; MEMOPSIZE folds 8/128 together.
    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    // LDR (FP/SIMD, unsigned scaled immediate): rt = [rn + pimm]; pimm must be
    // a multiple of the access size (checked by encodePositiveImmediate).
    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    // LDR (FP/SIMD, post-index): rt = [rn], then rn += simm.
    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }

    // LDR (FP/SIMD, pre-index): rn += simm, then rt = [rn].
    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }

    // LDR (FP/SIMD, PC-relative literal); offset is in bytes and must be
    // word-aligned. Only 32/64/128-bit literal forms exist.
    template<int datasize>
    ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
    {
        CHECK_FP_MEMOP_DATASIZE();
        ASSERT(datasize >= 32);
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
    }

    // LDUR (FP/SIMD): load with unscaled 9-bit signed immediate offset.
    template<int datasize>
    ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }
2393
    // SCVTF: convert the signed integer in rn (srcsize bits) to FP in vd
    // (dstsize bits). Note the encoder takes (intSize, fpSize) in that order,
    // hence srcsize before dstsize here.
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
    }
2401
    // STR (FP/SIMD, register offset): [rn + rm] = rt, no extend/shift.
    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
    {
        str<datasize>(rt, rn, rm, UXTX, 0);
    }

    // STR (FP/SIMD, extended register offset): [rn + extend(rm) << amount] = rt.
    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    // STR (FP/SIMD, unsigned scaled immediate): [rn + pimm] = rt.
    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    // STR (FP/SIMD, post-index): [rn] = rt, then rn += simm.
    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
    }

    // STR (FP/SIMD, pre-index): rn += simm, then [rn] = rt.
    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
    }
2435
2436 template<int datasize>
2437 ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm)
2438 {
2439 CHECK_DATASIZE();
2440 insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
2441 }
2442
    // UCVTF: convert the unsigned integer in rn (srcsize bits) to FP in vd
    // (dstsize bits). As with scvtf, the encoder takes (intSize, fpSize).
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
    }
2450
2451 // Admin methods:
2452
    // Returns the current buffer position with no watchpoint padding applied.
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_buffer.label();
    }

    // Label for a watchpoint site: records the watchpoint's offset and the end
    // of its patchable region (offset + maxJumpReplacementSize()). If the
    // current offset already holds the previous watchpoint, re-label via
    // label() to pad past it first.
    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_buffer.label();
        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.m_offset;
        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
        return result;
    }

    // Returns a label at the current position, emitting nops as needed so the
    // label never lands inside the last watchpoint's patchable region.
    AssemblerLabel label()
    {
        AssemblerLabel result = m_buffer.label();
        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
            nop();
            result = m_buffer.label();
        }
        return result;
    }
2477
    // Pads the buffer with BRK #0 until it reaches the requested alignment
    // (which must be a multiple of the 4-byte instruction size), then labels.
    AssemblerLabel align(int alignment)
    {
        ASSERT(!(alignment & 3));
        while (!m_buffer.isAligned(alignment))
            brk(0);
        return label();
    }
2485
2486 static void* getRelocatedAddress(void* code, AssemblerLabel label)
2487 {
2488 ASSERT(label.isSet());
2489 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
2490 }
2491
    // Byte distance from label a to label b (positive when b is later).
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }
2496
    // Returns the branch-compaction offset recorded (see recordLinkOffsets)
    // for the instruction word preceding 'location'. Location 0 has no
    // predecessor, so its offset is 0 by definition.
    int executableOffsetFor(int location)
    {
        if (!location)
            return 0;
        return static_cast<int32_t*>(m_buffer.data())[location / sizeof(int32_t) - 1];
    }
    // Raw pointer to the unlinked instruction stream.
    void* unlinkedCode() { return m_buffer.data(); }
    // Number of code bytes emitted so far.
    size_t codeSize() const { return m_buffer.codeSize(); }

    // The return address of a call is simply the label taken after emitting it.
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }
2512
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
2520
    // Defer linking of a conditional jump: record it for resolution at
    // finalization time (see jumpsToLink()/link()).
    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
    }

    // Deferred link for a compare-and-branch (CBZ/CBNZ-style) jump.
    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
    }

    // Deferred link for a test-bit-and-branch (TBZ/TBNZ-style) jump.
    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
    }
2541
    // Immediately link an unconditional jump within the assembler's own buffer.
    void linkJump(AssemblerLabel from, AssemblerLabel to)
    {
        ASSERT(from.isSet());
        ASSERT(to.isSet());
        relinkJumpOrCall<false>(addressOf(from), addressOf(to));
    }

    // Immediately link an unconditional jump in an external (unprotected) copy
    // of the code.
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());
        relinkJumpOrCall<false>(addressOf(code, from), to);
    }
2554
    // Link a call. 'from' labels the position after the call, so step back one
    // instruction to reach the branch-and-link itself.
    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());
        linkJumpOrCall<true>(addressOf(code, from) - 1, to);
    }

    // Patch the move-wide pointer-materialization sequence at 'where' to load
    // valuePtr (no cache flush — link-time only).
    static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
    {
        linkPointer(addressOf(code, where), valuePtr);
    }
2565
    // Overwrite the instruction at 'where' with an unconditional branch to
    // 'to'. The offset is in instructions (byte delta >> 2); the ASSERT only
    // guards the int truncation — the target must be within B's ±128MB range.
    static void replaceWithJump(void* where, void* to)
    {
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);
        *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset));
        cacheFlush(where, sizeof(int));
    }

    // A patched-in jump is a single 4-byte instruction on ARM64.
    static ptrdiff_t maxJumpReplacementSize()
    {
        return 4;
    }
2578
    // Convert a patchable "ADD rd, rn, #imm12" (64-bit, no flags, no shift)
    // into the corresponding "LDR rd, [rn, #imm12]". If the instruction is
    // already a matching load, this is a no-op (the else-branch only asserts).
    static void replaceWithLoad(void* where)
    {
        Datasize sf;
        AddOp op;
        SetFlags S;
        int shift;
        int imm12;
        RegisterID rn;
        RegisterID rd;
        if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
            ASSERT(sf == Datasize_64);
            ASSERT(op == AddOp_ADD);
            ASSERT(!S);
            ASSERT(!shift);
            ASSERT(!(imm12 & ~0xff8)); // Must be an 8-byte-aligned offset that survives the load's scaling.
            *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
            cacheFlush(where, sizeof(int));
        }
#if !ASSERT_DISABLED
        else {
            // Debug-only sanity check: the instruction must already be the
            // 64-bit GPR load this function would have produced.
            MemOpSize size;
            bool V;
            MemOp opc;
            int imm12;
            RegisterID rn;
            RegisterID rt;
            ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
            ASSERT(size == MemOpSize_64);
            ASSERT(!V);
            ASSERT(opc == MemOp_LOAD);
            ASSERT(!(imm12 & ~0x1ff));
        }
#endif
    }
2613
    // Inverse of replaceWithLoad(): convert a patchable "LDR rt, [rn, #imm12]"
    // (64-bit GPR load) back into "ADD rt, rn, #byteOffset". The load's imm12
    // is scaled in units of the access size, hence the * sizeof(void*) when
    // rebuilding the byte offset. No-op (asserts only) if already an ADD.
    static void replaceWithAddressComputation(void* where)
    {
        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
            ASSERT(size == MemOpSize_64);
            ASSERT(!V);
            ASSERT(opc == MemOp_LOAD);
            ASSERT(!(imm12 & ~0x1ff));
            *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
            cacheFlush(where, sizeof(int));
        }
#if !ASSERT_DISABLED
        else {
            // Debug-only sanity check: must already be the equivalent ADD.
            Datasize sf;
            AddOp op;
            SetFlags S;
            int shift;
            int imm12;
            RegisterID rn;
            RegisterID rd;
            ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
            ASSERT(sf == Datasize_64);
            ASSERT(op == AddOp_ADD);
            ASSERT(!S);
            ASSERT(!shift);
            ASSERT(!(imm12 & ~0xff8));
        }
#endif
    }
2648
    // Repatch a pointer in finalized code: same as linkPointer but flushes the
    // instruction cache afterwards.
    static void repatchPointer(void* where, void* valuePtr)
    {
        linkPointer(static_cast<int*>(where), valuePtr, true);
    }

    // Rewrite the 3-instruction MOVZ/MOVK/MOVK sequence at 'address' to
    // materialize valuePtr in rd. Only halfwords 0-2 are written, i.e. the low
    // 48 bits of the pointer; the top 16 bits are not encoded.
    static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
    {
        uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
        address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
        address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);

        if (flush)
            cacheFlush(address, sizeof(int) * 3);
    }
2664
    // Repatch a 32-bit immediate materialized by a two-instruction
    // MOVZ/MOVN + MOVK pair. Non-negative values use MOVZ with the raw low
    // halfword; negative values use MOVN with the inverted low halfword
    // (MOVN writes the bitwise-NOT, so the MOVK then fixes up halfword 1).
    static void repatchInt32(void* where, int32_t value)
    {
        int* address = static_cast<int*>(where);

        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
        // Expect a 32-bit (sf == 0) MOVZ or MOVN at halfword 0, followed by a MOVK at halfword 1.
        ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
        ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));

        if (value >= 0) {
            address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        } else {
            address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        }

        cacheFlush(where, sizeof(int) * 2);
    }
2688
    // Decode the pointer materialized by a MOVZ/MOVK/MOVK sequence (the form
    // setPointer emits): 48 bits reassembled from three 16-bit halfwords, all
    // targeting the same destination register.
    static void* readPointer(void* where)
    {
        int* address = static_cast<int*>(where);

        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rdFirst, rd;

        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
        uintptr_t result = imm16;

        expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
        result |= static_cast<uintptr_t>(imm16) << 16;

        expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
        result |= static_cast<uintptr_t>(imm16) << 32;

        return reinterpret_cast<void*>(result);
    }

    // 'from' is the location after a call made via a materialized pointer;
    // the move-wide sequence loading the target starts 4 instructions back.
    static void* readCallTarget(void* from)
    {
        return readPointer(reinterpret_cast<int*>(from) - 4);
    }
2718
    // Retarget an already-linked jump in finalized code and flush the icache.
    static void relinkJump(void* from, void* to)
    {
        relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
        cacheFlush(from, sizeof(int));
    }

    // Retarget a call; 'from' is the return address, so the branch-and-link is
    // one instruction earlier.
    static void relinkCall(void* from, void* to)
    {
        relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to);
        cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
    }
2730
    // Repatch the scaled immediate of a compact 32/64-bit GPR load. 'value' is
    // a byte offset that must be non-negative, 8-byte aligned, and small
    // enough to re-encode as the load's scaled imm12 (hence the 0x3ff8 mask).
    static void repatchCompact(void* where, int32_t value)
    {
        ASSERT(!(value & ~0x3ff8));

        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
        ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.

        // Re-scale the byte offset according to the access size of the existing load.
        if (size == MemOpSize_32)
            imm12 = encodePositiveImmediate<32>(value);
        else
            imm12 = encodePositiveImmediate<64>(value);
        *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);

        cacheFlush(where, sizeof(int));
    }
2752
    // Current buffer offset, for debugging/diagnostics only.
    unsigned debugOffset() { return m_buffer.debugOffset(); }

    // Synchronize the data and instruction caches after patching code.
    // Only iOS has an implementation here; other platforms fail to compile
    // deliberately so a port must supply one.
    static void cacheFlush(void* code, size_t size)
    {
#if OS(IOS)
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#else
#error "The cacheFlush support is missing on this platform."
#endif
    }
2763
2764 // Assembler admin methods:
2765
    // Bytes saved by emitting the (shorter) link form instead of the full jump form.
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }

    // Orders link records by source offset for the branch-compaction pass.
    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }

    bool canCompact(JumpType jumpType)
    {
        // Fixed jumps cannot be compacted
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
    }
2778
    // Choose the most compact link form for a jump given its actual distance.
    // Fixed-size jump types always map to their full (indirect, two-slot)
    // form; compactable types use the single-instruction "Direct" form when
    // the byte displacement fits the branch's immediate range (checked by the
    // sign-extension round-trip: << n >> n preserves the value iff it fits in
    // 64 - n bits).
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        switch (jumpType) {
        case JumpFixed:
            return LinkInvalid;
        case JumpNoConditionFixedSize:
            return LinkJumpNoCondition;
        case JumpConditionFixedSize:
            return LinkJumpCondition;
        case JumpCompareAndBranchFixedSize:
            return LinkJumpCompareAndBranch;
        case JumpTestBitFixedSize:
            return LinkJumpTestBit;
        case JumpNoCondition:
            return LinkJumpNoCondition;
        case JumpCondition: {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            // Signed 21-bit byte displacement: the range of B.cond (imm19 * 4).
            if (((relative << 43) >> 43) == relative)
                return LinkJumpConditionDirect;

            return LinkJumpCondition;
        }
        case JumpCompareAndBranch:  {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            // Signed 21-bit byte displacement: the range of CBZ/CBNZ (imm19 * 4).
            if (((relative << 43) >> 43) == relative)
                return LinkJumpCompareAndBranchDirect;

            return LinkJumpCompareAndBranch;
        }
        case JumpTestBit:   {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            // Signed 14-bit byte displacement — conservative for TBZ/TBNZ
            // (imm14 * 4 would allow 16 bits), but always safe.
            if (((relative << 50) >> 50) == relative)
                return LinkJumpTestBitDirect;

            return LinkJumpTestBit;
        }
        default:
            ASSERT_NOT_REACHED();
        }

        return LinkJumpNoCondition;
    }
2830
    // Convenience overload: compute and stash the link type on the record.
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }

    // During branch compaction, record 'offset' for every instruction word in
    // [regionStart, regionEnd). The table is written over the buffer's own
    // storage and is read back by executableOffsetFor().
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
    {
        int32_t ptr = regionStart / sizeof(int32_t);
        const int32_t end = regionEnd / sizeof(int32_t);
        int32_t* offsets = static_cast<int32_t*>(m_buffer.data());
        while (ptr < end)
            offsets[ptr++] = offset;
    }
2846
    // Pending jump records, sorted by source offset before being handed to the
    // link/compaction pass.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
2852
    // Resolve one deferred jump according to its chosen link type. The
    // "Direct" forms patch a single branch at 'from'; the indirect forms
    // occupy two instruction slots, and the record's 'from' points at the
    // second slot, hence the "- 1" to reach the first.
    void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpNoCondition:
            linkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpConditionDirect:
            linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpCondition:
            linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to);
            break;
        case LinkJumpCompareAndBranchDirect:
            linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpCompareAndBranch:
            linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
            break;
        case LinkJumpTestBitDirect:
            linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpTestBit:
            linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
            break;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }
2882
2883private:
    // Debug helper: true iff 'insn' is a MOVK of the given operand size
    // targeting register _rd at halfword index _hw.
    template<Datasize size>
    static bool checkMovk(int insn, int _hw, RegisterID _rd)
    {
        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);

        return expected
            && sf == size
            && opc == MoveWideOp_K
            && hw == _hw
            && rd == _rd;
    }
2900
    // Validate that 'address' holds a 64-bit MOVZ followed by MOVK (hw 1) and
    // MOVK (hw 2) on the same register, then rewrite the sequence to load
    // valuePtr. 'flush' distinguishes link-time (false) from repatch (true).
    static void linkPointer(int* address, void* valuePtr, bool flush = false)
    {
        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
        ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
        ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));

        setPointer(address, valuePtr, rd, flush);
    }
2915
    // Write an unconditional B/BL at 'from' targeting 'to'. The slot must
    // currently hold a B/BL or a nop placeholder. The encoded offset is in
    // instructions (byte delta >> 2); the ASSERT only guards int truncation.
    template<bool isCall>
    static void linkJumpOrCall(int* from, void* to)
    {
        bool link;
        int imm26;
        bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);

        ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
        ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);

        *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
    }
2932
    // Link a CBZ/CBNZ. If the instruction-count offset fits CBZ's 19-bit
    // immediate, emit the branch directly (nop-ing the spare slot when two
    // were reserved). Otherwise emit an inverted-condition CBZ/CBNZ that skips
    // over an unconditional branch carrying the full-range target.
    template<bool isDirect>
    static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(((offset << 38) >> 38) == offset); // Must at least fit B's 26-bit range.

        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
            linkJumpOrCall<false>(from + 1, to);
        }
    }
2953
    // Link a B.cond. Direct form when the offset fits the 19-bit immediate;
    // otherwise an inverted B.cond skipping over a full-range unconditional B.
    template<bool isDirect>
    static void linkConditionalBranch(Condition condition, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(((offset << 38) >> 38) == offset); // Must at least fit B's 26-bit range.

        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = conditionalBranchImmediate(static_cast<int>(offset), condition);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = conditionalBranchImmediate(2, invert(condition));
            linkJumpOrCall<false>(from + 1, to);
        }
    }
2974
    // Links a test-and-branch (TBZ/TBNZ) at 'from' to 'to'. TBZ/TBNZ only has
    // ±14-bit reach; within it a single instruction is written (NOP-padding the
    // second slot when two were reserved), otherwise an inverted TBZ/TBNZ skips
    // over an unconditional branch with the full ±26-bit offset.
    template<bool isDirect>
    static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        // Offsets are in units of 4-byte instructions.
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);
        // Must at least be reachable by the indirect B form (26-bit signed).
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
            if (!isDirect)
                *(from + 1) = nopPseudo(); // Pad the unused second slot.
        } else {
            // Inverted condition branches over the following B.
            *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
            linkJumpOrCall<false>(from + 1, to);
        }
    }
2996
    // Re-points an already-linked jump/call at 'from' to a new target. When the
    // earlier link step collapsed a conditional/compare/test branch to its short
    // direct form (branch at from - 1, NOP at from), the NOP is recognized and
    // the whole two-instruction sequence is re-linked; otherwise only the
    // unconditional branch at 'from' is rewritten.
    template<bool isCall>
    static void relinkJumpOrCall(int* from, void* to)
    {
        if (!isCall && disassembleNop(from)) {
            unsigned op01;
            int imm19;
            Condition condition;
            bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);

            if (isConditionalBranchImmediate) {
                // op01 == 0 is the only allocated B.cond encoding.
                ASSERT_UNUSED(op01, !op01);
                ASSERT_UNUSED(isCall, !isCall);

                // NOTE(review): imm19 == 8 appears to mark a branch that was
                // linked with its condition inverted — confirm against the emitter.
                if (imm19 == 8)
                    condition = invert(condition);

                linkConditionalBranch<false>(condition, from - 1, to);
                return;
            }

            Datasize opSize;
            bool op;
            RegisterID rt;
            bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);

            if (isCompareAndBranchImmediate) {
                // Same inversion convention as above, expressed in the op bit.
                if (imm19 == 8)
                    op = !op;

                linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to);
                return;
            }

            int imm14;
            unsigned bitNumber;
            bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);

            if (isTestAndBranchImmediate) {
                if (imm14 == 8)
                    op = !op;

                linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to);
                return;
            }
        }

        // Plain case: rewrite the B/BL (or NOP placeholder) in place.
        linkJumpOrCall<isCall>(from, to);
    }
3045
3046 static int* addressOf(void* code, AssemblerLabel label)
3047 {
3048 return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
3049 }
3050
3051 int* addressOf(AssemblerLabel label)
3052 {
3053 return addressOf(m_buffer.data(), label);
3054 }
3055
3056 static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
3057 static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
3058 static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
3059
3060 static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
3061 {
3062 int insn = *static_cast<int*>(address);
3063 sf = static_cast<Datasize>((insn >> 31) & 1);
3064 op = static_cast<AddOp>((insn >> 30) & 1);
3065 S = static_cast<SetFlags>((insn >> 29) & 1);
3066 shift = (insn >> 22) & 3;
3067 imm12 = (insn >> 10) & 0x3ff;
3068 rn = disassembleXOrSp((insn >> 5) & 0x1f);
3069 rd = disassembleXOrZrOrSp(S, insn & 0x1f);
3070 return (insn & 0x1f000000) == 0x11000000;
3071 }
3072
3073 static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
3074 {
3075 int insn = *static_cast<int*>(address);
3076 size = static_cast<MemOpSize>((insn >> 30) & 3);
3077 V = (insn >> 26) & 1;
3078 opc = static_cast<MemOp>((insn >> 22) & 3);
3079 imm12 = (insn >> 10) & 0xfff;
3080 rn = disassembleXOrSp((insn >> 5) & 0x1f);
3081 rt = disassembleXOrZr(insn & 0x1f);
3082 return (insn & 0x3b000000) == 0x39000000;
3083 }
3084
3085 static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
3086 {
3087 int insn = *static_cast<int*>(address);
3088 sf = static_cast<Datasize>((insn >> 31) & 1);
3089 opc = static_cast<MoveWideOp>((insn >> 29) & 3);
3090 hw = (insn >> 21) & 3;
3091 imm16 = insn >> 5;
3092 rd = disassembleXOrZr(insn & 0x1f);
3093 return (insn & 0x1f800000) == 0x12800000;
3094 }
3095
3096 static bool disassembleNop(void* address)
3097 {
3098 unsigned insn = *static_cast<unsigned*>(address);
3099 return insn == 0xd503201f;
3100 }
3101
3102 static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
3103 {
3104 int insn = *static_cast<int*>(address);
3105 sf = static_cast<Datasize>((insn >> 31) & 1);
3106 op = (insn >> 24) & 0x1;
3107 imm19 = (insn << 8) >> 13;
3108 rt = static_cast<RegisterID>(insn & 0x1f);
3109 return (insn & 0x7e000000) == 0x34000000;
3110
3111 }
3112
3113 static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition)
3114 {
3115 int insn = *static_cast<int*>(address);
3116 op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
3117 imm19 = (insn << 8) >> 13;
3118 condition = static_cast<Condition>(insn & 0xf);
3119 return (insn & 0xfe000000) == 0x54000000;
3120 }
3121
3122 static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
3123 {
3124 int insn = *static_cast<int*>(address);
3125 op = (insn >> 24) & 0x1;
3126 imm14 = (insn << 13) >> 18;
3127 bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn > 19) & 0x1f));
3128 rt = static_cast<RegisterID>(insn & 0x1f);
3129 return (insn & 0x7e000000) == 0x36000000;
3130
3131 }
3132
3133 static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
3134 {
3135 int insn = *static_cast<int*>(address);
3136 op = (insn >> 31) & 1;
3137 imm26 = (insn << 6) >> 6;
3138 return (insn & 0x7c000000) == 0x14000000;
3139 }
3140
3141 static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; }
3142 static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; }
3143 static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
3144 static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }
3145
    // Appends one 32-bit instruction word to the code buffer.
    ALWAYS_INLINE void insn(int instruction)
    {
        m_buffer.putInt(instruction);
    }
3150
3151 ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
3152 {
3153 ASSERT(imm3 < 5);
3154 // The only allocated values for opt is 0.
3155 const int opt = 0;
3156 return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
3157 }
3158
3159 ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
3160 {
3161 ASSERT(shift < 2);
3162 ASSERT(isUInt12(imm12));
3163 return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
3164 }
3165
3166 ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
3167 {
3168 ASSERT(shift < 3);
3169 ASSERT(!(imm6 & (sf ? ~63 : ~31)));
3170 return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3171 }
3172
3173 ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
3174 {
3175 const int opcode2 = 0;
3176 return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3177 }
3178
3179 ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
3180 {
3181 ASSERT(immr < (sf ? 64 : 32));
3182 ASSERT(imms < (sf ? 64 : 32));
3183 const int N = sf;
3184 return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3185 }
3186
3187 // 'op' means negate
3188 ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
3189 {
3190 ASSERT(imm19 == (imm19 << 13) >> 13);
3191 return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
3192 }
3193
3194 ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
3195 {
3196 ASSERT(imm19 == (imm19 << 13) >> 13);
3197 ASSERT(!(cond & ~15));
3198 // The only allocated values for o1 & o0 are 0.
3199 const int o1 = 0;
3200 const int o0 = 0;
3201 return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
3202 }
3203
3204 ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
3205 {
3206 ASSERT(!(imm5 & ~0x1f));
3207 ASSERT(nzcv < 16);
3208 const int S = 1;
3209 const int o2 = 0;
3210 const int o3 = 0;
3211 return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
3212 }
3213
3214 ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
3215 {
3216 ASSERT(nzcv < 16);
3217 const int S = 1;
3218 const int o2 = 0;
3219 const int o3 = 0;
3220 return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
3221 }
3222
3223 // 'op' means negate
3224 // 'op2' means increment
3225 ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
3226 {
3227 const int S = 0;
3228 return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3229 }
3230
3231 ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
3232 {
3233 const int S = 0;
3234 const int opcode2 = 0;
3235 return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3236 }
3237
3238 ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
3239 {
3240 const int S = 0;
3241 return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3242 }
3243
3244 ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
3245 {
3246 int op54 = opcode >> 4;
3247 int op31 = (opcode >> 1) & 7;
3248 int op0 = opcode & 1;
3249 return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3250 }
3251
3252 ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
3253 {
3254 ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
3255 const int op2 = 0;
3256 return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
3257 }
3258
3259 ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
3260 {
3261 ASSERT(imms < (sf ? 64 : 32));
3262 const int op21 = 0;
3263 const int N = sf;
3264 const int o0 = 0;
3265 return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3266 }
3267
3268 ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2)
3269 {
3270 const int M = 0;
3271 const int S = 0;
3272 const int op = 0;
3273 return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2);
3274 }
3275
3276 ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv)
3277 {
3278 ASSERT(nzcv < 16);
3279 const int M = 0;
3280 const int S = 0;
3281 return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv);
3282 }
3283
3284 ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd)
3285 {
3286 const int M = 0;
3287 const int S = 0;
3288 return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd);
3289 }
3290
3291 ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
3292 {
3293 const int M = 0;
3294 const int S = 0;
3295 const int imm5 = 0;
3296 return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
3297 }
3298
3299 ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
3300 {
3301 const int S = 0;
3302 return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
3303 }
3304
3305 ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
3306 {
3307 return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
3308 }
3309
3310 ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
3311 {
3312 return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
3313 }
3314
3315 ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
3316 {
3317 const int M = 0;
3318 const int S = 0;
3319 return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
3320 }
3321
3322 ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
3323 {
3324 const int M = 0;
3325 const int S = 0;
3326 return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
3327 }
3328
3329 // 'o1' means negate
3330 ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
3331 {
3332 const int M = 0;
3333 const int S = 0;
3334 return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd);
3335 }
3336
3337 // 'V' means vector
3338 ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, FPRegisterID rt)
3339 {
3340 ASSERT(((imm19 << 13) >> 13) == imm19);
3341 return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt);
3342 }
3343
3344 ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt)
3345 {
3346 return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt));
3347 }
3348
3349 // 'V' means vector
3350 ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
3351 {
3352 ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
3353 ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
3354 ASSERT(isInt9(imm9));
3355 return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
3356 }
3357
3358 ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
3359 {
3360 return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
3361 }
3362
3363 // 'V' means vector
3364 ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
3365 {
3366 ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
3367 ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
3368 ASSERT(isInt9(imm9));
3369 return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
3370 }
3371
3372 ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
3373 {
3374 return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
3375 }
3376
3377 // 'V' means vector
3378 // 'S' means shift rm
3379 ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
3380 {
3381 ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
3382 ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
3383 ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit.
3384 return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt);
3385 }
3386
3387 ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt)
3388 {
3389 return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt));
3390 }
3391
3392 // 'V' means vector
3393 ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
3394 {
3395 ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
3396 ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
3397 ASSERT(isInt9(imm9));
3398 return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
3399 }
3400
3401 ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
3402 {
3403 ASSERT(isInt9(imm9));
3404 return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
3405 }
3406
3407 // 'V' means vector
3408 ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt)
3409 {
3410 ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
3411 ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
3412 ASSERT(isUInt12(imm12));
3413 return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt);
3414 }
3415
3416 ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt)
3417 {
3418 return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt));
3419 }
3420
3421 ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd)
3422 {
3423 ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff)));
3424 return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd));
3425 }
3426
3427 // 'N' means negate rm
3428 ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
3429 {
3430 ASSERT(!(imm6 & (sf ? ~63 : ~31)));
3431 return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
3432 }
3433
3434 ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd)
3435 {
3436 ASSERT(hw < (sf ? 4 : 2));
3437 return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd));
3438 }
3439
3440 // 'op' means link
3441 ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26)
3442 {
3443 ASSERT(imm26 == (imm26 << 6) >> 6);
3444 return (0x14000000 | op << 31 | (imm26 & 0x3ffffff));
3445 }
3446
3447 // 'op' means page
3448 ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd)
3449 {
3450 ASSERT(imm21 == (imm21 << 11) >> 11);
3451 int32_t immlo = imm21 & 3;
3452 int32_t immhi = (imm21 >> 2) & 0x7ffff;
3453 return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd));
3454 }
3455
3456 ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt)
3457 {
3458 return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt));
3459 }
3460
3461 ALWAYS_INLINE static int hintPseudo(int imm)
3462 {
3463 ASSERT(!(imm & ~0x7f));
3464 return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr);
3465 }
3466
3467 ALWAYS_INLINE static int nopPseudo()
3468 {
3469 return hintPseudo(0);
3470 }
3471
3472 // 'op' means negate
3473 ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt)
3474 {
3475 ASSERT(!(b50 & ~0x3f));
3476 ASSERT(imm14 == (imm14 << 18) >> 18);
3477 int b5 = b50 >> 5;
3478 int b40 = b50 & 0x1f;
3479 return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt));
3480 }
3481
3482 ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn)
3483 {
3484 // The only allocated values for op2 is 0x1f, for op3 & op4 are 0.
3485 const int op2 = 0x1f;
3486 const int op3 = 0;
3487 const int op4 = 0;
3488 return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
3489 }
3490
    AssemblerBuffer m_buffer; // Backing storage the 32-bit instruction words are emitted into.
    Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink; // Branch sites recorded for later linking.
    // NOTE(review): these two indices appear to delimit the most recently
    // emitted watchpoint region — confirm against their uses outside this chunk.
    int m_indexOfLastWatchpoint;
    int m_indexOfTailOfLastWatchpoint;
3495};
3496
3497} // namespace JSC
3498
3499#undef CHECK_DATASIZE_OF
3500#undef DATASIZE_OF
3501#undef MEMOPSIZE_OF
3502#undef CHECK_DATASIZE
3503#undef DATASIZE
3504#undef MEMOPSIZE
3505#undef CHECK_FP_MEMOP_DATASIZE
3506
3507#endif // ENABLE(ASSEMBLER) && CPU(ARM64)
3508
3509#endif // ARM64Assembler_h