1 /* Copyright (C) 2009-2021 Free Software Foundation, Inc. 2 Contributed by ARM Ltd. 3 4 This file is part of GDB. 5 6 This program is free software; you can redistribute it and/or modify 7 it under the terms of the GNU General Public License as published by 8 the Free Software Foundation; either version 3 of the License, or 9 (at your option) any later version. 10 11 This program is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 GNU General Public License for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 18 19 #ifndef ARCH_AARCH64_INSN_H 20 #define ARCH_AARCH64_INSN_H 21 22 extern bool aarch64_debug; 23 24 /* Print an "aarch64" debug statement. */ 25 26 #define aarch64_debug_printf(fmt, ...) \ 27 debug_prefixed_printf_cond (aarch64_debug, "aarch64", fmt, ##__VA_ARGS__) 28 29 /* Support routines for instruction parsing. */ 30 31 /* Create a mask of X bits. */ 32 #define submask(x) ((1L << ((x) + 1)) - 1) 33 34 /* Extract the bitfield from OBJ starting at bit ST and ending at bit FN. */ 35 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st))) 36 37 /* Extract bit ST from OBJ. */ 38 #define bit(obj,st) (((obj) >> (st)) & 1) 39 40 /* Extract the signed bitfield from OBJ starting at bit ST and ending at 41 bit FN. The result is sign-extended. */ 42 #define sbits(obj,st,fn) \ 43 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st)))) 44 45 /* List of opcodes that we need for building the jump pad and relocating 46 an instruction. 
 */

enum aarch64_opcodes
{
  /* Each enumerator is the fixed (opcode) bit pattern of the encoding;
     the variable fields shown as i/r/c/s/b/o/n/x in the comments are
     OR-ed in separately via ENCODE when an instruction is emitted.  */

  /* B              0001 01ii iiii iiii iiii iiii iiii iiii */
  /* BL             1001 01ii iiii iiii iiii iiii iiii iiii */
  /* B.COND         0101 0100 iiii iiii iiii iiii iii0 cccc */
  /* CBZ            s011 0100 iiii iiii iiii iiii iiir rrrr */
  /* CBNZ           s011 0101 iiii iiii iiii iiii iiir rrrr */
  /* TBZ            b011 0110 bbbb biii iiii iiii iiir rrrr */
  /* TBNZ           b011 0111 bbbb biii iiii iiii iiir rrrr */
  B = 0x14000000,
  BL = 0x80000000 | B,
  BCOND = 0x40000000 | B,
  CBZ = 0x20000000 | B,
  CBNZ = 0x21000000 | B,
  TBZ = 0x36000000 | B,
  TBNZ = 0x37000000 | B,
  /* BR             1101 0110 0001 1111 0000 00rr rrr0 0000 */
  /* BLR            1101 0110 0011 1111 0000 00rr rrr0 0000 */
  BR = 0xd61f0000,
  BLR = 0xd63f0000,
  /* RET            1101 0110 0101 1111 0000 00rr rrr0 0000 */
  RET = 0xd65f0000,
  /* STP            s010 100o o0ii iiii irrr rrrr rrrr rrrr */
  /* LDP            s010 100o o1ii iiii irrr rrrr rrrr rrrr */
  /* STP (SIMD&VFP) ss10 110o o0ii iiii irrr rrrr rrrr rrrr */
  /* LDP (SIMD&VFP) ss10 110o o1ii iiii irrr rrrr rrrr rrrr */
  STP = 0x28000000,
  LDP = 0x28400000,
  STP_SIMD_VFP = 0x04000000 | STP,
  LDP_SIMD_VFP = 0x04000000 | LDP,
  /* STR            ss11 100o 00xi iiii iiii xxrr rrrr rrrr */
  /* LDR            ss11 100o 01xi iiii iiii xxrr rrrr rrrr */
  /* LDRSW          1011 100o 10xi iiii iiii xxrr rrrr rrrr */
  STR = 0x38000000,
  LDR = 0x00400000 | STR,
  LDRSW = 0x80800000 | STR,
  /* LDAXR          ss00 1000 0101 1111 1111 11rr rrrr rrrr */
  LDAXR = 0x085ffc00,
  /* STXR           ss00 1000 000r rrrr 0111 11rr rrrr rrrr */
  STXR = 0x08007c00,
  /* STLR           ss00 1000 1001 1111 1111 11rr rrrr rrrr */
  STLR = 0x089ffc00,
  /* MOV            s101 0010 1xxi iiii iiii iiii iiir rrrr */
  /* MOVK           s111 0010 1xxi iiii iiii iiii iiir rrrr */
  MOV = 0x52800000,
  MOVK = 0x20000000 | MOV,
  /* ADD            s00o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
  /* SUB            s10o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
  /* SUBS           s11o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
  ADD = 0x01000000,
  SUB = 0x40000000 | ADD,
  SUBS = 0x20000000 | SUB,
  /* AND            s000 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
  /* ORR            s010 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
  /* ORN            s010 1010 xx1x xxxx xxxx xxxx xxxx xxxx */
  /* EOR            s100 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
  AND = 0x0a000000,
  ORR = 0x20000000 | AND,
  ORN = 0x00200000 | ORR,
  EOR = 0x40000000 | AND,
  /* LSLV           s001 1010 110r rrrr 0010 00rr rrrr rrrr */
  /* LSRV           s001 1010 110r rrrr 0010 01rr rrrr rrrr */
  /* ASRV           s001 1010 110r rrrr 0010 10rr rrrr rrrr */
  LSLV = 0x1ac02000,
  LSRV = 0x00000400 | LSLV,
  ASRV = 0x00000800 | LSLV,
  /* SBFM           s001 0011 0nii iiii iiii iirr rrrr rrrr */
  SBFM = 0x13000000,
  /* UBFM           s101 0011 0nii iiii iiii iirr rrrr rrrr */
  UBFM = 0x40000000 | SBFM,
  /* CSINC          s001 1010 100r rrrr cccc 01rr rrrr rrrr */
  CSINC = 0x9a800400,
  /* MUL            s001 1011 000r rrrr 0111 11rr rrrr rrrr */
  MUL = 0x1b007c00,
  /* MSR (register) 1101 0101 0001 oooo oooo oooo ooor rrrr */
  /* MRS            1101 0101 0011 oooo oooo oooo ooor rrrr */
  MSR = 0xd5100000,
  MRS = 0x00200000 | MSR,
  /* HINT           1101 0101 0000 0011 0010 oooo ooo1 1111 */
  HINT = 0xd503201f,
  SEVL = (5 << 5) | HINT,
  WFE = (2 << 5) | HINT,
  NOP = (0 << 5) | HINT,
};

/* List of useful masks.  */
enum aarch64_masks
{
  /* Used for masking out an Rn argument from an opcode.  */
  CLEAR_Rn_MASK = 0xfffffc1f,
};

/* Representation of a general purpose register of the form xN or wN.

   This type is used by emitting functions that take registers as operands.  */

struct aarch64_register
{
  /* Register number N.  */
  unsigned num;
  /* Non-zero for the 64-bit (xN) form, zero for the 32-bit (wN) form.  */
  int is64;
};

enum aarch64_memory_operand_type
{
  MEMORY_OPERAND_OFFSET,
  MEMORY_OPERAND_PREINDEX,
  MEMORY_OPERAND_POSTINDEX,
};

/* Representation of a memory operand, used for load and store
   instructions.
159 160 The types correspond to the following variants: 161 162 MEMORY_OPERAND_OFFSET: LDR rt, [rn, #offset] 163 MEMORY_OPERAND_PREINDEX: LDR rt, [rn, #index]! 164 MEMORY_OPERAND_POSTINDEX: LDR rt, [rn], #index */ 165 166 struct aarch64_memory_operand 167 { 168 /* Type of the operand. */ 169 enum aarch64_memory_operand_type type; 170 171 /* Index from the base register. */ 172 int32_t index; 173 }; 174 175 /* Helper macro to mask and shift a value into a bitfield. */ 176 177 #define ENCODE(val, size, offset) \ 178 ((uint32_t) ((val & ((1ULL << size) - 1)) << offset)) 179 180 int aarch64_decode_adr (CORE_ADDR addr, uint32_t insn, int *is_adrp, 181 unsigned *rd, int32_t *offset); 182 183 int aarch64_decode_b (CORE_ADDR addr, uint32_t insn, int *is_bl, 184 int32_t *offset); 185 186 int aarch64_decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, 187 int32_t *offset); 188 189 int aarch64_decode_cb (CORE_ADDR addr, uint32_t insn, int *is64, 190 int *is_cbnz, unsigned *rn, int32_t *offset); 191 192 int aarch64_decode_tb (CORE_ADDR addr, uint32_t insn, int *is_tbnz, 193 unsigned *bit, unsigned *rt, int32_t *imm); 194 195 int aarch64_decode_ldr_literal (CORE_ADDR addr, uint32_t insn, int *is_w, 196 int *is64, unsigned *rt, int32_t *offset); 197 198 /* Data passed to each method of aarch64_insn_visitor. */ 199 200 struct aarch64_insn_data 201 { 202 /* The instruction address. */ 203 CORE_ADDR insn_addr; 204 }; 205 206 /* Visit different instructions by different methods. */ 207 208 struct aarch64_insn_visitor 209 { 210 /* Visit instruction B/BL OFFSET. */ 211 void (*b) (const int is_bl, const int32_t offset, 212 struct aarch64_insn_data *data); 213 214 /* Visit instruction B.COND OFFSET. */ 215 void (*b_cond) (const unsigned cond, const int32_t offset, 216 struct aarch64_insn_data *data); 217 218 /* Visit instruction CBZ/CBNZ Rn, OFFSET. 
*/ 219 void (*cb) (const int32_t offset, const int is_cbnz, 220 const unsigned rn, int is64, 221 struct aarch64_insn_data *data); 222 223 /* Visit instruction TBZ/TBNZ Rt, #BIT, OFFSET. */ 224 void (*tb) (const int32_t offset, int is_tbnz, 225 const unsigned rt, unsigned bit, 226 struct aarch64_insn_data *data); 227 228 /* Visit instruction ADR/ADRP Rd, OFFSET. */ 229 void (*adr) (const int32_t offset, const unsigned rd, 230 const int is_adrp, struct aarch64_insn_data *data); 231 232 /* Visit instruction LDR/LDRSW Rt, OFFSET. */ 233 void (*ldr_literal) (const int32_t offset, const int is_sw, 234 const unsigned rt, const int is64, 235 struct aarch64_insn_data *data); 236 237 /* Visit instruction INSN of other kinds. */ 238 void (*others) (const uint32_t insn, struct aarch64_insn_data *data); 239 }; 240 241 void aarch64_relocate_instruction (uint32_t insn, 242 const struct aarch64_insn_visitor *visitor, 243 struct aarch64_insn_data *data); 244 245 #define can_encode_int32(val, bits) \ 246 (((val) >> (bits)) == 0 || ((val) >> (bits)) == -1) 247 248 /* Write a B or BL instruction into *BUF. 249 250 B #offset 251 BL #offset 252 253 IS_BL specifies if the link register should be updated. 254 OFFSET is the immediate offset from the current PC. It is 255 byte-addressed but should be 4 bytes aligned. It has a limited range of 256 +/- 128MB (26 bits << 2). */ 257 258 #define emit_b(buf, is_bl, offset) \ 259 aarch64_emit_insn (buf, ((is_bl) ? BL : B) | (ENCODE ((offset) >> 2, 26, 0))) 260 261 /* Write a BCOND instruction into *BUF. 262 263 B.COND #offset 264 265 COND specifies the condition field. 266 OFFSET is the immediate offset from the current PC. It is 267 byte-addressed but should be 4 bytes aligned. It has a limited range of 268 +/- 1MB (19 bits << 2). */ 269 270 #define emit_bcond(buf, cond, offset) \ 271 aarch64_emit_insn (buf, \ 272 BCOND | ENCODE ((offset) >> 2, 19, 5) \ 273 | ENCODE ((cond), 4, 0)) 274 275 /* Write a CBZ or CBNZ instruction into *BUF. 
276 277 CBZ rt, #offset 278 CBNZ rt, #offset 279 280 IS_CBNZ distinguishes between CBZ and CBNZ instructions. 281 RN is the register to test. 282 OFFSET is the immediate offset from the current PC. It is 283 byte-addressed but should be 4 bytes aligned. It has a limited range of 284 +/- 1MB (19 bits << 2). */ 285 286 #define emit_cb(buf, is_cbnz, rt, offset) \ 287 aarch64_emit_insn (buf, \ 288 ((is_cbnz) ? CBNZ : CBZ) \ 289 | ENCODE (rt.is64, 1, 31) /* sf */ \ 290 | ENCODE (offset >> 2, 19, 5) /* imm19 */ \ 291 | ENCODE (rt.num, 5, 0)) 292 293 /* Write a LDR instruction into *BUF. 294 295 LDR rt, [rn, #offset] 296 LDR rt, [rn, #index]! 297 LDR rt, [rn], #index 298 299 RT is the register to store. 300 RN is the base address register. 301 OFFSET is the immediate to add to the base address. It is limited to 302 0 .. 32760 range (12 bits << 3). */ 303 304 #define emit_ldr(buf, rt, rn, operand) \ 305 aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, LDR, rt, rn, operand) 306 307 /* Write a LDRSW instruction into *BUF. The register size is 64-bit. 308 309 LDRSW xt, [rn, #offset] 310 LDRSW xt, [rn, #index]! 311 LDRSW xt, [rn], #index 312 313 RT is the register to store. 314 RN is the base address register. 315 OFFSET is the immediate to add to the base address. It is limited to 316 0 .. 16380 range (12 bits << 2). */ 317 318 #define emit_ldrsw(buf, rt, rn, operand) \ 319 aarch64_emit_load_store (buf, 3, LDRSW, rt, rn, operand) 320 321 322 /* Write a TBZ or TBNZ instruction into *BUF. 323 324 TBZ rt, #bit, #offset 325 TBNZ rt, #bit, #offset 326 327 IS_TBNZ distinguishes between TBZ and TBNZ instructions. 328 RT is the register to test. 329 BIT is the index of the bit to test in register RT. 330 OFFSET is the immediate offset from the current PC. It is 331 byte-addressed but should be 4 bytes aligned. It has a limited range of 332 +/- 32KB (14 bits << 2). */ 333 334 #define emit_tb(buf, is_tbnz, bit, rt, offset) \ 335 aarch64_emit_insn (buf, \ 336 ((is_tbnz) ? 
TBNZ: TBZ) \ 337 | ENCODE (bit >> 5, 1, 31) /* b5 */ \ 338 | ENCODE (bit, 5, 19) /* b40 */ \ 339 | ENCODE (offset >> 2, 14, 5) /* imm14 */ \ 340 | ENCODE (rt.num, 5, 0)) 341 342 /* Write a NOP instruction into *BUF. */ 343 344 #define emit_nop(buf) aarch64_emit_insn (buf, NOP) 345 346 int aarch64_emit_insn (uint32_t *buf, uint32_t insn); 347 348 int aarch64_emit_load_store (uint32_t *buf, uint32_t size, 349 enum aarch64_opcodes opcode, 350 struct aarch64_register rt, 351 struct aarch64_register rn, 352 struct aarch64_memory_operand operand); 353 354 #endif /* ARCH_AARCH64_INSN_H */ 355