From 4f2cb9d129f8a5eba81379b70322d013b670045c Mon Sep 17 00:00:00 2001
From: Srinath Parvathaneni
Date: Tue, 25 Jun 2024 12:58:27 +0100
Subject: aarch64: Fix sve2p1 ld[1-4]q/st[1-4]q instruction operands.

This patch fixes the encoding and syntax for the sve2p1 instructions
ld[1-4]q/st[1-4]q as mentioned below, for the issues reported here:
https://sourceware.org/pipermail/binutils/2024-February/132408.html

1) Previously all the ld[1-4]q/st[1-4]q instructions were wrongly added as
predicated instructions; this issue is fixed in this patch by replacing the
"SVE2p1_INSNC" macro with "SVE2p1_INSN".

2) The wrong first operand in all the ld[1-4]q/st[1-4]q instructions is
fixed by replacing "SVE_Zt" with "SVE_ZtxN".

3) Wrong operand qualifiers in the ld1q and st1q instructions are also
fixed in this patch.

4) In ld1q/st1q the index in the second argument is optional; if the index
is xzr and is skipped in the assembly, the index field is ignored by the
disassembler.

Fixing the above mentioned issues helps with the following:
1) The first register operand of ld1q and st1q accepts enclosing curly
braces.
2) The ld2q, ld3q, ld4q, st2q, st3q, and st4q instructions accept a
wrapping sequence of vector registers.

For the instructions ld[2-4]q/st[2-4]q, tests for a wrapping sequence of
vector registers are added along with the short form of operands for a
non-wrapping sequence. I have added tests using the following logic:

ld2q {Z0.Q, Z1.Q}, p0/Z, [x0, #0, MUL VL] // raw insn encoding (all zeroes)
ld2q {Z31.Q, Z0.Q}, p0/Z, [x0, #0, MUL VL] // encoding of <Zt> field
ld2q {Z0.Q, Z1.Q}, p7/Z, [x0, #0, MUL VL] // encoding of <Pg> field
ld2q {Z0.Q, Z1.Q}, p0/Z, [x30, #0, MUL VL] // encoding of <Xn|SP> field
ld2q {Z0.Q, Z1.Q}, p0/Z, [x0, #-16, MUL VL] // encoding of <imm> field (low value)
ld2q {Z0.Q, Z1.Q}, p0/Z, [x0, #14, MUL VL] // encoding of <imm> field (high value)
ld2q {Z31.Q, Z0.Q}, p7/Z, [x30, #-16, MUL VL] // encoding of all fields (all ones)
ld2q {Z30.Q, Z31.Q}, p1/Z, [x3, #-2, MUL VL] // random encoding.
For all the above form of instructions the hyphenated form is preferred for disassembly if there are more than two registers in the list, and the register numbers are monotonically increasing in increments of one. --- opcodes/aarch64-asm-2.c | 144 ++++++++++++++++++++++++------------------------ opcodes/aarch64-dis-2.c | 141 +++++++++++++++++++++++------------------------ opcodes/aarch64-opc-2.c | 4 +- opcodes/aarch64-opc.c | 78 ++++++++++++++------------ opcodes/aarch64-tbl.h | 48 ++++++++-------- 5 files changed, 206 insertions(+), 209 deletions(-) (limited to 'opcodes') diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c index 5eb21c2..00d10f1 100644 --- a/opcodes/aarch64-asm-2.c +++ b/opcodes/aarch64-asm-2.c @@ -654,7 +654,6 @@ aarch64_insert_operand (const aarch64_operand *self, case 123: case 124: case 125: - case 183: case 184: case 185: case 186: @@ -668,31 +667,32 @@ aarch64_insert_operand (const aarch64_operand *self, case 194: case 195: case 196: - case 212: + case 197: case 213: case 214: case 215: - case 224: + case 216: case 225: case 226: case 227: case 228: - case 239: - case 243: - case 248: - case 256: + case 229: + case 240: + case 244: + case 249: case 257: case 258: - case 265: + case 259: case 266: case 267: case 268: + case 269: return aarch64_ins_regno (self, info, code, inst, errors); case 6: case 119: case 120: - case 304: - case 307: + case 305: + case 308: return aarch64_ins_none (self, info, code, inst, errors); case 17: return aarch64_ins_reg_extended (self, info, code, inst, errors); @@ -707,17 +707,16 @@ aarch64_insert_operand (const aarch64_operand *self, case 37: case 38: case 39: - case 309: + case 310: return aarch64_ins_reglane (self, info, code, inst, errors); case 40: case 41: case 42: - case 229: case 230: - case 233: - case 269: + case 231: + case 234: case 270: - case 285: + case 271: case 286: case 287: case 288: @@ -734,6 +733,7 @@ aarch64_insert_operand (const aarch64_operand *self, case 299: case 300: case 
301: + case 302: return aarch64_ins_simple_index (self, info, code, inst, errors); case 43: return aarch64_ins_reglist (self, info, code, inst, errors); @@ -772,9 +772,8 @@ aarch64_insert_operand (const aarch64_operand *self, case 92: case 118: case 122: - case 180: - case 182: - case 203: + case 181: + case 183: case 204: case 205: case 206: @@ -783,14 +782,15 @@ aarch64_insert_operand (const aarch64_operand *self, case 209: case 210: case 211: - case 271: - case 302: + case 212: + case 272: case 303: - case 305: + case 304: case 306: - case 308: - case 313: + case 307: + case 309: case 314: + case 315: return aarch64_ins_imm (self, info, code, inst, errors); case 52: case 53: @@ -800,10 +800,10 @@ aarch64_insert_operand (const aarch64_operand *self, case 56: return aarch64_ins_advsimd_imm_modified (self, info, code, inst, errors); case 60: - case 170: + case 171: return aarch64_ins_fpimm (self, info, code, inst, errors); case 78: - case 178: + case 179: return aarch64_ins_limm (self, info, code, inst, errors); case 79: return aarch64_ins_aimm (self, info, code, inst, errors); @@ -813,11 +813,11 @@ aarch64_insert_operand (const aarch64_operand *self, return aarch64_ins_fbits (self, info, code, inst, errors); case 83: case 84: - case 175: + case 176: return aarch64_ins_imm_rotate2 (self, info, code, inst, errors); case 85: - case 174: - case 176: + case 175: + case 177: return aarch64_ins_imm_rotate1 (self, info, code, inst, errors); case 86: case 87: @@ -894,8 +894,8 @@ aarch64_insert_operand (const aarch64_operand *self, case 150: case 151: case 152: - return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst, errors); case 153: + return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst, errors); case 154: case 155: case 156: @@ -903,117 +903,115 @@ aarch64_insert_operand (const aarch64_operand *self, case 158: case 159: case 160: - return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst, errors); case 161: + return aarch64_ins_sve_addr_rz_xtw (self, info, 
code, inst, errors); case 162: case 163: case 164: - return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst, errors); case 165: - return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst, errors); + return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst, errors); case 166: - return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst, errors); + return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst, errors); case 167: - return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst, errors); + return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst, errors); case 168: - return aarch64_ins_sve_aimm (self, info, code, inst, errors); + return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst, errors); case 169: + return aarch64_ins_sve_aimm (self, info, code, inst, errors); + case 170: return aarch64_ins_sve_asimm (self, info, code, inst, errors); - case 171: - return aarch64_ins_sve_float_half_one (self, info, code, inst, errors); case 172: - return aarch64_ins_sve_float_half_two (self, info, code, inst, errors); + return aarch64_ins_sve_float_half_one (self, info, code, inst, errors); case 173: + return aarch64_ins_sve_float_half_two (self, info, code, inst, errors); + case 174: return aarch64_ins_sve_float_zero_one (self, info, code, inst, errors); - case 177: + case 178: return aarch64_ins_inv_limm (self, info, code, inst, errors); - case 179: + case 180: return aarch64_ins_sve_limm_mov (self, info, code, inst, errors); - case 181: + case 182: return aarch64_ins_sve_scale (self, info, code, inst, errors); - case 197: case 198: case 199: - return aarch64_ins_sve_shlimm (self, info, code, inst, errors); case 200: + return aarch64_ins_sve_shlimm (self, info, code, inst, errors); case 201: case 202: - case 284: + case 203: + case 285: return aarch64_ins_sve_shrimm (self, info, code, inst, errors); - case 216: case 217: case 218: case 219: - return aarch64_ins_sme_za_vrs1 (self, info, code, inst, errors); case 220: + return aarch64_ins_sme_za_vrs1 
(self, info, code, inst, errors); case 221: case 222: case 223: + case 224: return aarch64_ins_sme_za_vrs2 (self, info, code, inst, errors); - case 231: case 232: - case 234: + case 233: case 235: case 236: case 237: case 238: + case 239: return aarch64_ins_sve_quad_index (self, info, code, inst, errors); - case 240: case 241: - return aarch64_ins_sve_index (self, info, code, inst, errors); case 242: - case 244: - case 264: - case 315: - case 316: - case 317: - return aarch64_ins_sve_reglist (self, info, code, inst, errors); + return aarch64_ins_sve_index (self, info, code, inst, errors); + case 243: case 245: + case 265: + return aarch64_ins_sve_reglist (self, info, code, inst, errors); case 246: - case 249: + case 247: case 250: case 251: case 252: case 253: - case 263: - return aarch64_ins_sve_aligned_reglist (self, info, code, inst, errors); - case 247: case 254: + case 264: + return aarch64_ins_sve_aligned_reglist (self, info, code, inst, errors); + case 248: case 255: + case 256: return aarch64_ins_sve_strided_reglist (self, info, code, inst, errors); - case 259: - case 261: - case 272: - return aarch64_ins_sme_za_hv_tiles (self, info, code, inst, errors); case 260: case 262: - return aarch64_ins_sme_za_hv_tiles_range (self, info, code, inst, errors); case 273: + return aarch64_ins_sme_za_hv_tiles (self, info, code, inst, errors); + case 261: + case 263: + return aarch64_ins_sme_za_hv_tiles_range (self, info, code, inst, errors); case 274: case 275: case 276: case 277: case 278: case 279: - return aarch64_ins_sme_za_array (self, info, code, inst, errors); case 280: - return aarch64_ins_sme_addr_ri_u4xvl (self, info, code, inst, errors); + return aarch64_ins_sme_za_array (self, info, code, inst, errors); case 281: - return aarch64_ins_sme_sm_za (self, info, code, inst, errors); + return aarch64_ins_sme_addr_ri_u4xvl (self, info, code, inst, errors); case 282: - return aarch64_ins_sme_pred_reg_with_index (self, info, code, inst, errors); + return 
aarch64_ins_sme_sm_za (self, info, code, inst, errors); case 283: + return aarch64_ins_sme_pred_reg_with_index (self, info, code, inst, errors); + case 284: return aarch64_ins_plain_shrimm (self, info, code, inst, errors); - case 310: case 311: case 312: + case 313: return aarch64_ins_x0_to_x30 (self, info, code, inst, errors); + case 316: + case 317: case 318: case 319: - case 320: - case 321: return aarch64_ins_rcpc3_addr_opt_offset (self, info, code, inst, errors); - case 322: + case 320: return aarch64_ins_rcpc3_addr_offset (self, info, code, inst, errors); default: assert (0); abort (); } diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c index 0952728..95342ce 100644 --- a/opcodes/aarch64-dis-2.c +++ b/opcodes/aarch64-dis-2.c @@ -34443,7 +34443,6 @@ aarch64_extract_operand (const aarch64_operand *self, case 123: case 124: case 125: - case 183: case 184: case 185: case 186: @@ -34457,31 +34456,32 @@ aarch64_extract_operand (const aarch64_operand *self, case 194: case 195: case 196: - case 212: + case 197: case 213: case 214: case 215: - case 224: + case 216: case 225: case 226: case 227: case 228: - case 239: - case 243: - case 248: - case 256: + case 229: + case 240: + case 244: + case 249: case 257: case 258: - case 265: + case 259: case 266: case 267: case 268: + case 269: return aarch64_ext_regno (self, info, code, inst, errors); case 6: case 119: case 120: - case 304: - case 307: + case 305: + case 308: return aarch64_ext_none (self, info, code, inst, errors); case 11: return aarch64_ext_regrt_sysins (self, info, code, inst, errors); @@ -34501,17 +34501,16 @@ aarch64_extract_operand (const aarch64_operand *self, case 37: case 38: case 39: - case 309: + case 310: return aarch64_ext_reglane (self, info, code, inst, errors); case 40: case 41: case 42: - case 229: case 230: - case 233: - case 269: + case 231: + case 234: case 270: - case 285: + case 271: case 286: case 287: case 288: @@ -34528,6 +34527,7 @@ aarch64_extract_operand (const 
aarch64_operand *self, case 299: case 300: case 301: + case 302: return aarch64_ext_simple_index (self, info, code, inst, errors); case 43: return aarch64_ext_reglist (self, info, code, inst, errors); @@ -34567,9 +34567,8 @@ aarch64_extract_operand (const aarch64_operand *self, case 92: case 118: case 122: - case 180: - case 182: - case 203: + case 181: + case 183: case 204: case 205: case 206: @@ -34578,14 +34577,15 @@ aarch64_extract_operand (const aarch64_operand *self, case 209: case 210: case 211: - case 271: - case 302: + case 212: + case 272: case 303: - case 305: + case 304: case 306: - case 308: - case 313: + case 307: + case 309: case 314: + case 315: return aarch64_ext_imm (self, info, code, inst, errors); case 52: case 53: @@ -34597,10 +34597,10 @@ aarch64_extract_operand (const aarch64_operand *self, case 57: return aarch64_ext_shll_imm (self, info, code, inst, errors); case 60: - case 170: + case 171: return aarch64_ext_fpimm (self, info, code, inst, errors); case 78: - case 178: + case 179: return aarch64_ext_limm (self, info, code, inst, errors); case 79: return aarch64_ext_aimm (self, info, code, inst, errors); @@ -34610,11 +34610,11 @@ aarch64_extract_operand (const aarch64_operand *self, return aarch64_ext_fbits (self, info, code, inst, errors); case 83: case 84: - case 175: + case 176: return aarch64_ext_imm_rotate2 (self, info, code, inst, errors); case 85: - case 174: - case 176: + case 175: + case 177: return aarch64_ext_imm_rotate1 (self, info, code, inst, errors); case 86: case 87: @@ -34691,8 +34691,8 @@ aarch64_extract_operand (const aarch64_operand *self, case 150: case 151: case 152: - return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst, errors); case 153: + return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst, errors); case 154: case 155: case 156: @@ -34700,118 +34700,115 @@ aarch64_extract_operand (const aarch64_operand *self, case 158: case 159: case 160: - return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst, 
errors); case 161: + return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst, errors); case 162: case 163: case 164: - return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst, errors); case 165: - return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst, errors); + return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst, errors); case 166: - return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst, errors); + return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst, errors); case 167: - return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst, errors); + return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst, errors); case 168: - return aarch64_ext_sve_aimm (self, info, code, inst, errors); + return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst, errors); case 169: + return aarch64_ext_sve_aimm (self, info, code, inst, errors); + case 170: return aarch64_ext_sve_asimm (self, info, code, inst, errors); - case 171: - return aarch64_ext_sve_float_half_one (self, info, code, inst, errors); case 172: - return aarch64_ext_sve_float_half_two (self, info, code, inst, errors); + return aarch64_ext_sve_float_half_one (self, info, code, inst, errors); case 173: + return aarch64_ext_sve_float_half_two (self, info, code, inst, errors); + case 174: return aarch64_ext_sve_float_zero_one (self, info, code, inst, errors); - case 177: + case 178: return aarch64_ext_inv_limm (self, info, code, inst, errors); - case 179: + case 180: return aarch64_ext_sve_limm_mov (self, info, code, inst, errors); - case 181: + case 182: return aarch64_ext_sve_scale (self, info, code, inst, errors); - case 197: case 198: case 199: - return aarch64_ext_sve_shlimm (self, info, code, inst, errors); case 200: + return aarch64_ext_sve_shlimm (self, info, code, inst, errors); case 201: case 202: - case 284: + case 203: + case 285: return aarch64_ext_sve_shrimm (self, info, code, inst, errors); - case 216: case 217: case 218: case 219: - return aarch64_ext_sme_za_vrs1 (self, info, 
code, inst, errors); case 220: + return aarch64_ext_sme_za_vrs1 (self, info, code, inst, errors); case 221: case 222: case 223: + case 224: return aarch64_ext_sme_za_vrs2 (self, info, code, inst, errors); - case 231: case 232: - case 234: + case 233: case 235: case 236: case 237: case 238: + case 239: return aarch64_ext_sve_quad_index (self, info, code, inst, errors); - case 240: case 241: - return aarch64_ext_sve_index (self, info, code, inst, errors); case 242: - case 244: - case 264: - return aarch64_ext_sve_reglist (self, info, code, inst, errors); + return aarch64_ext_sve_index (self, info, code, inst, errors); + case 243: case 245: + case 265: + return aarch64_ext_sve_reglist (self, info, code, inst, errors); case 246: - case 249: + case 247: case 250: case 251: case 252: case 253: - case 263: - return aarch64_ext_sve_aligned_reglist (self, info, code, inst, errors); - case 247: case 254: + case 264: + return aarch64_ext_sve_aligned_reglist (self, info, code, inst, errors); + case 248: case 255: + case 256: return aarch64_ext_sve_strided_reglist (self, info, code, inst, errors); - case 259: - case 261: - case 272: - return aarch64_ext_sme_za_hv_tiles (self, info, code, inst, errors); case 260: case 262: - return aarch64_ext_sme_za_hv_tiles_range (self, info, code, inst, errors); case 273: + return aarch64_ext_sme_za_hv_tiles (self, info, code, inst, errors); + case 261: + case 263: + return aarch64_ext_sme_za_hv_tiles_range (self, info, code, inst, errors); case 274: case 275: case 276: case 277: case 278: case 279: - return aarch64_ext_sme_za_array (self, info, code, inst, errors); case 280: - return aarch64_ext_sme_addr_ri_u4xvl (self, info, code, inst, errors); + return aarch64_ext_sme_za_array (self, info, code, inst, errors); case 281: - return aarch64_ext_sme_sm_za (self, info, code, inst, errors); + return aarch64_ext_sme_addr_ri_u4xvl (self, info, code, inst, errors); case 282: - return aarch64_ext_sme_pred_reg_with_index (self, info, code, inst, 
errors); + return aarch64_ext_sme_sm_za (self, info, code, inst, errors); case 283: + return aarch64_ext_sme_pred_reg_with_index (self, info, code, inst, errors); + case 284: return aarch64_ext_plain_shrimm (self, info, code, inst, errors); - case 310: case 311: case 312: + case 313: return aarch64_ext_x0_to_x30 (self, info, code, inst, errors); - case 315: case 316: case 317: - return aarch64_ext_sve_reglist_zt (self, info, code, inst, errors); case 318: case 319: - case 320: - case 321: return aarch64_ext_rcpc3_addr_opt_offset (self, info, code, inst, errors); - case 322: + case 320: return aarch64_ext_rcpc3_addr_offset (self, info, code, inst, errors); default: assert (0); abort (); } diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c index 3d067d4..d42fea4 100644 --- a/opcodes/aarch64-opc-2.c +++ b/opcodes/aarch64-opc-2.c @@ -172,6 +172,7 @@ const struct aarch64_operand aarch64_operands[] = {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL1", (1 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"}, {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL2", (2 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"}, {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL3", (3 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"}, + {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL4", (4 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"}, {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZX", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn,FLD_Rm}, "vector of address with a scalar register offset"}, {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"}, 
{AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL1", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"}, @@ -339,9 +340,6 @@ const struct aarch64_operand aarch64_operands[] = {AARCH64_OPND_CLASS_INT_REG, "MOPS_WB_Rd", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an integer register with writeback"}, {AARCH64_OPND_CLASS_IMMEDIATE, "CSSC_SIMM8", OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_CSSC_imm8}, "an 8-bit signed immediate"}, {AARCH64_OPND_CLASS_IMMEDIATE, "CSSC_UIMM8", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_CSSC_imm8}, "an 8-bit unsigned immediate"}, - {AARCH64_OPND_CLASS_SVE_REGLIST, "SME_Zt2", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zt}, "a list of 2 SVE vector registers"}, - {AARCH64_OPND_CLASS_SVE_REGLIST, "SME_Zt3", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zt}, "a list of 3 SVE vector registers"}, - {AARCH64_OPND_CLASS_SVE_REGLIST, "SME_Zt4", 4 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zt}, "a list of 4 SVE vector registers"}, {AARCH64_OPND_CLASS_ADDRESS, "RCPC3_ADDR_OPT_POSTIND", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_opc2}, "an address with post-incrementing by ammount of loaded bytes"}, {AARCH64_OPND_CLASS_ADDRESS, "RCPC3_ADDR_OPT_PREIND_WB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_opc2}, "an address with pre-incrementing with write-back by ammount of stored bytes"}, {AARCH64_OPND_CLASS_ADDRESS, "RCPC3_ADDR_POSTIND", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an address with post-incrementing by ammount of loaded bytes"}, diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c index 6393474..72eea59 100644 --- a/opcodes/aarch64-opc.c +++ b/opcodes/aarch64-opc.c @@ -1929,9 +1929,6 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, case AARCH64_OPND_SME_Znx2: case AARCH64_OPND_SME_Znx2_BIT_INDEX: case 
AARCH64_OPND_SME_Znx4: - case AARCH64_OPND_SME_Zt2: - case AARCH64_OPND_SME_Zt3: - case AARCH64_OPND_SME_Zt4: num = get_operand_specific_data (&aarch64_operands[type]); if (!check_reglist (opnd, mismatch_detail, idx, num, 1)) return 0; @@ -2475,6 +2472,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, case AARCH64_OPND_SVE_ADDR_RX_LSL1: case AARCH64_OPND_SVE_ADDR_RX_LSL2: case AARCH64_OPND_SVE_ADDR_RX_LSL3: + case AARCH64_OPND_SVE_ADDR_RX_LSL4: case AARCH64_OPND_SVE_ADDR_RZ: case AARCH64_OPND_SVE_ADDR_RZ_LSL1: case AARCH64_OPND_SVE_ADDR_RZ_LSL2: @@ -3768,10 +3766,7 @@ print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd, /* The hyphenated form is preferred for disassembly if there is more than one register in the list, and the register numbers are monotonically increasing in increments of one. */ - if (stride == 1 && num_regs > 1 - && ((opnd->type != AARCH64_OPND_SME_Zt2) - && (opnd->type != AARCH64_OPND_SME_Zt3) - && (opnd->type != AARCH64_OPND_SME_Zt4))) + if (stride == 1 && num_regs > 1) if (opnd->qualifier == AARCH64_OPND_QLF_NIL) snprintf (buf, size, "{%s-%s}%s", style_reg (styler, "%s%d", prefix, first_reg), @@ -3877,36 +3872,51 @@ print_register_offset_address (char *buf, size_t size, bool print_amount_p = true; const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name; - if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B - || !opnd->shifter.amount_present)) + /* This is the case where offset is the optional argument and the optional + argument is ignored in the disassembly. */ + if (opnd->type == AARCH64_OPND_SVE_ADDR_ZX && offset != NULL + && strcmp (offset,"xzr") == 0) { - /* Not print the shift/extend amount when the amount is zero and - when it is not the special case of 8-bit load/store instruction. */ - print_amount_p = false; - /* Likewise, no need to print the shift operator LSL in such a - situation. 
*/ - if (opnd->shifter.kind == AARCH64_MOD_LSL) - print_extend_p = false; + /* Example: [.S{, }]. + When the assembly is [Z0.S, XZR] or [Z0.S], Xm is XZR in both the cases + and the preferred disassembly is [Z0.S], ignoring the optional Xm. */ + snprintf (buf, size, "[%s]", style_reg (styler, base)); } - - /* Prepare for the extend/shift. */ - if (print_extend_p) + else { - if (print_amount_p) - snprintf (tb, sizeof (tb), ", %s %s", - style_sub_mnem (styler, shift_name), - style_imm (styler, "#%" PRIi64, - /* PR 21096: The %100 is to silence a warning about possible truncation. */ - (opnd->shifter.amount % 100))); + if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B + || !opnd->shifter.amount_present)) + { + /* Not print the shift/extend amount when the amount is zero and + when it is not the special case of 8-bit load/store + instruction. */ + print_amount_p = false; + /* Likewise, no need to print the shift operator LSL in such a + situation. */ + if (opnd->shifter.kind == AARCH64_MOD_LSL) + print_extend_p = false; + } + + /* Prepare for the extend/shift. */ + if (print_extend_p) + { + if (print_amount_p) + snprintf (tb, sizeof (tb), ", %s %s", + style_sub_mnem (styler, shift_name), + style_imm (styler, "#%" PRIi64, + /* PR 21096: The %100 is to silence a warning about possible + truncation. */ + (opnd->shifter.amount % 100))); + else + snprintf (tb, sizeof (tb), ", %s", + style_sub_mnem (styler, shift_name)); + } else - snprintf (tb, sizeof (tb), ", %s", - style_sub_mnem (styler, shift_name)); - } - else - tb[0] = '\0'; + tb[0] = '\0'; - snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base), - style_reg (styler, offset), tb); + snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base), + style_reg (styler, offset), tb); + } } /* Print ZA tiles from imm8 in ZERO instruction. 
@@ -4257,9 +4267,6 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_SME_Znx4: case AARCH64_OPND_SME_Ztx2_STRIDED: case AARCH64_OPND_SME_Ztx4_STRIDED: - case AARCH64_OPND_SME_Zt2: - case AARCH64_OPND_SME_Zt3: - case AARCH64_OPND_SME_Zt4: print_register_list (buf, size, opnd, "z", styler); break; @@ -4724,6 +4731,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_SVE_ADDR_RX_LSL1: case AARCH64_OPND_SVE_ADDR_RX_LSL2: case AARCH64_OPND_SVE_ADDR_RX_LSL3: + case AARCH64_OPND_SVE_ADDR_RX_LSL4: print_register_offset_address (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1), get_offset_int_reg_name (opnd), styler); diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h index 8892166..8246de5 100644 --- a/opcodes/aarch64-tbl.h +++ b/opcodes/aarch64-tbl.h @@ -1841,11 +1841,11 @@ { \ QLF3(S_S,P_Z,S_S), \ } -#define OP_SVE_SZS_QD \ +#define OP_SVE_QZD \ { \ QLF3(S_Q,P_Z,S_D), \ } -#define OP_SVE_SUS_QD \ +#define OP_SVE_QUD \ { \ QLF3(S_Q,NIL,S_D), \ } @@ -6642,21 +6642,23 @@ const struct aarch64_opcode aarch64_opcode_table[] = SVE2p1_INSN("dupq",0x05202400, 0xffe0fc00, sve_index, 0, OP2 (SVE_Zd, SVE_Zn_5_INDEX), OP_SVE_VV_BHSD, 0, 0), SVE2p1_INSNC("extq",0x05602400, 0xfff0fc00, sve_misc, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zm_5, SVE_UIMM4), OP_SVE_BBBU, 0, C_SCAN_MOVPRFX, 1), - SVE2p1_INSNC("ld1q",0xc400a000, 0xffe0e000, sve_misc, 0, OP3 (SVE_Zt, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_SZS_QD, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("ld2q",0xa490e000, 0xfff0e000, sve_misc, 0, OP3 (SME_Zt2, SVE_Pg3, SVE_ADDR_RI_S4x2xVL), OP_SVE_QZU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("ld3q",0xa510e000, 0xfff0e000, sve_misc, 0, OP3 (SME_Zt3, SVE_Pg3, SVE_ADDR_RI_S4x3xVL), OP_SVE_QZU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("ld4q",0xa590e000, 0xfff0e000, sve_misc, 0, OP3 (SME_Zt4, SVE_Pg3, SVE_ADDR_RI_S4x4xVL), OP_SVE_QZU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("ld2q",0xa4a08000, 0xffe0e000, sve_misc, 0, OP3 (SME_Zt2, SVE_Pg3, 
SVE_ADDR_RR_LSL4), OP_SVE_QZU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("ld3q",0xa5208000, 0xffe0e000, sve_misc, 0, OP3 (SME_Zt3, SVE_Pg3, SVE_ADDR_RR_LSL4), OP_SVE_QZU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("ld4q",0xa5a08000, 0xffe0e000, sve_misc, 0, OP3 (SME_Zt4, SVE_Pg3, SVE_ADDR_RR_LSL4), OP_SVE_QZU, 0, C_SCAN_MOVPRFX, 0), - - SVE2p1_INSNC("st1q",0xe4202000, 0xffe0e000, sve_misc, 0, OP3 (SVE_Zt, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_SUS_QD, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("st2q",0xe4400000, 0xfff0e000, sve_misc, 0, OP3 (SME_Zt2, SVE_Pg3, SVE_ADDR_RI_S4x2xVL), OP_SVE_QUU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("st3q",0xe4800000, 0xfff0e000, sve_misc, 0, OP3 (SME_Zt3, SVE_Pg3, SVE_ADDR_RI_S4x3xVL), OP_SVE_QUU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("st4q",0xe4c00000, 0xfff0e000, sve_misc, 0, OP3 (SME_Zt4, SVE_Pg3, SVE_ADDR_RI_S4x4xVL), OP_SVE_QUU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("st2q",0xe4600000, 0xffe0e000, sve_misc, 0, OP3 (SME_Zt2, SVE_Pg3, SVE_ADDR_RR_LSL4), OP_SVE_QUU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("st3q",0xe4a00000, 0xffe0e000, sve_misc, 0, OP3 (SME_Zt3, SVE_Pg3, SVE_ADDR_RR_LSL4), OP_SVE_QUU, 0, C_SCAN_MOVPRFX, 0), - SVE2p1_INSNC("st4q",0xe4e00000, 0xffe0e000, sve_misc, 0, OP3 (SME_Zt4, SVE_Pg3, SVE_ADDR_RR_LSL4), OP_SVE_QUU, 0, C_SCAN_MOVPRFX, 0), + + SVE2p1_INSN("ld1q",0xc400a000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_QZD, F_OD (1), 0), + SVE2p1_INSN("ld2q",0xa490e000, 0xfff0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RI_S4x2xVL), OP_SVE_QZU, F_OD (2), 0), + SVE2p1_INSN("ld3q",0xa510e000, 0xfff0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RI_S4x3xVL), OP_SVE_QZU, F_OD (3), 0), + SVE2p1_INSN("ld4q",0xa590e000, 0xfff0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RI_S4x4xVL), OP_SVE_QZU, F_OD (4), 0), + SVE2p1_INSN("ld2q",0xa4a08000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RX_LSL4), OP_SVE_QZU, F_OD (2), 0), + SVE2p1_INSN("ld3q",0xa5208000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, 
SVE_Pg3, SVE_ADDR_RX_LSL4), OP_SVE_QZU, F_OD (3), 0), + SVE2p1_INSN("ld4q",0xa5a08000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RX_LSL4), OP_SVE_QZU, F_OD (4), 0), + + SVE2p1_INSN("st1q",0xe4202000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_QUD, F_OD (1), 0), + SVE2p1_INSN("st2q",0xe4400000, 0xfff0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RI_S4x2xVL), OP_SVE_QUU, F_OD (2), 0), + SVE2p1_INSN("st3q",0xe4800000, 0xfff0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RI_S4x3xVL), OP_SVE_QUU, F_OD (3), 0), + SVE2p1_INSN("st4q",0xe4c00000, 0xfff0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RI_S4x4xVL), OP_SVE_QUU, F_OD (4), 0), + SVE2p1_INSN("st2q",0xe4600000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RX_LSL4), OP_SVE_QUU, F_OD (2), 0), + SVE2p1_INSN("st3q",0xe4a00000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RX_LSL4), OP_SVE_QUU, F_OD (3), 0), + SVE2p1_INSN("st4q",0xe4e00000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_RX_LSL4), OP_SVE_QUU, F_OD (4), 0), + FP8_INSN("bf1cvtl", 0x2ea17800, 0xfffffc00, asimdmisc, OP2 (Vd, Vn), QL_V2FP8B8H, 0), FP8_INSN("bf1cvtl2", 0x6ea17800, 0xfffffc00, asimdmisc, OP2 (Vd, Vn), QL_V28H16B, 0), FP8_INSN("bf2cvtl", 0x2ee17800, 0xfffffc00, asimdmisc, OP2 (Vd, Vn), QL_V2FP8B8H, 0), @@ -7106,6 +7108,9 @@ const struct aarch64_opcode aarch64_opcode_table[] = Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RX_LSL3", \ (3 << OPD_F_OD_LSB) | OPD_F_NO_ZR, F(FLD_Rn,FLD_Rm), \ "an address with a scalar register offset") \ + Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RX_LSL4", \ + (4 << OPD_F_OD_LSB) | OPD_F_NO_ZR, F(FLD_Rn,FLD_Rm), \ + "an address with a scalar register offset") \ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_ZX", \ 0 << OPD_F_OD_LSB , F(FLD_SVE_Zn,FLD_Rm), \ "vector of address with a scalar register offset") \ @@ -7493,15 +7498,6 @@ const struct aarch64_opcode aarch64_opcode_table[] = "an 8-bit signed immediate") \ Y(IMMEDIATE, imm, "CSSC_UIMM8", 
0, F(FLD_CSSC_imm8), \ "an 8-bit unsigned immediate") \ - X(SVE_REGLIST, ins_sve_reglist, ext_sve_reglist_zt, "SME_Zt2", \ - 2 << OPD_F_OD_LSB, F(FLD_SVE_Zt), \ - "a list of 2 SVE vector registers") \ - X(SVE_REGLIST, ins_sve_reglist, ext_sve_reglist_zt, "SME_Zt3", \ - 3 << OPD_F_OD_LSB, F(FLD_SVE_Zt), \ - "a list of 3 SVE vector registers") \ - X(SVE_REGLIST, ins_sve_reglist, ext_sve_reglist_zt, "SME_Zt4", \ - 4 << OPD_F_OD_LSB, F(FLD_SVE_Zt), \ - "a list of 4 SVE vector registers") \ X(ADDRESS, ins_rcpc3_addr_opt_offset, ext_rcpc3_addr_opt_offset, \ "RCPC3_ADDR_OPT_POSTIND", 0, F(FLD_opc2), \ "an address with post-incrementing by ammount of loaded bytes") \ -- cgit v1.1