author | Tim Newsome <tim@sifive.com> | 2020-02-14 14:54:05 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2020-02-14 14:54:05 -0800 |
commit | 95462a8a35556259bd4555362f5367f8b48012f1 (patch) | |
tree | 08498c249fe1c22a6cdb54dd2e2743b2af450587 | |
parent | 7cb8843794a258380b7c37509e5c693977675b2a (diff) | |
Add support for vector register access (#448)
* WIP
Change-Id: I0264a73b7f7d2ce89cc0b80692dbf81d9cdcc2fd
* Reading v* registers appears to work.
Can't really test it though, because gdb doesn't print them right.
Change-Id: I8d66339371c564a493d32f15c3d114b738a455c5
* Total hack to communicate registers to gdb.
Change-Id: Id06c819675f2a5bcaf751e322d95a7d71c633765
* Implement writing vector registers.
Fixed reading vector registers.
Change-Id: I8f06aa5ee5020b3213a4f68644c205c9d6b9d214
* Show gdb the actual size of the vector registers.
This length may be different per hart.
Change-Id: I92e95383da82ee7a5c995822a53d51b1ea933493
* Remove outdated todo comment.
Change-Id: Ic9158b002858f0d15a6452773b095aa5f4501128
* Removed TODO comment.
Filed #449 to track this.
Change-Id: I5277b19e545df2024f34cda39158ddf7d0d89d47
* Nicely handle some errors reading/writing V regs.
Change-Id: Ia7bb63a5f9433d9f7b46496b2c0994864cfc4a09
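With this patch the vector state is exposed to the debugger like ordinary registers. A hypothetical gdb session for illustration (the register names come from this patch; as the messages above note, gdb of this era does not yet format the v* registers correctly, so output is omitted):

```
(gdb) info registers vlenb vl vtype    # vector CSRs, values reported per hart
(gdb) info registers v0                # vlenb bytes wide, as advertised by OpenOCD
(gdb) set $vstart = 0                  # vector CSRs are writable like any other CSR
```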
-rw-r--r-- | src/target/riscv/encoding.h | 1223
-rw-r--r-- | src/target/riscv/gdb_regs.h | 18
-rw-r--r-- | src/target/riscv/opcodes.h | 30
-rw-r--r-- | src/target/riscv/program.c | 3
-rw-r--r-- | src/target/riscv/riscv-013.c | 272
-rw-r--r-- | src/target/riscv/riscv.c | 261
-rw-r--r-- | src/target/riscv/riscv.h | 20
7 files changed, 1776 insertions, 51 deletions
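Before the per-file hunks, here is how the new opcodes.h encoders below (vsetvli, vmv_x_s, vslide1down_vx) are meant to compose into a vector register read. This is a condensed, hypothetical sketch, not the patch's exact riscv-013.c code: error paths are trimmed, and the full change also manages mstatus.VS (see prep_for_register_access below) and the vtype/vl CSRs around the access.

```c
/* Sketch: read vector register vnum (0..31) on a hart with XLEN-bit GPRs
 * and vlenb bytes per vector register.  Assumes the real code has already
 * used vsetvli to set SEW = XLEN and vl = num_elements (the vtype immediate
 * encoding is elided here).  Each iteration copies element 0 of the
 * register into s0, then rotates the register down by one element; because
 * the value just read is fed back in at the top, the register holds its
 * original value again after a full pass. */
static int read_vreg_sketch(struct target *target, unsigned vnum,
		uint8_t *value, unsigned xlen, unsigned vlenb)
{
	unsigned num_elements = vlenb * 8 / xlen;
	for (unsigned i = 0; i < num_elements; i++) {
		struct riscv_program program;
		riscv_program_init(&program, target);
		/* vmv.x.s s0, vN ; vslide1down.vx vN, vN, s0 (vm=1: unmasked) */
		riscv_program_insert(&program, vmv_x_s(S0, vnum));
		riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, 1));
		if (riscv_program_exec(&program, target) != ERROR_OK)
			return ERROR_FAIL;
		uint64_t element;
		if (register_read_direct(target, &element, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u64(value, xlen * i, xlen, element);
	}
	return ERROR_OK;
}
```

Writes plausibly run the same rotation with the data flow reversed: each new element is placed in s0 and inserted with vslide1down_vx, so after num_elements iterations the register holds the new value.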
diff --git a/src/target/riscv/encoding.h b/src/target/riscv/encoding.h index e214c0c..6624d18 100644 --- a/src/target/riscv/encoding.h +++ b/src/target/riscv/encoding.h @@ -22,6 +22,7 @@ #define MSTATUS_TVM 0x00100000 #define MSTATUS_TW 0x00200000 #define MSTATUS_TSR 0x00400000 +#define MSTATUS_VS 0x01800000 #define MSTATUS32_SD 0x80000000 #define MSTATUS_UXL 0x0000000300000000 #define MSTATUS_SXL 0x0000000C00000000 @@ -36,6 +37,7 @@ #define SSTATUS_XS 0x00018000 #define SSTATUS_SUM 0x00040000 #define SSTATUS_MXR 0x00080000 +#define SSTATUS_VS 0x01800000 #define SSTATUS32_SD 0x80000000 #define SSTATUS_UXL 0x0000000300000000 #define SSTATUS64_SD 0x8000000000000000 @@ -191,7 +193,6 @@ #ifdef __GNUC__ -/* #define read_csr(reg) ({ unsigned long __tmp; \ asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \ __tmp; }) @@ -210,7 +211,6 @@ #define clear_csr(reg, bit) ({ unsigned long __tmp; \ asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ __tmp; }) - */ #define rdtime() read_csr(time) #define rdcycle() read_csr(cycle) @@ -223,7 +223,7 @@ #endif #endif -/* Automatically generated by parse-opcodes. */ +/* Automatically generated by parse_opcodes. */ #ifndef RISCV_ENCODING_H #define RISCV_ENCODING_H #define MATCH_BEQ 0x63 @@ -426,6 +426,10 @@ #define MASK_CSRRSI 0x707f #define MATCH_CSRRCI 0x7073 #define MASK_CSRRCI 0x707f +#define MATCH_HFENCE_VVMA 0x22000073 +#define MASK_HFENCE_VVMA 0xfe007fff +#define MATCH_HFENCE_GVMA 0x62000073 +#define MASK_HFENCE_GVMA 0xfe007fff #define MATCH_FADD_S 0x53 #define MASK_FADD_S 0xfe00007f #define MATCH_FSUB_S 0x8000053 @@ -628,6 +632,12 @@ #define MASK_C_JALR 0xf07f #define MATCH_C_EBREAK 0x9002 #define MASK_C_EBREAK 0xffff +#define MATCH_C_SRLI_RV32 0x8001 +#define MASK_C_SRLI_RV32 0xfc03 +#define MATCH_C_SRAI_RV32 0x8401 +#define MASK_C_SRAI_RV32 0xfc03 +#define MATCH_C_SLLI_RV32 0x2 +#define MASK_C_SLLI_RV32 0xf003 #define MATCH_C_LD 0x6000 #define MASK_C_LD 0xe003 #define MATCH_C_SD 0xe000 @@ -638,6 +648,14 @@ #define MASK_C_LDSP 0xe003 #define MATCH_C_SDSP 0xe002 #define MASK_C_SDSP 0xe003 +#define MATCH_C_LQ 0x2000 +#define MASK_C_LQ 0xe003 +#define MATCH_C_SQ 0xa000 +#define MASK_C_SQ 0xe003 +#define MATCH_C_LQSP 0x2002 +#define MASK_C_LQSP 0xe003 +#define MATCH_C_SQSP 0xa002 +#define MASK_C_SQSP 0xe003 #define MATCH_C_ADDI4SPN 0x0 #define MASK_C_ADDI4SPN 0xe003 #define MATCH_C_FLD 0x2000 @@ -750,9 +768,756 @@ #define MASK_CUSTOM3_RD_RS1 0x707f #define MATCH_CUSTOM3_RD_RS1_RS2 0x707b #define MASK_CUSTOM3_RD_RS1_RS2 0x707f +#define MATCH_VSETVLI 0x7057 +#define MASK_VSETVLI 0x8000707f +#define MATCH_VSETVL 0x80007057 +#define MASK_VSETVL 0xfe00707f +#define MATCH_VLB_V 0x10000007 +#define MASK_VLB_V 0x1df0707f +#define MATCH_VLH_V 0x10005007 +#define MASK_VLH_V 0x1df0707f +#define MATCH_VLW_V 0x10006007 +#define MASK_VLW_V 0x1df0707f +#define MATCH_VLE_V 0x7007 +#define MASK_VLE_V 0x1df0707f +#define MATCH_VLBU_V 0x7 +#define MASK_VLBU_V 0x1df0707f +#define MATCH_VLHU_V 0x5007 +#define MASK_VLHU_V 0x1df0707f +#define MATCH_VLWU_V 0x6007 +#define MASK_VLWU_V 0x1df0707f +#define MATCH_VSB_V 0x27 +#define MASK_VSB_V 0x1df0707f +#define MATCH_VSH_V 0x5027 +#define MASK_VSH_V 0x1df0707f +#define MATCH_VSW_V 0x6027 +#define MASK_VSW_V 0x1df0707f +#define MATCH_VSE_V 0x7027 +#define MASK_VSE_V 0x1df0707f +#define MATCH_VLSB_V 0x18000007 +#define MASK_VLSB_V 0x1c00707f +#define MATCH_VLSH_V 0x18005007 +#define MASK_VLSH_V 0x1c00707f +#define MATCH_VLSW_V 0x18006007 +#define MASK_VLSW_V 0x1c00707f +#define MATCH_VLSE_V 0x8007007 +#define MASK_VLSE_V 
0x1c00707f +#define MATCH_VLSBU_V 0x8000007 +#define MASK_VLSBU_V 0x1c00707f +#define MATCH_VLSHU_V 0x8005007 +#define MASK_VLSHU_V 0x1c00707f +#define MATCH_VLSWU_V 0x8006007 +#define MASK_VLSWU_V 0x1c00707f +#define MATCH_VSSB_V 0x8000027 +#define MASK_VSSB_V 0x1c00707f +#define MATCH_VSSH_V 0x8005027 +#define MASK_VSSH_V 0x1c00707f +#define MATCH_VSSW_V 0x8006027 +#define MASK_VSSW_V 0x1c00707f +#define MATCH_VSSE_V 0x8007027 +#define MASK_VSSE_V 0x1c00707f +#define MATCH_VLXB_V 0x1c000007 +#define MASK_VLXB_V 0x1c00707f +#define MATCH_VLXH_V 0x1c005007 +#define MASK_VLXH_V 0x1c00707f +#define MATCH_VLXW_V 0x1c006007 +#define MASK_VLXW_V 0x1c00707f +#define MATCH_VLXE_V 0xc007007 +#define MASK_VLXE_V 0x1c00707f +#define MATCH_VLXBU_V 0xc000007 +#define MASK_VLXBU_V 0x1c00707f +#define MATCH_VLXHU_V 0xc005007 +#define MASK_VLXHU_V 0x1c00707f +#define MATCH_VLXWU_V 0xc006007 +#define MASK_VLXWU_V 0x1c00707f +#define MATCH_VSXB_V 0xc000027 +#define MASK_VSXB_V 0x1c00707f +#define MATCH_VSXH_V 0xc005027 +#define MASK_VSXH_V 0x1c00707f +#define MATCH_VSXW_V 0xc006027 +#define MASK_VSXW_V 0x1c00707f +#define MATCH_VSXE_V 0xc007027 +#define MASK_VSXE_V 0x1c00707f +#define MATCH_VSUXB_V 0x1c000027 +#define MASK_VSUXB_V 0xfc00707f +#define MATCH_VSUXH_V 0x1c005027 +#define MASK_VSUXH_V 0xfc00707f +#define MATCH_VSUXW_V 0x1c006027 +#define MASK_VSUXW_V 0xfc00707f +#define MATCH_VSUXE_V 0x1c007027 +#define MASK_VSUXE_V 0xfc00707f +#define MATCH_VLBFF_V 0x11000007 +#define MASK_VLBFF_V 0x1df0707f +#define MATCH_VLHFF_V 0x11005007 +#define MASK_VLHFF_V 0x1df0707f +#define MATCH_VLWFF_V 0x11006007 +#define MASK_VLWFF_V 0x1df0707f +#define MATCH_VLEFF_V 0x1007007 +#define MASK_VLEFF_V 0x1df0707f +#define MATCH_VLBUFF_V 0x1000007 +#define MASK_VLBUFF_V 0x1df0707f +#define MATCH_VLHUFF_V 0x1005007 +#define MASK_VLHUFF_V 0x1df0707f +#define MATCH_VLWUFF_V 0x1006007 +#define MASK_VLWUFF_V 0x1df0707f +#define MATCH_VL1R_V 0x2807007 +#define MASK_VL1R_V 0xfff0707f +#define MATCH_VS1R_V 0x2807027 +#define MASK_VS1R_V 0xfff0707f +#define MATCH_VFADD_VF 0x5057 +#define MASK_VFADD_VF 0xfc00707f +#define MATCH_VFSUB_VF 0x8005057 +#define MASK_VFSUB_VF 0xfc00707f +#define MATCH_VFMIN_VF 0x10005057 +#define MASK_VFMIN_VF 0xfc00707f +#define MATCH_VFMAX_VF 0x18005057 +#define MASK_VFMAX_VF 0xfc00707f +#define MATCH_VFSGNJ_VF 0x20005057 +#define MASK_VFSGNJ_VF 0xfc00707f +#define MATCH_VFSGNJN_VF 0x24005057 +#define MASK_VFSGNJN_VF 0xfc00707f +#define MATCH_VFSGNJX_VF 0x28005057 +#define MASK_VFSGNJX_VF 0xfc00707f +#define MATCH_VFMV_S_F 0x42005057 +#define MASK_VFMV_S_F 0xfff0707f +#define MATCH_VFMERGE_VFM 0x5c005057 +#define MASK_VFMERGE_VFM 0xfe00707f +#define MATCH_VFMV_V_F 0x5e005057 +#define MASK_VFMV_V_F 0xfff0707f +#define MATCH_VMFEQ_VF 0x60005057 +#define MASK_VMFEQ_VF 0xfc00707f +#define MATCH_VMFLE_VF 0x64005057 +#define MASK_VMFLE_VF 0xfc00707f +#define MATCH_VMFLT_VF 0x6c005057 +#define MASK_VMFLT_VF 0xfc00707f +#define MATCH_VMFNE_VF 0x70005057 +#define MASK_VMFNE_VF 0xfc00707f +#define MATCH_VMFGT_VF 0x74005057 +#define MASK_VMFGT_VF 0xfc00707f +#define MATCH_VMFGE_VF 0x7c005057 +#define MASK_VMFGE_VF 0xfc00707f +#define MATCH_VFDIV_VF 0x80005057 +#define MASK_VFDIV_VF 0xfc00707f +#define MATCH_VFRDIV_VF 0x84005057 +#define MASK_VFRDIV_VF 0xfc00707f +#define MATCH_VFMUL_VF 0x90005057 +#define MASK_VFMUL_VF 0xfc00707f +#define MATCH_VFRSUB_VF 0x9c005057 +#define MASK_VFRSUB_VF 0xfc00707f +#define MATCH_VFMADD_VF 0xa0005057 +#define MASK_VFMADD_VF 0xfc00707f +#define MATCH_VFNMADD_VF 0xa4005057 
+#define MASK_VFNMADD_VF 0xfc00707f +#define MATCH_VFMSUB_VF 0xa8005057 +#define MASK_VFMSUB_VF 0xfc00707f +#define MATCH_VFNMSUB_VF 0xac005057 +#define MASK_VFNMSUB_VF 0xfc00707f +#define MATCH_VFMACC_VF 0xb0005057 +#define MASK_VFMACC_VF 0xfc00707f +#define MATCH_VFNMACC_VF 0xb4005057 +#define MASK_VFNMACC_VF 0xfc00707f +#define MATCH_VFMSAC_VF 0xb8005057 +#define MASK_VFMSAC_VF 0xfc00707f +#define MATCH_VFNMSAC_VF 0xbc005057 +#define MASK_VFNMSAC_VF 0xfc00707f +#define MATCH_VFWADD_VF 0xc0005057 +#define MASK_VFWADD_VF 0xfc00707f +#define MATCH_VFWSUB_VF 0xc8005057 +#define MASK_VFWSUB_VF 0xfc00707f +#define MATCH_VFWADD_WF 0xd0005057 +#define MASK_VFWADD_WF 0xfc00707f +#define MATCH_VFWSUB_WF 0xd8005057 +#define MASK_VFWSUB_WF 0xfc00707f +#define MATCH_VFWMUL_VF 0xe0005057 +#define MASK_VFWMUL_VF 0xfc00707f +#define MATCH_VFWMACC_VF 0xf0005057 +#define MASK_VFWMACC_VF 0xfc00707f +#define MATCH_VFWNMACC_VF 0xf4005057 +#define MASK_VFWNMACC_VF 0xfc00707f +#define MATCH_VFWMSAC_VF 0xf8005057 +#define MASK_VFWMSAC_VF 0xfc00707f +#define MATCH_VFWNMSAC_VF 0xfc005057 +#define MASK_VFWNMSAC_VF 0xfc00707f +#define MATCH_VFADD_VV 0x1057 +#define MASK_VFADD_VV 0xfc00707f +#define MATCH_VFREDSUM_VS 0x4001057 +#define MASK_VFREDSUM_VS 0xfc00707f +#define MATCH_VFSUB_VV 0x8001057 +#define MASK_VFSUB_VV 0xfc00707f +#define MATCH_VFREDOSUM_VS 0xc001057 +#define MASK_VFREDOSUM_VS 0xfc00707f +#define MATCH_VFMIN_VV 0x10001057 +#define MASK_VFMIN_VV 0xfc00707f +#define MATCH_VFREDMIN_VS 0x14001057 +#define MASK_VFREDMIN_VS 0xfc00707f +#define MATCH_VFMAX_VV 0x18001057 +#define MASK_VFMAX_VV 0xfc00707f +#define MATCH_VFREDMAX_VS 0x1c001057 +#define MASK_VFREDMAX_VS 0xfc00707f +#define MATCH_VFSGNJ_VV 0x20001057 +#define MASK_VFSGNJ_VV 0xfc00707f +#define MATCH_VFSGNJN_VV 0x24001057 +#define MASK_VFSGNJN_VV 0xfc00707f +#define MATCH_VFSGNJX_VV 0x28001057 +#define MASK_VFSGNJX_VV 0xfc00707f +#define MATCH_VFMV_F_S 0x42001057 +#define MASK_VFMV_F_S 0xfe0ff07f +#define MATCH_VMFEQ_VV 0x60001057 +#define MASK_VMFEQ_VV 0xfc00707f +#define MATCH_VMFLE_VV 0x64001057 +#define MASK_VMFLE_VV 0xfc00707f +#define MATCH_VMFLT_VV 0x6c001057 +#define MASK_VMFLT_VV 0xfc00707f +#define MATCH_VMFNE_VV 0x70001057 +#define MASK_VMFNE_VV 0xfc00707f +#define MATCH_VFDIV_VV 0x80001057 +#define MASK_VFDIV_VV 0xfc00707f +#define MATCH_VFMUL_VV 0x90001057 +#define MASK_VFMUL_VV 0xfc00707f +#define MATCH_VFMADD_VV 0xa0001057 +#define MASK_VFMADD_VV 0xfc00707f +#define MATCH_VFNMADD_VV 0xa4001057 +#define MASK_VFNMADD_VV 0xfc00707f +#define MATCH_VFMSUB_VV 0xa8001057 +#define MASK_VFMSUB_VV 0xfc00707f +#define MATCH_VFNMSUB_VV 0xac001057 +#define MASK_VFNMSUB_VV 0xfc00707f +#define MATCH_VFMACC_VV 0xb0001057 +#define MASK_VFMACC_VV 0xfc00707f +#define MATCH_VFNMACC_VV 0xb4001057 +#define MASK_VFNMACC_VV 0xfc00707f +#define MATCH_VFMSAC_VV 0xb8001057 +#define MASK_VFMSAC_VV 0xfc00707f +#define MATCH_VFNMSAC_VV 0xbc001057 +#define MASK_VFNMSAC_VV 0xfc00707f +#define MATCH_VFCVT_XU_F_V 0x88001057 +#define MASK_VFCVT_XU_F_V 0xfc0ff07f +#define MATCH_VFCVT_X_F_V 0x88009057 +#define MASK_VFCVT_X_F_V 0xfc0ff07f +#define MATCH_VFCVT_F_XU_V 0x88011057 +#define MASK_VFCVT_F_XU_V 0xfc0ff07f +#define MATCH_VFCVT_F_X_V 0x88019057 +#define MASK_VFCVT_F_X_V 0xfc0ff07f +#define MATCH_VFWCVT_XU_F_V 0x88041057 +#define MASK_VFWCVT_XU_F_V 0xfc0ff07f +#define MATCH_VFWCVT_X_F_V 0x88049057 +#define MASK_VFWCVT_X_F_V 0xfc0ff07f +#define MATCH_VFWCVT_F_XU_V 0x88051057 +#define MASK_VFWCVT_F_XU_V 0xfc0ff07f +#define MATCH_VFWCVT_F_X_V 0x88059057 +#define 
MASK_VFWCVT_F_X_V 0xfc0ff07f +#define MATCH_VFWCVT_F_F_V 0x88061057 +#define MASK_VFWCVT_F_F_V 0xfc0ff07f +#define MATCH_VFNCVT_XU_F_W 0x88081057 +#define MASK_VFNCVT_XU_F_W 0xfc0ff07f +#define MATCH_VFNCVT_X_F_W 0x88089057 +#define MASK_VFNCVT_X_F_W 0xfc0ff07f +#define MATCH_VFNCVT_F_XU_W 0x88091057 +#define MASK_VFNCVT_F_XU_W 0xfc0ff07f +#define MATCH_VFNCVT_F_X_W 0x88099057 +#define MASK_VFNCVT_F_X_W 0xfc0ff07f +#define MATCH_VFNCVT_F_F_W 0x880a1057 +#define MASK_VFNCVT_F_F_W 0xfc0ff07f +#define MATCH_VFNCVT_ROD_F_F_W 0x880a9057 +#define MASK_VFNCVT_ROD_F_F_W 0xfc0ff07f +#define MATCH_VFSQRT_V 0x8c001057 +#define MASK_VFSQRT_V 0xfc0ff07f +#define MATCH_VFCLASS_V 0x8c081057 +#define MASK_VFCLASS_V 0xfc0ff07f +#define MATCH_VFWADD_VV 0xc0001057 +#define MASK_VFWADD_VV 0xfc00707f +#define MATCH_VFWREDSUM_VS 0xc4001057 +#define MASK_VFWREDSUM_VS 0xfc00707f +#define MATCH_VFWSUB_VV 0xc8001057 +#define MASK_VFWSUB_VV 0xfc00707f +#define MATCH_VFWREDOSUM_VS 0xcc001057 +#define MASK_VFWREDOSUM_VS 0xfc00707f +#define MATCH_VFWADD_WV 0xd0001057 +#define MASK_VFWADD_WV 0xfc00707f +#define MATCH_VFWSUB_WV 0xd8001057 +#define MASK_VFWSUB_WV 0xfc00707f +#define MATCH_VFWMUL_VV 0xe0001057 +#define MASK_VFWMUL_VV 0xfc00707f +#define MATCH_VFDOT_VV 0xe4001057 +#define MASK_VFDOT_VV 0xfc00707f +#define MATCH_VFWMACC_VV 0xf0001057 +#define MASK_VFWMACC_VV 0xfc00707f +#define MATCH_VFWNMACC_VV 0xf4001057 +#define MASK_VFWNMACC_VV 0xfc00707f +#define MATCH_VFWMSAC_VV 0xf8001057 +#define MASK_VFWMSAC_VV 0xfc00707f +#define MATCH_VFWNMSAC_VV 0xfc001057 +#define MASK_VFWNMSAC_VV 0xfc00707f +#define MATCH_VADD_VX 0x4057 +#define MASK_VADD_VX 0xfc00707f +#define MATCH_VSUB_VX 0x8004057 +#define MASK_VSUB_VX 0xfc00707f +#define MATCH_VRSUB_VX 0xc004057 +#define MASK_VRSUB_VX 0xfc00707f +#define MATCH_VMINU_VX 0x10004057 +#define MASK_VMINU_VX 0xfc00707f +#define MATCH_VMIN_VX 0x14004057 +#define MASK_VMIN_VX 0xfc00707f +#define MATCH_VMAXU_VX 0x18004057 +#define MASK_VMAXU_VX 0xfc00707f +#define MATCH_VMAX_VX 0x1c004057 +#define MASK_VMAX_VX 0xfc00707f +#define MATCH_VAND_VX 0x24004057 +#define MASK_VAND_VX 0xfc00707f +#define MATCH_VOR_VX 0x28004057 +#define MASK_VOR_VX 0xfc00707f +#define MATCH_VXOR_VX 0x2c004057 +#define MASK_VXOR_VX 0xfc00707f +#define MATCH_VRGATHER_VX 0x30004057 +#define MASK_VRGATHER_VX 0xfc00707f +#define MATCH_VSLIDEUP_VX 0x38004057 +#define MASK_VSLIDEUP_VX 0xfc00707f +#define MATCH_VSLIDEDOWN_VX 0x3c004057 +#define MASK_VSLIDEDOWN_VX 0xfc00707f +#define MATCH_VADC_VXM 0x40004057 +#define MASK_VADC_VXM 0xfe00707f +#define MATCH_VMADC_VXM 0x44004057 +#define MASK_VMADC_VXM 0xfc00707f +#define MATCH_VSBC_VXM 0x48004057 +#define MASK_VSBC_VXM 0xfe00707f +#define MATCH_VMSBC_VXM 0x4c004057 +#define MASK_VMSBC_VXM 0xfc00707f +#define MATCH_VMERGE_VXM 0x5c004057 +#define MASK_VMERGE_VXM 0xfe00707f +#define MATCH_VMV_V_X 0x5e004057 +#define MASK_VMV_V_X 0xfff0707f +#define MATCH_VMSEQ_VX 0x60004057 +#define MASK_VMSEQ_VX 0xfc00707f +#define MATCH_VMSNE_VX 0x64004057 +#define MASK_VMSNE_VX 0xfc00707f +#define MATCH_VMSLTU_VX 0x68004057 +#define MASK_VMSLTU_VX 0xfc00707f +#define MATCH_VMSLT_VX 0x6c004057 +#define MASK_VMSLT_VX 0xfc00707f +#define MATCH_VMSLEU_VX 0x70004057 +#define MASK_VMSLEU_VX 0xfc00707f +#define MATCH_VMSLE_VX 0x74004057 +#define MASK_VMSLE_VX 0xfc00707f +#define MATCH_VMSGTU_VX 0x78004057 +#define MASK_VMSGTU_VX 0xfc00707f +#define MATCH_VMSGT_VX 0x7c004057 +#define MASK_VMSGT_VX 0xfc00707f +#define MATCH_VSADDU_VX 0x80004057 +#define MASK_VSADDU_VX 0xfc00707f +#define 
MATCH_VSADD_VX 0x84004057 +#define MASK_VSADD_VX 0xfc00707f +#define MATCH_VSSUBU_VX 0x88004057 +#define MASK_VSSUBU_VX 0xfc00707f +#define MATCH_VSSUB_VX 0x8c004057 +#define MASK_VSSUB_VX 0xfc00707f +#define MATCH_VSLL_VX 0x94004057 +#define MASK_VSLL_VX 0xfc00707f +#define MATCH_VSMUL_VX 0x9c004057 +#define MASK_VSMUL_VX 0xfc00707f +#define MATCH_VSRL_VX 0xa0004057 +#define MASK_VSRL_VX 0xfc00707f +#define MATCH_VSRA_VX 0xa4004057 +#define MASK_VSRA_VX 0xfc00707f +#define MATCH_VSSRL_VX 0xa8004057 +#define MASK_VSSRL_VX 0xfc00707f +#define MATCH_VSSRA_VX 0xac004057 +#define MASK_VSSRA_VX 0xfc00707f +#define MATCH_VNSRL_WX 0xb0004057 +#define MASK_VNSRL_WX 0xfc00707f +#define MATCH_VNSRA_WX 0xb4004057 +#define MASK_VNSRA_WX 0xfc00707f +#define MATCH_VNCLIPU_WX 0xb8004057 +#define MASK_VNCLIPU_WX 0xfc00707f +#define MATCH_VNCLIP_WX 0xbc004057 +#define MASK_VNCLIP_WX 0xfc00707f +#define MATCH_VQMACCU_VX 0xf0004057 +#define MASK_VQMACCU_VX 0xfc00707f +#define MATCH_VQMACC_VX 0xf4004057 +#define MASK_VQMACC_VX 0xfc00707f +#define MATCH_VQMACCUS_VX 0xf8004057 +#define MASK_VQMACCUS_VX 0xfc00707f +#define MATCH_VQMACCSU_VX 0xfc004057 +#define MASK_VQMACCSU_VX 0xfc00707f +#define MATCH_VADD_VV 0x57 +#define MASK_VADD_VV 0xfc00707f +#define MATCH_VSUB_VV 0x8000057 +#define MASK_VSUB_VV 0xfc00707f +#define MATCH_VMINU_VV 0x10000057 +#define MASK_VMINU_VV 0xfc00707f +#define MATCH_VMIN_VV 0x14000057 +#define MASK_VMIN_VV 0xfc00707f +#define MATCH_VMAXU_VV 0x18000057 +#define MASK_VMAXU_VV 0xfc00707f +#define MATCH_VMAX_VV 0x1c000057 +#define MASK_VMAX_VV 0xfc00707f +#define MATCH_VAND_VV 0x24000057 +#define MASK_VAND_VV 0xfc00707f +#define MATCH_VOR_VV 0x28000057 +#define MASK_VOR_VV 0xfc00707f +#define MATCH_VXOR_VV 0x2c000057 +#define MASK_VXOR_VV 0xfc00707f +#define MATCH_VRGATHER_VV 0x30000057 +#define MASK_VRGATHER_VV 0xfc00707f +#define MATCH_VADC_VVM 0x40000057 +#define MASK_VADC_VVM 0xfe00707f +#define MATCH_VMADC_VVM 0x44000057 +#define MASK_VMADC_VVM 0xfc00707f +#define MATCH_VSBC_VVM 0x48000057 +#define MASK_VSBC_VVM 0xfe00707f +#define MATCH_VMSBC_VVM 0x4c000057 +#define MASK_VMSBC_VVM 0xfc00707f +#define MATCH_VMERGE_VVM 0x5c000057 +#define MASK_VMERGE_VVM 0xfe00707f +#define MATCH_VMV_V_V 0x5e000057 +#define MASK_VMV_V_V 0xfff0707f +#define MATCH_VMSEQ_VV 0x60000057 +#define MASK_VMSEQ_VV 0xfc00707f +#define MATCH_VMSNE_VV 0x64000057 +#define MASK_VMSNE_VV 0xfc00707f +#define MATCH_VMSLTU_VV 0x68000057 +#define MASK_VMSLTU_VV 0xfc00707f +#define MATCH_VMSLT_VV 0x6c000057 +#define MASK_VMSLT_VV 0xfc00707f +#define MATCH_VMSLEU_VV 0x70000057 +#define MASK_VMSLEU_VV 0xfc00707f +#define MATCH_VMSLE_VV 0x74000057 +#define MASK_VMSLE_VV 0xfc00707f +#define MATCH_VSADDU_VV 0x80000057 +#define MASK_VSADDU_VV 0xfc00707f +#define MATCH_VSADD_VV 0x84000057 +#define MASK_VSADD_VV 0xfc00707f +#define MATCH_VSSUBU_VV 0x88000057 +#define MASK_VSSUBU_VV 0xfc00707f +#define MATCH_VSSUB_VV 0x8c000057 +#define MASK_VSSUB_VV 0xfc00707f +#define MATCH_VSLL_VV 0x94000057 +#define MASK_VSLL_VV 0xfc00707f +#define MATCH_VSMUL_VV 0x9c000057 +#define MASK_VSMUL_VV 0xfc00707f +#define MATCH_VSRL_VV 0xa0000057 +#define MASK_VSRL_VV 0xfc00707f +#define MATCH_VSRA_VV 0xa4000057 +#define MASK_VSRA_VV 0xfc00707f +#define MATCH_VSSRL_VV 0xa8000057 +#define MASK_VSSRL_VV 0xfc00707f +#define MATCH_VSSRA_VV 0xac000057 +#define MASK_VSSRA_VV 0xfc00707f +#define MATCH_VNSRL_WV 0xb0000057 +#define MASK_VNSRL_WV 0xfc00707f +#define MATCH_VNSRA_WV 0xb4000057 +#define MASK_VNSRA_WV 0xfc00707f +#define MATCH_VNCLIPU_WV 
0xb8000057 +#define MASK_VNCLIPU_WV 0xfc00707f +#define MATCH_VNCLIP_WV 0xbc000057 +#define MASK_VNCLIP_WV 0xfc00707f +#define MATCH_VWREDSUMU_VS 0xc0000057 +#define MASK_VWREDSUMU_VS 0xfc00707f +#define MATCH_VWREDSUM_VS 0xc4000057 +#define MASK_VWREDSUM_VS 0xfc00707f +#define MATCH_VDOTU_VV 0xe0000057 +#define MASK_VDOTU_VV 0xfc00707f +#define MATCH_VDOT_VV 0xe4000057 +#define MASK_VDOT_VV 0xfc00707f +#define MATCH_VQMACCU_VV 0xf0000057 +#define MASK_VQMACCU_VV 0xfc00707f +#define MATCH_VQMACC_VV 0xf4000057 +#define MASK_VQMACC_VV 0xfc00707f +#define MATCH_VQMACCSU_VV 0xfc000057 +#define MASK_VQMACCSU_VV 0xfc00707f +#define MATCH_VADD_VI 0x3057 +#define MASK_VADD_VI 0xfc00707f +#define MATCH_VRSUB_VI 0xc003057 +#define MASK_VRSUB_VI 0xfc00707f +#define MATCH_VAND_VI 0x24003057 +#define MASK_VAND_VI 0xfc00707f +#define MATCH_VOR_VI 0x28003057 +#define MASK_VOR_VI 0xfc00707f +#define MATCH_VXOR_VI 0x2c003057 +#define MASK_VXOR_VI 0xfc00707f +#define MATCH_VRGATHER_VI 0x30003057 +#define MASK_VRGATHER_VI 0xfc00707f +#define MATCH_VSLIDEUP_VI 0x38003057 +#define MASK_VSLIDEUP_VI 0xfc00707f +#define MATCH_VSLIDEDOWN_VI 0x3c003057 +#define MASK_VSLIDEDOWN_VI 0xfc00707f +#define MATCH_VADC_VIM 0x40003057 +#define MASK_VADC_VIM 0xfe00707f +#define MATCH_VMADC_VIM 0x44003057 +#define MASK_VMADC_VIM 0xfc00707f +#define MATCH_VMERGE_VIM 0x5c003057 +#define MASK_VMERGE_VIM 0xfe00707f +#define MATCH_VMV_V_I 0x5e003057 +#define MASK_VMV_V_I 0xfff0707f +#define MATCH_VMSEQ_VI 0x60003057 +#define MASK_VMSEQ_VI 0xfc00707f +#define MATCH_VMSNE_VI 0x64003057 +#define MASK_VMSNE_VI 0xfc00707f +#define MATCH_VMSLEU_VI 0x70003057 +#define MASK_VMSLEU_VI 0xfc00707f +#define MATCH_VMSLE_VI 0x74003057 +#define MASK_VMSLE_VI 0xfc00707f +#define MATCH_VMSGTU_VI 0x78003057 +#define MASK_VMSGTU_VI 0xfc00707f +#define MATCH_VMSGT_VI 0x7c003057 +#define MASK_VMSGT_VI 0xfc00707f +#define MATCH_VSADDU_VI 0x80003057 +#define MASK_VSADDU_VI 0xfc00707f +#define MATCH_VSADD_VI 0x84003057 +#define MASK_VSADD_VI 0xfc00707f +#define MATCH_VSLL_VI 0x94003057 +#define MASK_VSLL_VI 0xfc00707f +#define MATCH_VMV1R_V 0x9e003057 +#define MASK_VMV1R_V 0xfe0ff07f +#define MATCH_VMV2R_V 0x9e00b057 +#define MASK_VMV2R_V 0xfe0ff07f +#define MATCH_VMV4R_V 0x9e01b057 +#define MASK_VMV4R_V 0xfe0ff07f +#define MATCH_VMV8R_V 0x9e03b057 +#define MASK_VMV8R_V 0xfe0ff07f +#define MATCH_VSRL_VI 0xa0003057 +#define MASK_VSRL_VI 0xfc00707f +#define MATCH_VSRA_VI 0xa4003057 +#define MASK_VSRA_VI 0xfc00707f +#define MATCH_VSSRL_VI 0xa8003057 +#define MASK_VSSRL_VI 0xfc00707f +#define MATCH_VSSRA_VI 0xac003057 +#define MASK_VSSRA_VI 0xfc00707f +#define MATCH_VNSRL_WI 0xb0003057 +#define MASK_VNSRL_WI 0xfc00707f +#define MATCH_VNSRA_WI 0xb4003057 +#define MASK_VNSRA_WI 0xfc00707f +#define MATCH_VNCLIPU_WI 0xb8003057 +#define MASK_VNCLIPU_WI 0xfc00707f +#define MATCH_VNCLIP_WI 0xbc003057 +#define MASK_VNCLIP_WI 0xfc00707f +#define MATCH_VREDSUM_VS 0x2057 +#define MASK_VREDSUM_VS 0xfc00707f +#define MATCH_VREDAND_VS 0x4002057 +#define MASK_VREDAND_VS 0xfc00707f +#define MATCH_VREDOR_VS 0x8002057 +#define MASK_VREDOR_VS 0xfc00707f +#define MATCH_VREDXOR_VS 0xc002057 +#define MASK_VREDXOR_VS 0xfc00707f +#define MATCH_VREDMINU_VS 0x10002057 +#define MASK_VREDMINU_VS 0xfc00707f +#define MATCH_VREDMIN_VS 0x14002057 +#define MASK_VREDMIN_VS 0xfc00707f +#define MATCH_VREDMAXU_VS 0x18002057 +#define MASK_VREDMAXU_VS 0xfc00707f +#define MATCH_VREDMAX_VS 0x1c002057 +#define MASK_VREDMAX_VS 0xfc00707f +#define MATCH_VAADDU_VV 0x20002057 +#define MASK_VAADDU_VV 
0xfc00707f +#define MATCH_VAADD_VV 0x24002057 +#define MASK_VAADD_VV 0xfc00707f +#define MATCH_VASUBU_VV 0x28002057 +#define MASK_VASUBU_VV 0xfc00707f +#define MATCH_VASUB_VV 0x2c002057 +#define MASK_VASUB_VV 0xfc00707f +#define MATCH_VMV_X_S 0x42002057 +#define MASK_VMV_X_S 0xfe0ff07f +#define MATCH_VCOMPRESS_VM 0x5e002057 +#define MASK_VCOMPRESS_VM 0xfe00707f +#define MATCH_VMANDNOT_MM 0x60002057 +#define MASK_VMANDNOT_MM 0xfc00707f +#define MATCH_VMAND_MM 0x64002057 +#define MASK_VMAND_MM 0xfc00707f +#define MATCH_VMOR_MM 0x68002057 +#define MASK_VMOR_MM 0xfc00707f +#define MATCH_VMXOR_MM 0x6c002057 +#define MASK_VMXOR_MM 0xfc00707f +#define MATCH_VMORNOT_MM 0x70002057 +#define MASK_VMORNOT_MM 0xfc00707f +#define MATCH_VMNAND_MM 0x74002057 +#define MASK_VMNAND_MM 0xfc00707f +#define MATCH_VMNOR_MM 0x78002057 +#define MASK_VMNOR_MM 0xfc00707f +#define MATCH_VMXNOR_MM 0x7c002057 +#define MASK_VMXNOR_MM 0xfc00707f +#define MATCH_VMSBF_M 0x5000a057 +#define MASK_VMSBF_M 0xfc0ff07f +#define MATCH_VMSOF_M 0x50012057 +#define MASK_VMSOF_M 0xfc0ff07f +#define MATCH_VMSIF_M 0x5001a057 +#define MASK_VMSIF_M 0xfc0ff07f +#define MATCH_VIOTA_M 0x50082057 +#define MASK_VIOTA_M 0xfc0ff07f +#define MATCH_VID_V 0x5008a057 +#define MASK_VID_V 0xfdfff07f +#define MATCH_VPOPC_M 0x40082057 +#define MASK_VPOPC_M 0xfc0ff07f +#define MATCH_VFIRST_M 0x4008a057 +#define MASK_VFIRST_M 0xfc0ff07f +#define MATCH_VDIVU_VV 0x80002057 +#define MASK_VDIVU_VV 0xfc00707f +#define MATCH_VDIV_VV 0x84002057 +#define MASK_VDIV_VV 0xfc00707f +#define MATCH_VREMU_VV 0x88002057 +#define MASK_VREMU_VV 0xfc00707f +#define MATCH_VREM_VV 0x8c002057 +#define MASK_VREM_VV 0xfc00707f +#define MATCH_VMULHU_VV 0x90002057 +#define MASK_VMULHU_VV 0xfc00707f +#define MATCH_VMUL_VV 0x94002057 +#define MASK_VMUL_VV 0xfc00707f +#define MATCH_VMULHSU_VV 0x98002057 +#define MASK_VMULHSU_VV 0xfc00707f +#define MATCH_VMULH_VV 0x9c002057 +#define MASK_VMULH_VV 0xfc00707f +#define MATCH_VMADD_VV 0xa4002057 +#define MASK_VMADD_VV 0xfc00707f +#define MATCH_VNMSUB_VV 0xac002057 +#define MASK_VNMSUB_VV 0xfc00707f +#define MATCH_VMACC_VV 0xb4002057 +#define MASK_VMACC_VV 0xfc00707f +#define MATCH_VNMSAC_VV 0xbc002057 +#define MASK_VNMSAC_VV 0xfc00707f +#define MATCH_VWADDU_VV 0xc0002057 +#define MASK_VWADDU_VV 0xfc00707f +#define MATCH_VWADD_VV 0xc4002057 +#define MASK_VWADD_VV 0xfc00707f +#define MATCH_VWSUBU_VV 0xc8002057 +#define MASK_VWSUBU_VV 0xfc00707f +#define MATCH_VWSUB_VV 0xcc002057 +#define MASK_VWSUB_VV 0xfc00707f +#define MATCH_VWADDU_WV 0xd0002057 +#define MASK_VWADDU_WV 0xfc00707f +#define MATCH_VWADD_WV 0xd4002057 +#define MASK_VWADD_WV 0xfc00707f +#define MATCH_VWSUBU_WV 0xd8002057 +#define MASK_VWSUBU_WV 0xfc00707f +#define MATCH_VWSUB_WV 0xdc002057 +#define MASK_VWSUB_WV 0xfc00707f +#define MATCH_VWMULU_VV 0xe0002057 +#define MASK_VWMULU_VV 0xfc00707f +#define MATCH_VWMULSU_VV 0xe8002057 +#define MASK_VWMULSU_VV 0xfc00707f +#define MATCH_VWMUL_VV 0xec002057 +#define MASK_VWMUL_VV 0xfc00707f +#define MATCH_VWMACCU_VV 0xf0002057 +#define MASK_VWMACCU_VV 0xfc00707f +#define MATCH_VWMACC_VV 0xf4002057 +#define MASK_VWMACC_VV 0xfc00707f +#define MATCH_VWMACCSU_VV 0xfc002057 +#define MASK_VWMACCSU_VV 0xfc00707f +#define MATCH_VAADDU_VX 0x20006057 +#define MASK_VAADDU_VX 0xfc00707f +#define MATCH_VAADD_VX 0x24006057 +#define MASK_VAADD_VX 0xfc00707f +#define MATCH_VASUBU_VX 0x28006057 +#define MASK_VASUBU_VX 0xfc00707f +#define MATCH_VASUB_VX 0x2c006057 +#define MASK_VASUB_VX 0xfc00707f +#define MATCH_VMV_S_X 0x42006057 +#define MASK_VMV_S_X 
0xfff0707f +#define MATCH_VSLIDE1UP_VX 0x38006057 +#define MASK_VSLIDE1UP_VX 0xfc00707f +#define MATCH_VSLIDE1DOWN_VX 0x3c006057 +#define MASK_VSLIDE1DOWN_VX 0xfc00707f +#define MATCH_VDIVU_VX 0x80006057 +#define MASK_VDIVU_VX 0xfc00707f +#define MATCH_VDIV_VX 0x84006057 +#define MASK_VDIV_VX 0xfc00707f +#define MATCH_VREMU_VX 0x88006057 +#define MASK_VREMU_VX 0xfc00707f +#define MATCH_VREM_VX 0x8c006057 +#define MASK_VREM_VX 0xfc00707f +#define MATCH_VMULHU_VX 0x90006057 +#define MASK_VMULHU_VX 0xfc00707f +#define MATCH_VMUL_VX 0x94006057 +#define MASK_VMUL_VX 0xfc00707f +#define MATCH_VMULHSU_VX 0x98006057 +#define MASK_VMULHSU_VX 0xfc00707f +#define MATCH_VMULH_VX 0x9c006057 +#define MASK_VMULH_VX 0xfc00707f +#define MATCH_VMADD_VX 0xa4006057 +#define MASK_VMADD_VX 0xfc00707f +#define MATCH_VNMSUB_VX 0xac006057 +#define MASK_VNMSUB_VX 0xfc00707f +#define MATCH_VMACC_VX 0xb4006057 +#define MASK_VMACC_VX 0xfc00707f +#define MATCH_VNMSAC_VX 0xbc006057 +#define MASK_VNMSAC_VX 0xfc00707f +#define MATCH_VWADDU_VX 0xc0006057 +#define MASK_VWADDU_VX 0xfc00707f +#define MATCH_VWADD_VX 0xc4006057 +#define MASK_VWADD_VX 0xfc00707f +#define MATCH_VWSUBU_VX 0xc8006057 +#define MASK_VWSUBU_VX 0xfc00707f +#define MATCH_VWSUB_VX 0xcc006057 +#define MASK_VWSUB_VX 0xfc00707f +#define MATCH_VWADDU_WX 0xd0006057 +#define MASK_VWADDU_WX 0xfc00707f +#define MATCH_VWADD_WX 0xd4006057 +#define MASK_VWADD_WX 0xfc00707f +#define MATCH_VWSUBU_WX 0xd8006057 +#define MASK_VWSUBU_WX 0xfc00707f +#define MATCH_VWSUB_WX 0xdc006057 +#define MASK_VWSUB_WX 0xfc00707f +#define MATCH_VWMULU_VX 0xe0006057 +#define MASK_VWMULU_VX 0xfc00707f +#define MATCH_VWMULSU_VX 0xe8006057 +#define MASK_VWMULSU_VX 0xfc00707f +#define MATCH_VWMUL_VX 0xec006057 +#define MASK_VWMUL_VX 0xfc00707f +#define MATCH_VWMACCU_VX 0xf0006057 +#define MASK_VWMACCU_VX 0xfc00707f +#define MATCH_VWMACC_VX 0xf4006057 +#define MASK_VWMACC_VX 0xfc00707f +#define MATCH_VWMACCUS_VX 0xf8006057 +#define MASK_VWMACCUS_VX 0xfc00707f +#define MATCH_VWMACCSU_VX 0xfc006057 +#define MASK_VWMACCSU_VX 0xfc00707f +#define MATCH_VAMOSWAPW_V 0x800602f +#define MASK_VAMOSWAPW_V 0xf800707f +#define MATCH_VAMOADDW_V 0x602f +#define MASK_VAMOADDW_V 0xf800707f +#define MATCH_VAMOXORW_V 0x2000602f +#define MASK_VAMOXORW_V 0xf800707f +#define MATCH_VAMOANDW_V 0x6000602f +#define MASK_VAMOANDW_V 0xf800707f +#define MATCH_VAMOORW_V 0x4000602f +#define MASK_VAMOORW_V 0xf800707f +#define MATCH_VAMOMINW_V 0x8000602f +#define MASK_VAMOMINW_V 0xf800707f +#define MATCH_VAMOMAXW_V 0xa000602f +#define MASK_VAMOMAXW_V 0xf800707f +#define MATCH_VAMOMINUW_V 0xc000602f +#define MASK_VAMOMINUW_V 0xf800707f +#define MATCH_VAMOMAXUW_V 0xe000602f +#define MASK_VAMOMAXUW_V 0xf800707f +#define MATCH_VAMOSWAPE_V 0x800702f +#define MASK_VAMOSWAPE_V 0xf800707f +#define MATCH_VAMOADDE_V 0x702f +#define MASK_VAMOADDE_V 0xf800707f +#define MATCH_VAMOXORE_V 0x2000702f +#define MASK_VAMOXORE_V 0xf800707f +#define MATCH_VAMOANDE_V 0x6000702f +#define MASK_VAMOANDE_V 0xf800707f +#define MATCH_VAMOORE_V 0x4000702f +#define MASK_VAMOORE_V 0xf800707f +#define MATCH_VAMOMINE_V 0x8000702f +#define MASK_VAMOMINE_V 0xf800707f +#define MATCH_VAMOMAXE_V 0xa000702f +#define MASK_VAMOMAXE_V 0xf800707f +#define MATCH_VAMOMINUE_V 0xc000702f +#define MASK_VAMOMINUE_V 0xf800707f +#define MATCH_VAMOMAXUE_V 0xe000702f +#define MASK_VAMOMAXUE_V 0xf800707f #define CSR_FFLAGS 0x1 #define CSR_FRM 0x2 #define CSR_FCSR 0x3 +#define CSR_USTATUS 0x0 +#define CSR_UIE 0x4 +#define CSR_UTVEC 0x5 +#define CSR_VSTART 0x8 +#define 
CSR_VXSAT 0x9 +#define CSR_VXRM 0xa +#define CSR_USCRATCH 0x40 +#define CSR_UEPC 0x41 +#define CSR_UCAUSE 0x42 +#define CSR_UTVAL 0x43 +#define CSR_UIP 0x44 #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 #define CSR_INSTRET 0xc02 @@ -785,6 +1550,9 @@ #define CSR_HPMCOUNTER29 0xc1d #define CSR_HPMCOUNTER30 0xc1e #define CSR_HPMCOUNTER31 0xc1f +#define CSR_VL 0xc20 +#define CSR_VTYPE 0xc21 +#define CSR_VLENB 0xc22 #define CSR_SSTATUS 0x100 #define CSR_SIE 0x104 #define CSR_STVEC 0x105 @@ -795,6 +1563,35 @@ #define CSR_STVAL 0x143 #define CSR_SIP 0x144 #define CSR_SATP 0x180 +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSATP 0x280 +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HGATP 0x680 +#define CSR_UTVT 0x7 +#define CSR_UNXTI 0x45 +#define CSR_UINTSTATUS 0x46 +#define CSR_USCRATCHCSW 0x48 +#define CSR_USCRATCHCSWL 0x49 +#define CSR_STVT 0x107 +#define CSR_SNXTI 0x145 +#define CSR_SINTSTATUS 0x146 +#define CSR_SSCRATCHCSW 0x148 +#define CSR_SSCRATCHCSWL 0x149 +#define CSR_MTVT 0x307 +#define CSR_MNXTI 0x345 +#define CSR_MINTSTATUS 0x346 +#define CSR_MSCRATCHCSW 0x348 +#define CSR_MSCRATCHCSWL 0x349 #define CSR_MSTATUS 0x300 #define CSR_MISA 0x301 #define CSR_MEDELEG 0x302 @@ -1078,6 +1875,8 @@ DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC) DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI) DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI) DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI) +DECLARE_INSN(hfence_vvma, MATCH_HFENCE_VVMA, MASK_HFENCE_VVMA) +DECLARE_INSN(hfence_gvma, MATCH_HFENCE_GVMA, MASK_HFENCE_GVMA) DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S) DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S) DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S) @@ -1179,11 +1978,18 @@ DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP) DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR) DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR) DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK) +DECLARE_INSN(c_srli_rv32, MATCH_C_SRLI_RV32, MASK_C_SRLI_RV32) +DECLARE_INSN(c_srai_rv32, MATCH_C_SRAI_RV32, MASK_C_SRAI_RV32) +DECLARE_INSN(c_slli_rv32, MATCH_C_SLLI_RV32, MASK_C_SLLI_RV32) DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD) DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD) DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW) DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP) DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP) +DECLARE_INSN(c_lq, MATCH_C_LQ, MASK_C_LQ) +DECLARE_INSN(c_sq, MATCH_C_SQ, MASK_C_SQ) +DECLARE_INSN(c_lqsp, MATCH_C_LQSP, MASK_C_LQSP) +DECLARE_INSN(c_sqsp, MATCH_C_SQSP, MASK_C_SQSP) DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN) DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD) DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW) @@ -1240,11 +2046,390 @@ DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2) DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD) DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1) DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2) +DECLARE_INSN(vsetvli, MATCH_VSETVLI, MASK_VSETVLI) +DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL) +DECLARE_INSN(vlb_v, MATCH_VLB_V, MASK_VLB_V) +DECLARE_INSN(vlh_v, MATCH_VLH_V, MASK_VLH_V) +DECLARE_INSN(vlw_v, MATCH_VLW_V, MASK_VLW_V) +DECLARE_INSN(vle_v, MATCH_VLE_V, MASK_VLE_V) +DECLARE_INSN(vlbu_v, MATCH_VLBU_V, 
MASK_VLBU_V) +DECLARE_INSN(vlhu_v, MATCH_VLHU_V, MASK_VLHU_V) +DECLARE_INSN(vlwu_v, MATCH_VLWU_V, MASK_VLWU_V) +DECLARE_INSN(vsb_v, MATCH_VSB_V, MASK_VSB_V) +DECLARE_INSN(vsh_v, MATCH_VSH_V, MASK_VSH_V) +DECLARE_INSN(vsw_v, MATCH_VSW_V, MASK_VSW_V) +DECLARE_INSN(vse_v, MATCH_VSE_V, MASK_VSE_V) +DECLARE_INSN(vlsb_v, MATCH_VLSB_V, MASK_VLSB_V) +DECLARE_INSN(vlsh_v, MATCH_VLSH_V, MASK_VLSH_V) +DECLARE_INSN(vlsw_v, MATCH_VLSW_V, MASK_VLSW_V) +DECLARE_INSN(vlse_v, MATCH_VLSE_V, MASK_VLSE_V) +DECLARE_INSN(vlsbu_v, MATCH_VLSBU_V, MASK_VLSBU_V) +DECLARE_INSN(vlshu_v, MATCH_VLSHU_V, MASK_VLSHU_V) +DECLARE_INSN(vlswu_v, MATCH_VLSWU_V, MASK_VLSWU_V) +DECLARE_INSN(vssb_v, MATCH_VSSB_V, MASK_VSSB_V) +DECLARE_INSN(vssh_v, MATCH_VSSH_V, MASK_VSSH_V) +DECLARE_INSN(vssw_v, MATCH_VSSW_V, MASK_VSSW_V) +DECLARE_INSN(vsse_v, MATCH_VSSE_V, MASK_VSSE_V) +DECLARE_INSN(vlxb_v, MATCH_VLXB_V, MASK_VLXB_V) +DECLARE_INSN(vlxh_v, MATCH_VLXH_V, MASK_VLXH_V) +DECLARE_INSN(vlxw_v, MATCH_VLXW_V, MASK_VLXW_V) +DECLARE_INSN(vlxe_v, MATCH_VLXE_V, MASK_VLXE_V) +DECLARE_INSN(vlxbu_v, MATCH_VLXBU_V, MASK_VLXBU_V) +DECLARE_INSN(vlxhu_v, MATCH_VLXHU_V, MASK_VLXHU_V) +DECLARE_INSN(vlxwu_v, MATCH_VLXWU_V, MASK_VLXWU_V) +DECLARE_INSN(vsxb_v, MATCH_VSXB_V, MASK_VSXB_V) +DECLARE_INSN(vsxh_v, MATCH_VSXH_V, MASK_VSXH_V) +DECLARE_INSN(vsxw_v, MATCH_VSXW_V, MASK_VSXW_V) +DECLARE_INSN(vsxe_v, MATCH_VSXE_V, MASK_VSXE_V) +DECLARE_INSN(vsuxb_v, MATCH_VSUXB_V, MASK_VSUXB_V) +DECLARE_INSN(vsuxh_v, MATCH_VSUXH_V, MASK_VSUXH_V) +DECLARE_INSN(vsuxw_v, MATCH_VSUXW_V, MASK_VSUXW_V) +DECLARE_INSN(vsuxe_v, MATCH_VSUXE_V, MASK_VSUXE_V) +DECLARE_INSN(vlbff_v, MATCH_VLBFF_V, MASK_VLBFF_V) +DECLARE_INSN(vlhff_v, MATCH_VLHFF_V, MASK_VLHFF_V) +DECLARE_INSN(vlwff_v, MATCH_VLWFF_V, MASK_VLWFF_V) +DECLARE_INSN(vleff_v, MATCH_VLEFF_V, MASK_VLEFF_V) +DECLARE_INSN(vlbuff_v, MATCH_VLBUFF_V, MASK_VLBUFF_V) +DECLARE_INSN(vlhuff_v, MATCH_VLHUFF_V, MASK_VLHUFF_V) +DECLARE_INSN(vlwuff_v, MATCH_VLWUFF_V, MASK_VLWUFF_V) +DECLARE_INSN(vl1r_v, MATCH_VL1R_V, MASK_VL1R_V) +DECLARE_INSN(vs1r_v, MATCH_VS1R_V, MASK_VS1R_V) +DECLARE_INSN(vfadd_vf, MATCH_VFADD_VF, MASK_VFADD_VF) +DECLARE_INSN(vfsub_vf, MATCH_VFSUB_VF, MASK_VFSUB_VF) +DECLARE_INSN(vfmin_vf, MATCH_VFMIN_VF, MASK_VFMIN_VF) +DECLARE_INSN(vfmax_vf, MATCH_VFMAX_VF, MASK_VFMAX_VF) +DECLARE_INSN(vfsgnj_vf, MATCH_VFSGNJ_VF, MASK_VFSGNJ_VF) +DECLARE_INSN(vfsgnjn_vf, MATCH_VFSGNJN_VF, MASK_VFSGNJN_VF) +DECLARE_INSN(vfsgnjx_vf, MATCH_VFSGNJX_VF, MASK_VFSGNJX_VF) +DECLARE_INSN(vfmv_s_f, MATCH_VFMV_S_F, MASK_VFMV_S_F) +DECLARE_INSN(vfmerge_vfm, MATCH_VFMERGE_VFM, MASK_VFMERGE_VFM) +DECLARE_INSN(vfmv_v_f, MATCH_VFMV_V_F, MASK_VFMV_V_F) +DECLARE_INSN(vmfeq_vf, MATCH_VMFEQ_VF, MASK_VMFEQ_VF) +DECLARE_INSN(vmfle_vf, MATCH_VMFLE_VF, MASK_VMFLE_VF) +DECLARE_INSN(vmflt_vf, MATCH_VMFLT_VF, MASK_VMFLT_VF) +DECLARE_INSN(vmfne_vf, MATCH_VMFNE_VF, MASK_VMFNE_VF) +DECLARE_INSN(vmfgt_vf, MATCH_VMFGT_VF, MASK_VMFGT_VF) +DECLARE_INSN(vmfge_vf, MATCH_VMFGE_VF, MASK_VMFGE_VF) +DECLARE_INSN(vfdiv_vf, MATCH_VFDIV_VF, MASK_VFDIV_VF) +DECLARE_INSN(vfrdiv_vf, MATCH_VFRDIV_VF, MASK_VFRDIV_VF) +DECLARE_INSN(vfmul_vf, MATCH_VFMUL_VF, MASK_VFMUL_VF) +DECLARE_INSN(vfrsub_vf, MATCH_VFRSUB_VF, MASK_VFRSUB_VF) +DECLARE_INSN(vfmadd_vf, MATCH_VFMADD_VF, MASK_VFMADD_VF) +DECLARE_INSN(vfnmadd_vf, MATCH_VFNMADD_VF, MASK_VFNMADD_VF) +DECLARE_INSN(vfmsub_vf, MATCH_VFMSUB_VF, MASK_VFMSUB_VF) +DECLARE_INSN(vfnmsub_vf, MATCH_VFNMSUB_VF, MASK_VFNMSUB_VF) +DECLARE_INSN(vfmacc_vf, MATCH_VFMACC_VF, MASK_VFMACC_VF) +DECLARE_INSN(vfnmacc_vf, MATCH_VFNMACC_VF, 
MASK_VFNMACC_VF) +DECLARE_INSN(vfmsac_vf, MATCH_VFMSAC_VF, MASK_VFMSAC_VF) +DECLARE_INSN(vfnmsac_vf, MATCH_VFNMSAC_VF, MASK_VFNMSAC_VF) +DECLARE_INSN(vfwadd_vf, MATCH_VFWADD_VF, MASK_VFWADD_VF) +DECLARE_INSN(vfwsub_vf, MATCH_VFWSUB_VF, MASK_VFWSUB_VF) +DECLARE_INSN(vfwadd_wf, MATCH_VFWADD_WF, MASK_VFWADD_WF) +DECLARE_INSN(vfwsub_wf, MATCH_VFWSUB_WF, MASK_VFWSUB_WF) +DECLARE_INSN(vfwmul_vf, MATCH_VFWMUL_VF, MASK_VFWMUL_VF) +DECLARE_INSN(vfwmacc_vf, MATCH_VFWMACC_VF, MASK_VFWMACC_VF) +DECLARE_INSN(vfwnmacc_vf, MATCH_VFWNMACC_VF, MASK_VFWNMACC_VF) +DECLARE_INSN(vfwmsac_vf, MATCH_VFWMSAC_VF, MASK_VFWMSAC_VF) +DECLARE_INSN(vfwnmsac_vf, MATCH_VFWNMSAC_VF, MASK_VFWNMSAC_VF) +DECLARE_INSN(vfadd_vv, MATCH_VFADD_VV, MASK_VFADD_VV) +DECLARE_INSN(vfredsum_vs, MATCH_VFREDSUM_VS, MASK_VFREDSUM_VS) +DECLARE_INSN(vfsub_vv, MATCH_VFSUB_VV, MASK_VFSUB_VV) +DECLARE_INSN(vfredosum_vs, MATCH_VFREDOSUM_VS, MASK_VFREDOSUM_VS) +DECLARE_INSN(vfmin_vv, MATCH_VFMIN_VV, MASK_VFMIN_VV) +DECLARE_INSN(vfredmin_vs, MATCH_VFREDMIN_VS, MASK_VFREDMIN_VS) +DECLARE_INSN(vfmax_vv, MATCH_VFMAX_VV, MASK_VFMAX_VV) +DECLARE_INSN(vfredmax_vs, MATCH_VFREDMAX_VS, MASK_VFREDMAX_VS) +DECLARE_INSN(vfsgnj_vv, MATCH_VFSGNJ_VV, MASK_VFSGNJ_VV) +DECLARE_INSN(vfsgnjn_vv, MATCH_VFSGNJN_VV, MASK_VFSGNJN_VV) +DECLARE_INSN(vfsgnjx_vv, MATCH_VFSGNJX_VV, MASK_VFSGNJX_VV) +DECLARE_INSN(vfmv_f_s, MATCH_VFMV_F_S, MASK_VFMV_F_S) +DECLARE_INSN(vmfeq_vv, MATCH_VMFEQ_VV, MASK_VMFEQ_VV) +DECLARE_INSN(vmfle_vv, MATCH_VMFLE_VV, MASK_VMFLE_VV) +DECLARE_INSN(vmflt_vv, MATCH_VMFLT_VV, MASK_VMFLT_VV) +DECLARE_INSN(vmfne_vv, MATCH_VMFNE_VV, MASK_VMFNE_VV) +DECLARE_INSN(vfdiv_vv, MATCH_VFDIV_VV, MASK_VFDIV_VV) +DECLARE_INSN(vfmul_vv, MATCH_VFMUL_VV, MASK_VFMUL_VV) +DECLARE_INSN(vfmadd_vv, MATCH_VFMADD_VV, MASK_VFMADD_VV) +DECLARE_INSN(vfnmadd_vv, MATCH_VFNMADD_VV, MASK_VFNMADD_VV) +DECLARE_INSN(vfmsub_vv, MATCH_VFMSUB_VV, MASK_VFMSUB_VV) +DECLARE_INSN(vfnmsub_vv, MATCH_VFNMSUB_VV, MASK_VFNMSUB_VV) +DECLARE_INSN(vfmacc_vv, MATCH_VFMACC_VV, MASK_VFMACC_VV) +DECLARE_INSN(vfnmacc_vv, MATCH_VFNMACC_VV, MASK_VFNMACC_VV) +DECLARE_INSN(vfmsac_vv, MATCH_VFMSAC_VV, MASK_VFMSAC_VV) +DECLARE_INSN(vfnmsac_vv, MATCH_VFNMSAC_VV, MASK_VFNMSAC_VV) +DECLARE_INSN(vfcvt_xu_f_v, MATCH_VFCVT_XU_F_V, MASK_VFCVT_XU_F_V) +DECLARE_INSN(vfcvt_x_f_v, MATCH_VFCVT_X_F_V, MASK_VFCVT_X_F_V) +DECLARE_INSN(vfcvt_f_xu_v, MATCH_VFCVT_F_XU_V, MASK_VFCVT_F_XU_V) +DECLARE_INSN(vfcvt_f_x_v, MATCH_VFCVT_F_X_V, MASK_VFCVT_F_X_V) +DECLARE_INSN(vfwcvt_xu_f_v, MATCH_VFWCVT_XU_F_V, MASK_VFWCVT_XU_F_V) +DECLARE_INSN(vfwcvt_x_f_v, MATCH_VFWCVT_X_F_V, MASK_VFWCVT_X_F_V) +DECLARE_INSN(vfwcvt_f_xu_v, MATCH_VFWCVT_F_XU_V, MASK_VFWCVT_F_XU_V) +DECLARE_INSN(vfwcvt_f_x_v, MATCH_VFWCVT_F_X_V, MASK_VFWCVT_F_X_V) +DECLARE_INSN(vfwcvt_f_f_v, MATCH_VFWCVT_F_F_V, MASK_VFWCVT_F_F_V) +DECLARE_INSN(vfncvt_xu_f_w, MATCH_VFNCVT_XU_F_W, MASK_VFNCVT_XU_F_W) +DECLARE_INSN(vfncvt_x_f_w, MATCH_VFNCVT_X_F_W, MASK_VFNCVT_X_F_W) +DECLARE_INSN(vfncvt_f_xu_w, MATCH_VFNCVT_F_XU_W, MASK_VFNCVT_F_XU_W) +DECLARE_INSN(vfncvt_f_x_w, MATCH_VFNCVT_F_X_W, MASK_VFNCVT_F_X_W) +DECLARE_INSN(vfncvt_f_f_w, MATCH_VFNCVT_F_F_W, MASK_VFNCVT_F_F_W) +DECLARE_INSN(vfncvt_rod_f_f_w, MATCH_VFNCVT_ROD_F_F_W, MASK_VFNCVT_ROD_F_F_W) +DECLARE_INSN(vfsqrt_v, MATCH_VFSQRT_V, MASK_VFSQRT_V) +DECLARE_INSN(vfclass_v, MATCH_VFCLASS_V, MASK_VFCLASS_V) +DECLARE_INSN(vfwadd_vv, MATCH_VFWADD_VV, MASK_VFWADD_VV) +DECLARE_INSN(vfwredsum_vs, MATCH_VFWREDSUM_VS, MASK_VFWREDSUM_VS) +DECLARE_INSN(vfwsub_vv, MATCH_VFWSUB_VV, MASK_VFWSUB_VV) +DECLARE_INSN(vfwredosum_vs, 
MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS) +DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV) +DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV) +DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV) +DECLARE_INSN(vfdot_vv, MATCH_VFDOT_VV, MASK_VFDOT_VV) +DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV) +DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV) +DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV) +DECLARE_INSN(vfwnmsac_vv, MATCH_VFWNMSAC_VV, MASK_VFWNMSAC_VV) +DECLARE_INSN(vadd_vx, MATCH_VADD_VX, MASK_VADD_VX) +DECLARE_INSN(vsub_vx, MATCH_VSUB_VX, MASK_VSUB_VX) +DECLARE_INSN(vrsub_vx, MATCH_VRSUB_VX, MASK_VRSUB_VX) +DECLARE_INSN(vminu_vx, MATCH_VMINU_VX, MASK_VMINU_VX) +DECLARE_INSN(vmin_vx, MATCH_VMIN_VX, MASK_VMIN_VX) +DECLARE_INSN(vmaxu_vx, MATCH_VMAXU_VX, MASK_VMAXU_VX) +DECLARE_INSN(vmax_vx, MATCH_VMAX_VX, MASK_VMAX_VX) +DECLARE_INSN(vand_vx, MATCH_VAND_VX, MASK_VAND_VX) +DECLARE_INSN(vor_vx, MATCH_VOR_VX, MASK_VOR_VX) +DECLARE_INSN(vxor_vx, MATCH_VXOR_VX, MASK_VXOR_VX) +DECLARE_INSN(vrgather_vx, MATCH_VRGATHER_VX, MASK_VRGATHER_VX) +DECLARE_INSN(vslideup_vx, MATCH_VSLIDEUP_VX, MASK_VSLIDEUP_VX) +DECLARE_INSN(vslidedown_vx, MATCH_VSLIDEDOWN_VX, MASK_VSLIDEDOWN_VX) +DECLARE_INSN(vadc_vxm, MATCH_VADC_VXM, MASK_VADC_VXM) +DECLARE_INSN(vmadc_vxm, MATCH_VMADC_VXM, MASK_VMADC_VXM) +DECLARE_INSN(vsbc_vxm, MATCH_VSBC_VXM, MASK_VSBC_VXM) +DECLARE_INSN(vmsbc_vxm, MATCH_VMSBC_VXM, MASK_VMSBC_VXM) +DECLARE_INSN(vmerge_vxm, MATCH_VMERGE_VXM, MASK_VMERGE_VXM) +DECLARE_INSN(vmv_v_x, MATCH_VMV_V_X, MASK_VMV_V_X) +DECLARE_INSN(vmseq_vx, MATCH_VMSEQ_VX, MASK_VMSEQ_VX) +DECLARE_INSN(vmsne_vx, MATCH_VMSNE_VX, MASK_VMSNE_VX) +DECLARE_INSN(vmsltu_vx, MATCH_VMSLTU_VX, MASK_VMSLTU_VX) +DECLARE_INSN(vmslt_vx, MATCH_VMSLT_VX, MASK_VMSLT_VX) +DECLARE_INSN(vmsleu_vx, MATCH_VMSLEU_VX, MASK_VMSLEU_VX) +DECLARE_INSN(vmsle_vx, MATCH_VMSLE_VX, MASK_VMSLE_VX) +DECLARE_INSN(vmsgtu_vx, MATCH_VMSGTU_VX, MASK_VMSGTU_VX) +DECLARE_INSN(vmsgt_vx, MATCH_VMSGT_VX, MASK_VMSGT_VX) +DECLARE_INSN(vsaddu_vx, MATCH_VSADDU_VX, MASK_VSADDU_VX) +DECLARE_INSN(vsadd_vx, MATCH_VSADD_VX, MASK_VSADD_VX) +DECLARE_INSN(vssubu_vx, MATCH_VSSUBU_VX, MASK_VSSUBU_VX) +DECLARE_INSN(vssub_vx, MATCH_VSSUB_VX, MASK_VSSUB_VX) +DECLARE_INSN(vsll_vx, MATCH_VSLL_VX, MASK_VSLL_VX) +DECLARE_INSN(vsmul_vx, MATCH_VSMUL_VX, MASK_VSMUL_VX) +DECLARE_INSN(vsrl_vx, MATCH_VSRL_VX, MASK_VSRL_VX) +DECLARE_INSN(vsra_vx, MATCH_VSRA_VX, MASK_VSRA_VX) +DECLARE_INSN(vssrl_vx, MATCH_VSSRL_VX, MASK_VSSRL_VX) +DECLARE_INSN(vssra_vx, MATCH_VSSRA_VX, MASK_VSSRA_VX) +DECLARE_INSN(vnsrl_wx, MATCH_VNSRL_WX, MASK_VNSRL_WX) +DECLARE_INSN(vnsra_wx, MATCH_VNSRA_WX, MASK_VNSRA_WX) +DECLARE_INSN(vnclipu_wx, MATCH_VNCLIPU_WX, MASK_VNCLIPU_WX) +DECLARE_INSN(vnclip_wx, MATCH_VNCLIP_WX, MASK_VNCLIP_WX) +DECLARE_INSN(vqmaccu_vx, MATCH_VQMACCU_VX, MASK_VQMACCU_VX) +DECLARE_INSN(vqmacc_vx, MATCH_VQMACC_VX, MASK_VQMACC_VX) +DECLARE_INSN(vqmaccus_vx, MATCH_VQMACCUS_VX, MASK_VQMACCUS_VX) +DECLARE_INSN(vqmaccsu_vx, MATCH_VQMACCSU_VX, MASK_VQMACCSU_VX) +DECLARE_INSN(vadd_vv, MATCH_VADD_VV, MASK_VADD_VV) +DECLARE_INSN(vsub_vv, MATCH_VSUB_VV, MASK_VSUB_VV) +DECLARE_INSN(vminu_vv, MATCH_VMINU_VV, MASK_VMINU_VV) +DECLARE_INSN(vmin_vv, MATCH_VMIN_VV, MASK_VMIN_VV) +DECLARE_INSN(vmaxu_vv, MATCH_VMAXU_VV, MASK_VMAXU_VV) +DECLARE_INSN(vmax_vv, MATCH_VMAX_VV, MASK_VMAX_VV) +DECLARE_INSN(vand_vv, MATCH_VAND_VV, MASK_VAND_VV) +DECLARE_INSN(vor_vv, MATCH_VOR_VV, MASK_VOR_VV) +DECLARE_INSN(vxor_vv, MATCH_VXOR_VV, MASK_VXOR_VV) +DECLARE_INSN(vrgather_vv, 
MATCH_VRGATHER_VV, MASK_VRGATHER_VV) +DECLARE_INSN(vadc_vvm, MATCH_VADC_VVM, MASK_VADC_VVM) +DECLARE_INSN(vmadc_vvm, MATCH_VMADC_VVM, MASK_VMADC_VVM) +DECLARE_INSN(vsbc_vvm, MATCH_VSBC_VVM, MASK_VSBC_VVM) +DECLARE_INSN(vmsbc_vvm, MATCH_VMSBC_VVM, MASK_VMSBC_VVM) +DECLARE_INSN(vmerge_vvm, MATCH_VMERGE_VVM, MASK_VMERGE_VVM) +DECLARE_INSN(vmv_v_v, MATCH_VMV_V_V, MASK_VMV_V_V) +DECLARE_INSN(vmseq_vv, MATCH_VMSEQ_VV, MASK_VMSEQ_VV) +DECLARE_INSN(vmsne_vv, MATCH_VMSNE_VV, MASK_VMSNE_VV) +DECLARE_INSN(vmsltu_vv, MATCH_VMSLTU_VV, MASK_VMSLTU_VV) +DECLARE_INSN(vmslt_vv, MATCH_VMSLT_VV, MASK_VMSLT_VV) +DECLARE_INSN(vmsleu_vv, MATCH_VMSLEU_VV, MASK_VMSLEU_VV) +DECLARE_INSN(vmsle_vv, MATCH_VMSLE_VV, MASK_VMSLE_VV) +DECLARE_INSN(vsaddu_vv, MATCH_VSADDU_VV, MASK_VSADDU_VV) +DECLARE_INSN(vsadd_vv, MATCH_VSADD_VV, MASK_VSADD_VV) +DECLARE_INSN(vssubu_vv, MATCH_VSSUBU_VV, MASK_VSSUBU_VV) +DECLARE_INSN(vssub_vv, MATCH_VSSUB_VV, MASK_VSSUB_VV) +DECLARE_INSN(vsll_vv, MATCH_VSLL_VV, MASK_VSLL_VV) +DECLARE_INSN(vsmul_vv, MATCH_VSMUL_VV, MASK_VSMUL_VV) +DECLARE_INSN(vsrl_vv, MATCH_VSRL_VV, MASK_VSRL_VV) +DECLARE_INSN(vsra_vv, MATCH_VSRA_VV, MASK_VSRA_VV) +DECLARE_INSN(vssrl_vv, MATCH_VSSRL_VV, MASK_VSSRL_VV) +DECLARE_INSN(vssra_vv, MATCH_VSSRA_VV, MASK_VSSRA_VV) +DECLARE_INSN(vnsrl_wv, MATCH_VNSRL_WV, MASK_VNSRL_WV) +DECLARE_INSN(vnsra_wv, MATCH_VNSRA_WV, MASK_VNSRA_WV) +DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV) +DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV) +DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS) +DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS) +DECLARE_INSN(vdotu_vv, MATCH_VDOTU_VV, MASK_VDOTU_VV) +DECLARE_INSN(vdot_vv, MATCH_VDOT_VV, MASK_VDOT_VV) +DECLARE_INSN(vqmaccu_vv, MATCH_VQMACCU_VV, MASK_VQMACCU_VV) +DECLARE_INSN(vqmacc_vv, MATCH_VQMACC_VV, MASK_VQMACC_VV) +DECLARE_INSN(vqmaccsu_vv, MATCH_VQMACCSU_VV, MASK_VQMACCSU_VV) +DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI) +DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI) +DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI) +DECLARE_INSN(vor_vi, MATCH_VOR_VI, MASK_VOR_VI) +DECLARE_INSN(vxor_vi, MATCH_VXOR_VI, MASK_VXOR_VI) +DECLARE_INSN(vrgather_vi, MATCH_VRGATHER_VI, MASK_VRGATHER_VI) +DECLARE_INSN(vslideup_vi, MATCH_VSLIDEUP_VI, MASK_VSLIDEUP_VI) +DECLARE_INSN(vslidedown_vi, MATCH_VSLIDEDOWN_VI, MASK_VSLIDEDOWN_VI) +DECLARE_INSN(vadc_vim, MATCH_VADC_VIM, MASK_VADC_VIM) +DECLARE_INSN(vmadc_vim, MATCH_VMADC_VIM, MASK_VMADC_VIM) +DECLARE_INSN(vmerge_vim, MATCH_VMERGE_VIM, MASK_VMERGE_VIM) +DECLARE_INSN(vmv_v_i, MATCH_VMV_V_I, MASK_VMV_V_I) +DECLARE_INSN(vmseq_vi, MATCH_VMSEQ_VI, MASK_VMSEQ_VI) +DECLARE_INSN(vmsne_vi, MATCH_VMSNE_VI, MASK_VMSNE_VI) +DECLARE_INSN(vmsleu_vi, MATCH_VMSLEU_VI, MASK_VMSLEU_VI) +DECLARE_INSN(vmsle_vi, MATCH_VMSLE_VI, MASK_VMSLE_VI) +DECLARE_INSN(vmsgtu_vi, MATCH_VMSGTU_VI, MASK_VMSGTU_VI) +DECLARE_INSN(vmsgt_vi, MATCH_VMSGT_VI, MASK_VMSGT_VI) +DECLARE_INSN(vsaddu_vi, MATCH_VSADDU_VI, MASK_VSADDU_VI) +DECLARE_INSN(vsadd_vi, MATCH_VSADD_VI, MASK_VSADD_VI) +DECLARE_INSN(vsll_vi, MATCH_VSLL_VI, MASK_VSLL_VI) +DECLARE_INSN(vmv1r_v, MATCH_VMV1R_V, MASK_VMV1R_V) +DECLARE_INSN(vmv2r_v, MATCH_VMV2R_V, MASK_VMV2R_V) +DECLARE_INSN(vmv4r_v, MATCH_VMV4R_V, MASK_VMV4R_V) +DECLARE_INSN(vmv8r_v, MATCH_VMV8R_V, MASK_VMV8R_V) +DECLARE_INSN(vsrl_vi, MATCH_VSRL_VI, MASK_VSRL_VI) +DECLARE_INSN(vsra_vi, MATCH_VSRA_VI, MASK_VSRA_VI) +DECLARE_INSN(vssrl_vi, MATCH_VSSRL_VI, MASK_VSSRL_VI) +DECLARE_INSN(vssra_vi, MATCH_VSSRA_VI, MASK_VSSRA_VI) +DECLARE_INSN(vnsrl_wi, 
MATCH_VNSRL_WI, MASK_VNSRL_WI) +DECLARE_INSN(vnsra_wi, MATCH_VNSRA_WI, MASK_VNSRA_WI) +DECLARE_INSN(vnclipu_wi, MATCH_VNCLIPU_WI, MASK_VNCLIPU_WI) +DECLARE_INSN(vnclip_wi, MATCH_VNCLIP_WI, MASK_VNCLIP_WI) +DECLARE_INSN(vredsum_vs, MATCH_VREDSUM_VS, MASK_VREDSUM_VS) +DECLARE_INSN(vredand_vs, MATCH_VREDAND_VS, MASK_VREDAND_VS) +DECLARE_INSN(vredor_vs, MATCH_VREDOR_VS, MASK_VREDOR_VS) +DECLARE_INSN(vredxor_vs, MATCH_VREDXOR_VS, MASK_VREDXOR_VS) +DECLARE_INSN(vredminu_vs, MATCH_VREDMINU_VS, MASK_VREDMINU_VS) +DECLARE_INSN(vredmin_vs, MATCH_VREDMIN_VS, MASK_VREDMIN_VS) +DECLARE_INSN(vredmaxu_vs, MATCH_VREDMAXU_VS, MASK_VREDMAXU_VS) +DECLARE_INSN(vredmax_vs, MATCH_VREDMAX_VS, MASK_VREDMAX_VS) +DECLARE_INSN(vaaddu_vv, MATCH_VAADDU_VV, MASK_VAADDU_VV) +DECLARE_INSN(vaadd_vv, MATCH_VAADD_VV, MASK_VAADD_VV) +DECLARE_INSN(vasubu_vv, MATCH_VASUBU_VV, MASK_VASUBU_VV) +DECLARE_INSN(vasub_vv, MATCH_VASUB_VV, MASK_VASUB_VV) +DECLARE_INSN(vmv_x_s, MATCH_VMV_X_S, MASK_VMV_X_S) +DECLARE_INSN(vcompress_vm, MATCH_VCOMPRESS_VM, MASK_VCOMPRESS_VM) +DECLARE_INSN(vmandnot_mm, MATCH_VMANDNOT_MM, MASK_VMANDNOT_MM) +DECLARE_INSN(vmand_mm, MATCH_VMAND_MM, MASK_VMAND_MM) +DECLARE_INSN(vmor_mm, MATCH_VMOR_MM, MASK_VMOR_MM) +DECLARE_INSN(vmxor_mm, MATCH_VMXOR_MM, MASK_VMXOR_MM) +DECLARE_INSN(vmornot_mm, MATCH_VMORNOT_MM, MASK_VMORNOT_MM) +DECLARE_INSN(vmnand_mm, MATCH_VMNAND_MM, MASK_VMNAND_MM) +DECLARE_INSN(vmnor_mm, MATCH_VMNOR_MM, MASK_VMNOR_MM) +DECLARE_INSN(vmxnor_mm, MATCH_VMXNOR_MM, MASK_VMXNOR_MM) +DECLARE_INSN(vmsbf_m, MATCH_VMSBF_M, MASK_VMSBF_M) +DECLARE_INSN(vmsof_m, MATCH_VMSOF_M, MASK_VMSOF_M) +DECLARE_INSN(vmsif_m, MATCH_VMSIF_M, MASK_VMSIF_M) +DECLARE_INSN(viota_m, MATCH_VIOTA_M, MASK_VIOTA_M) +DECLARE_INSN(vid_v, MATCH_VID_V, MASK_VID_V) +DECLARE_INSN(vpopc_m, MATCH_VPOPC_M, MASK_VPOPC_M) +DECLARE_INSN(vfirst_m, MATCH_VFIRST_M, MASK_VFIRST_M) +DECLARE_INSN(vdivu_vv, MATCH_VDIVU_VV, MASK_VDIVU_VV) +DECLARE_INSN(vdiv_vv, MATCH_VDIV_VV, MASK_VDIV_VV) +DECLARE_INSN(vremu_vv, MATCH_VREMU_VV, MASK_VREMU_VV) +DECLARE_INSN(vrem_vv, MATCH_VREM_VV, MASK_VREM_VV) +DECLARE_INSN(vmulhu_vv, MATCH_VMULHU_VV, MASK_VMULHU_VV) +DECLARE_INSN(vmul_vv, MATCH_VMUL_VV, MASK_VMUL_VV) +DECLARE_INSN(vmulhsu_vv, MATCH_VMULHSU_VV, MASK_VMULHSU_VV) +DECLARE_INSN(vmulh_vv, MATCH_VMULH_VV, MASK_VMULH_VV) +DECLARE_INSN(vmadd_vv, MATCH_VMADD_VV, MASK_VMADD_VV) +DECLARE_INSN(vnmsub_vv, MATCH_VNMSUB_VV, MASK_VNMSUB_VV) +DECLARE_INSN(vmacc_vv, MATCH_VMACC_VV, MASK_VMACC_VV) +DECLARE_INSN(vnmsac_vv, MATCH_VNMSAC_VV, MASK_VNMSAC_VV) +DECLARE_INSN(vwaddu_vv, MATCH_VWADDU_VV, MASK_VWADDU_VV) +DECLARE_INSN(vwadd_vv, MATCH_VWADD_VV, MASK_VWADD_VV) +DECLARE_INSN(vwsubu_vv, MATCH_VWSUBU_VV, MASK_VWSUBU_VV) +DECLARE_INSN(vwsub_vv, MATCH_VWSUB_VV, MASK_VWSUB_VV) +DECLARE_INSN(vwaddu_wv, MATCH_VWADDU_WV, MASK_VWADDU_WV) +DECLARE_INSN(vwadd_wv, MATCH_VWADD_WV, MASK_VWADD_WV) +DECLARE_INSN(vwsubu_wv, MATCH_VWSUBU_WV, MASK_VWSUBU_WV) +DECLARE_INSN(vwsub_wv, MATCH_VWSUB_WV, MASK_VWSUB_WV) +DECLARE_INSN(vwmulu_vv, MATCH_VWMULU_VV, MASK_VWMULU_VV) +DECLARE_INSN(vwmulsu_vv, MATCH_VWMULSU_VV, MASK_VWMULSU_VV) +DECLARE_INSN(vwmul_vv, MATCH_VWMUL_VV, MASK_VWMUL_VV) +DECLARE_INSN(vwmaccu_vv, MATCH_VWMACCU_VV, MASK_VWMACCU_VV) +DECLARE_INSN(vwmacc_vv, MATCH_VWMACC_VV, MASK_VWMACC_VV) +DECLARE_INSN(vwmaccsu_vv, MATCH_VWMACCSU_VV, MASK_VWMACCSU_VV) +DECLARE_INSN(vaaddu_vx, MATCH_VAADDU_VX, MASK_VAADDU_VX) +DECLARE_INSN(vaadd_vx, MATCH_VAADD_VX, MASK_VAADD_VX) +DECLARE_INSN(vasubu_vx, MATCH_VASUBU_VX, MASK_VASUBU_VX) +DECLARE_INSN(vasub_vx, MATCH_VASUB_VX, 
MASK_VASUB_VX) +DECLARE_INSN(vmv_s_x, MATCH_VMV_S_X, MASK_VMV_S_X) +DECLARE_INSN(vslide1up_vx, MATCH_VSLIDE1UP_VX, MASK_VSLIDE1UP_VX) +DECLARE_INSN(vslide1down_vx, MATCH_VSLIDE1DOWN_VX, MASK_VSLIDE1DOWN_VX) +DECLARE_INSN(vdivu_vx, MATCH_VDIVU_VX, MASK_VDIVU_VX) +DECLARE_INSN(vdiv_vx, MATCH_VDIV_VX, MASK_VDIV_VX) +DECLARE_INSN(vremu_vx, MATCH_VREMU_VX, MASK_VREMU_VX) +DECLARE_INSN(vrem_vx, MATCH_VREM_VX, MASK_VREM_VX) +DECLARE_INSN(vmulhu_vx, MATCH_VMULHU_VX, MASK_VMULHU_VX) +DECLARE_INSN(vmul_vx, MATCH_VMUL_VX, MASK_VMUL_VX) +DECLARE_INSN(vmulhsu_vx, MATCH_VMULHSU_VX, MASK_VMULHSU_VX) +DECLARE_INSN(vmulh_vx, MATCH_VMULH_VX, MASK_VMULH_VX) +DECLARE_INSN(vmadd_vx, MATCH_VMADD_VX, MASK_VMADD_VX) +DECLARE_INSN(vnmsub_vx, MATCH_VNMSUB_VX, MASK_VNMSUB_VX) +DECLARE_INSN(vmacc_vx, MATCH_VMACC_VX, MASK_VMACC_VX) +DECLARE_INSN(vnmsac_vx, MATCH_VNMSAC_VX, MASK_VNMSAC_VX) +DECLARE_INSN(vwaddu_vx, MATCH_VWADDU_VX, MASK_VWADDU_VX) +DECLARE_INSN(vwadd_vx, MATCH_VWADD_VX, MASK_VWADD_VX) +DECLARE_INSN(vwsubu_vx, MATCH_VWSUBU_VX, MASK_VWSUBU_VX) +DECLARE_INSN(vwsub_vx, MATCH_VWSUB_VX, MASK_VWSUB_VX) +DECLARE_INSN(vwaddu_wx, MATCH_VWADDU_WX, MASK_VWADDU_WX) +DECLARE_INSN(vwadd_wx, MATCH_VWADD_WX, MASK_VWADD_WX) +DECLARE_INSN(vwsubu_wx, MATCH_VWSUBU_WX, MASK_VWSUBU_WX) +DECLARE_INSN(vwsub_wx, MATCH_VWSUB_WX, MASK_VWSUB_WX) +DECLARE_INSN(vwmulu_vx, MATCH_VWMULU_VX, MASK_VWMULU_VX) +DECLARE_INSN(vwmulsu_vx, MATCH_VWMULSU_VX, MASK_VWMULSU_VX) +DECLARE_INSN(vwmul_vx, MATCH_VWMUL_VX, MASK_VWMUL_VX) +DECLARE_INSN(vwmaccu_vx, MATCH_VWMACCU_VX, MASK_VWMACCU_VX) +DECLARE_INSN(vwmacc_vx, MATCH_VWMACC_VX, MASK_VWMACC_VX) +DECLARE_INSN(vwmaccus_vx, MATCH_VWMACCUS_VX, MASK_VWMACCUS_VX) +DECLARE_INSN(vwmaccsu_vx, MATCH_VWMACCSU_VX, MASK_VWMACCSU_VX) +DECLARE_INSN(vamoswapw_v, MATCH_VAMOSWAPW_V, MASK_VAMOSWAPW_V) +DECLARE_INSN(vamoaddw_v, MATCH_VAMOADDW_V, MASK_VAMOADDW_V) +DECLARE_INSN(vamoxorw_v, MATCH_VAMOXORW_V, MASK_VAMOXORW_V) +DECLARE_INSN(vamoandw_v, MATCH_VAMOANDW_V, MASK_VAMOANDW_V) +DECLARE_INSN(vamoorw_v, MATCH_VAMOORW_V, MASK_VAMOORW_V) +DECLARE_INSN(vamominw_v, MATCH_VAMOMINW_V, MASK_VAMOMINW_V) +DECLARE_INSN(vamomaxw_v, MATCH_VAMOMAXW_V, MASK_VAMOMAXW_V) +DECLARE_INSN(vamominuw_v, MATCH_VAMOMINUW_V, MASK_VAMOMINUW_V) +DECLARE_INSN(vamomaxuw_v, MATCH_VAMOMAXUW_V, MASK_VAMOMAXUW_V) +DECLARE_INSN(vamoswape_v, MATCH_VAMOSWAPE_V, MASK_VAMOSWAPE_V) +DECLARE_INSN(vamoadde_v, MATCH_VAMOADDE_V, MASK_VAMOADDE_V) +DECLARE_INSN(vamoxore_v, MATCH_VAMOXORE_V, MASK_VAMOXORE_V) +DECLARE_INSN(vamoande_v, MATCH_VAMOANDE_V, MASK_VAMOANDE_V) +DECLARE_INSN(vamoore_v, MATCH_VAMOORE_V, MASK_VAMOORE_V) +DECLARE_INSN(vamomine_v, MATCH_VAMOMINE_V, MASK_VAMOMINE_V) +DECLARE_INSN(vamomaxe_v, MATCH_VAMOMAXE_V, MASK_VAMOMAXE_V) +DECLARE_INSN(vamominue_v, MATCH_VAMOMINUE_V, MASK_VAMOMINUE_V) +DECLARE_INSN(vamomaxue_v, MATCH_VAMOMAXUE_V, MASK_VAMOMAXUE_V) #endif #ifdef DECLARE_CSR DECLARE_CSR(fflags, CSR_FFLAGS) DECLARE_CSR(frm, CSR_FRM) DECLARE_CSR(fcsr, CSR_FCSR) +DECLARE_CSR(ustatus, CSR_USTATUS) +DECLARE_CSR(uie, CSR_UIE) +DECLARE_CSR(utvec, CSR_UTVEC) +DECLARE_CSR(vstart, CSR_VSTART) +DECLARE_CSR(vxsat, CSR_VXSAT) +DECLARE_CSR(vxrm, CSR_VXRM) +DECLARE_CSR(uscratch, CSR_USCRATCH) +DECLARE_CSR(uepc, CSR_UEPC) +DECLARE_CSR(ucause, CSR_UCAUSE) +DECLARE_CSR(utval, CSR_UTVAL) +DECLARE_CSR(uip, CSR_UIP) DECLARE_CSR(cycle, CSR_CYCLE) DECLARE_CSR(time, CSR_TIME) DECLARE_CSR(instret, CSR_INSTRET) @@ -1277,6 +2462,9 @@ DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28) DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29) DECLARE_CSR(hpmcounter30, 
CSR_HPMCOUNTER30) DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31) +DECLARE_CSR(vl, CSR_VL) +DECLARE_CSR(vtype, CSR_VTYPE) +DECLARE_CSR(vlenb, CSR_VLENB) DECLARE_CSR(sstatus, CSR_SSTATUS) DECLARE_CSR(sie, CSR_SIE) DECLARE_CSR(stvec, CSR_STVEC) @@ -1287,6 +2475,35 @@ DECLARE_CSR(scause, CSR_SCAUSE) DECLARE_CSR(stval, CSR_STVAL) DECLARE_CSR(sip, CSR_SIP) DECLARE_CSR(satp, CSR_SATP) +DECLARE_CSR(vsstatus, CSR_VSSTATUS) +DECLARE_CSR(vsie, CSR_VSIE) +DECLARE_CSR(vstvec, CSR_VSTVEC) +DECLARE_CSR(vsscratch, CSR_VSSCRATCH) +DECLARE_CSR(vsepc, CSR_VSEPC) +DECLARE_CSR(vscause, CSR_VSCAUSE) +DECLARE_CSR(vstval, CSR_VSTVAL) +DECLARE_CSR(vsip, CSR_VSIP) +DECLARE_CSR(vsatp, CSR_VSATP) +DECLARE_CSR(hstatus, CSR_HSTATUS) +DECLARE_CSR(hedeleg, CSR_HEDELEG) +DECLARE_CSR(hideleg, CSR_HIDELEG) +DECLARE_CSR(hcounteren, CSR_HCOUNTEREN) +DECLARE_CSR(hgatp, CSR_HGATP) +DECLARE_CSR(utvt, CSR_UTVT) +DECLARE_CSR(unxti, CSR_UNXTI) +DECLARE_CSR(uintstatus, CSR_UINTSTATUS) +DECLARE_CSR(uscratchcsw, CSR_USCRATCHCSW) +DECLARE_CSR(uscratchcswl, CSR_USCRATCHCSWL) +DECLARE_CSR(stvt, CSR_STVT) +DECLARE_CSR(snxti, CSR_SNXTI) +DECLARE_CSR(sintstatus, CSR_SINTSTATUS) +DECLARE_CSR(sscratchcsw, CSR_SSCRATCHCSW) +DECLARE_CSR(sscratchcswl, CSR_SSCRATCHCSWL) +DECLARE_CSR(mtvt, CSR_MTVT) +DECLARE_CSR(mnxti, CSR_MNXTI) +DECLARE_CSR(mintstatus, CSR_MINTSTATUS) +DECLARE_CSR(mscratchcsw, CSR_MSCRATCHCSW) +DECLARE_CSR(mscratchcswl, CSR_MSCRATCHCSWL) DECLARE_CSR(mstatus, CSR_MSTATUS) DECLARE_CSR(misa, CSR_MISA) DECLARE_CSR(medeleg, CSR_MEDELEG) diff --git a/src/target/riscv/gdb_regs.h b/src/target/riscv/gdb_regs.h index 04f8756..b118b7a 100644 --- a/src/target/riscv/gdb_regs.h +++ b/src/target/riscv/gdb_regs.h @@ -76,6 +76,12 @@ enum gdb_regno { GDB_REGNO_FT11, GDB_REGNO_FPR31 = GDB_REGNO_FT11, GDB_REGNO_CSR0 = 65, + GDB_REGNO_VSTART = CSR_VSTART + GDB_REGNO_CSR0, + GDB_REGNO_VXSAT = CSR_VXSAT + GDB_REGNO_CSR0, + GDB_REGNO_VXRM = CSR_VXRM + GDB_REGNO_CSR0, + GDB_REGNO_VLENB = CSR_VLENB + GDB_REGNO_CSR0, + GDB_REGNO_VL = CSR_VL + GDB_REGNO_CSR0, + GDB_REGNO_VTYPE = CSR_VTYPE + GDB_REGNO_CSR0, GDB_REGNO_TSELECT = CSR_TSELECT + GDB_REGNO_CSR0, GDB_REGNO_TDATA1 = CSR_TDATA1 + GDB_REGNO_CSR0, GDB_REGNO_TDATA2 = CSR_TDATA2 + GDB_REGNO_CSR0, @@ -89,6 +95,18 @@ enum gdb_regno { GDB_REGNO_SATP = CSR_SATP + GDB_REGNO_CSR0, GDB_REGNO_CSR4095 = GDB_REGNO_CSR0 + 4095, GDB_REGNO_PRIV = 4161, + /* It's still undecided what register numbers GDB will actually use for + * these. 
See + * https://groups.google.com/a/groups.riscv.org/d/msg/sw-dev/7lQYiTUN9Ms/gTxGhzaYBQAJ + */ + GDB_REGNO_V0, GDB_REGNO_V1, GDB_REGNO_V2, GDB_REGNO_V3, + GDB_REGNO_V4, GDB_REGNO_V5, GDB_REGNO_V6, GDB_REGNO_V7, + GDB_REGNO_V8, GDB_REGNO_V9, GDB_REGNO_V10, GDB_REGNO_V11, + GDB_REGNO_V12, GDB_REGNO_V13, GDB_REGNO_V14, GDB_REGNO_V15, + GDB_REGNO_V16, GDB_REGNO_V17, GDB_REGNO_V18, GDB_REGNO_V19, + GDB_REGNO_V20, GDB_REGNO_V21, GDB_REGNO_V22, GDB_REGNO_V23, + GDB_REGNO_V24, GDB_REGNO_V25, GDB_REGNO_V26, GDB_REGNO_V27, + GDB_REGNO_V28, GDB_REGNO_V29, GDB_REGNO_V30, GDB_REGNO_V31, GDB_REGNO_COUNT }; diff --git a/src/target/riscv/opcodes.h b/src/target/riscv/opcodes.h index 52e9a3b..4557f98 100644 --- a/src/target/riscv/opcodes.h +++ b/src/target/riscv/opcodes.h @@ -323,3 +323,33 @@ static uint32_t auipc(unsigned int dest) { return MATCH_AUIPC | (dest << 7); } + +static uint32_t vsetvli(unsigned int dest, unsigned int src, uint16_t imm) __attribute__((unused)); +static uint32_t vsetvli(unsigned int dest, unsigned int src, uint16_t imm) +{ + return (bits(imm, 10, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_VSETVLI; +} + +static uint32_t vmv_x_s(unsigned int rd, unsigned int vs2) __attribute__((unused)); +static uint32_t vmv_x_s(unsigned int rd, unsigned int vs2) +{ + return (vs2 << 20) | (rd << 7) | MATCH_VMV_X_S; +} + +static uint32_t vmv_s_x(unsigned int vd, unsigned int vs2) __attribute__((unused)); +static uint32_t vmv_s_x(unsigned int vd, unsigned int rs1) +{ + return (rs1 << 15) | (vd << 7) | MATCH_VMV_S_X; +} + +static uint32_t vslide1down_vx(unsigned int vd, unsigned int vs2, + unsigned int rs1, unsigned int vm) __attribute__((unused)); +static uint32_t vslide1down_vx(unsigned int vd, unsigned int vs2, + unsigned int rs1, unsigned int vm) +{ + return (vm << 25) | (vs2 << 20) | (rs1 << 15) | (vd << 7) | + MATCH_VSLIDE1DOWN_VX; +} diff --git a/src/target/riscv/program.c b/src/target/riscv/program.c index 8645ed6..b370401 100644 --- a/src/target/riscv/program.c +++ b/src/target/riscv/program.c @@ -56,7 +56,8 @@ int riscv_program_exec(struct riscv_program *p, struct target *t) if (riscv_program_ebreak(p) != ERROR_OK) { LOG_ERROR("Unable to write ebreak"); for (size_t i = 0; i < riscv_debug_buffer_size(p->target); ++i) - LOG_ERROR("ram[%02x]: DASM(0x%08lx) [0x%08lx]", (int)i, (long)p->debug_buffer[i], (long)p->debug_buffer[i]); + LOG_ERROR("ram[%02x]: DASM(0x%08lx) [0x%08lx]", (int)i, + (long)p->debug_buffer[i], (long)p->debug_buffer[i]); return ERROR_FAIL; } diff --git a/src/target/riscv/riscv-013.c b/src/target/riscv/riscv-013.c index 4ad585b..4381f41 100644 --- a/src/target/riscv/riscv-013.c +++ b/src/target/riscv/riscv-013.c @@ -858,6 +858,8 @@ static uint32_t access_register_command(struct target *target, uint32_t number, command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3); break; default: + LOG_ERROR("%d-bit register %s not supported.", size, + gdb_regno_name(number)); assert(0); } @@ -877,6 +879,8 @@ static uint32_t access_register_command(struct target *target, uint32_t number, assert(reg_info); command = set_field(command, AC_ACCESS_REGISTER_REGNO, 0xc000 + reg_info->custom_number); + } else { + assert(0); } command |= flags; @@ -895,6 +899,9 @@ static int register_read_abstract(struct target *target, uint64_t *value, if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 && !info->abstract_read_csr_supported) return ERROR_FAIL; + /* The spec doesn't define abstract register numbers for vector registers. 
diff --git a/src/target/riscv/program.c b/src/target/riscv/program.c index 8645ed6..b370401 100644 --- a/src/target/riscv/program.c +++ b/src/target/riscv/program.c @@ -56,7 +56,8 @@ int riscv_program_exec(struct riscv_program *p, struct target *t) if (riscv_program_ebreak(p) != ERROR_OK) { LOG_ERROR("Unable to write ebreak"); for (size_t i = 0; i < riscv_debug_buffer_size(p->target); ++i) - LOG_ERROR("ram[%02x]: DASM(0x%08lx) [0x%08lx]", (int)i, (long)p->debug_buffer[i], (long)p->debug_buffer[i]); + LOG_ERROR("ram[%02x]: DASM(0x%08lx) [0x%08lx]", (int)i, + (long)p->debug_buffer[i], (long)p->debug_buffer[i]); return ERROR_FAIL; } diff --git a/src/target/riscv/riscv-013.c b/src/target/riscv/riscv-013.c index 4ad585b..4381f41 100644 --- a/src/target/riscv/riscv-013.c +++ b/src/target/riscv/riscv-013.c @@ -858,6 +858,8 @@ static uint32_t access_register_command(struct target *target, uint32_t number, command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3); break; default: + LOG_ERROR("%d-bit register %s not supported.", size, + gdb_regno_name(number)); assert(0); } @@ -877,6 +879,8 @@ static uint32_t access_register_command(struct target *target, uint32_t number, assert(reg_info); command = set_field(command, AC_ACCESS_REGISTER_REGNO, 0xc000 + reg_info->custom_number); + } else { + assert(0); } command |= flags; @@ -895,6 +899,9 @@ static int register_read_abstract(struct target *target, uint64_t *value, if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 && !info->abstract_read_csr_supported) return ERROR_FAIL; + /* The spec doesn't define abstract register numbers for vector registers. */ + if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31) + return ERROR_FAIL; uint32_t command = access_register_command(target, number, size, AC_ACCESS_REGISTER_TRANSFER); @@ -1053,6 +1060,54 @@ static int examine_progbuf(struct target *target) return ERROR_OK; } +static int is_fpu_reg(uint32_t gdb_regno) +{ + return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) || + (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) || + (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) || + (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR); +} + +static int is_vector_reg(uint32_t gdb_regno) +{ + return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) || + gdb_regno == GDB_REGNO_VSTART || + gdb_regno == GDB_REGNO_VXSAT || + gdb_regno == GDB_REGNO_VXRM || + gdb_regno == GDB_REGNO_VL || + gdb_regno == GDB_REGNO_VTYPE || + gdb_regno == GDB_REGNO_VLENB; +} + +static int prep_for_register_access(struct target *target, uint64_t *mstatus, + int regno) +{ + if (is_fpu_reg(regno) || is_vector_reg(regno)) { + if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK) + return ERROR_FAIL; + if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) { + if (register_write_direct(target, GDB_REGNO_MSTATUS, + set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK) + return ERROR_FAIL; + } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) { + if (register_write_direct(target, GDB_REGNO_MSTATUS, + set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK) + return ERROR_FAIL; + } + } + return ERROR_OK; +} + +static int cleanup_after_register_access(struct target *target, + uint64_t mstatus, int regno) +{ + if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) || + (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0)) + if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK) + return ERROR_FAIL; + return ERROR_OK; +} + typedef enum { SPACE_DMI_DATA, SPACE_DMI_PROGBUF, @@ -1251,6 +1306,10 @@ static int register_write_direct(struct target *target, unsigned number, if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK) return ERROR_FAIL; + uint64_t mstatus; + if (prep_for_register_access(target, &mstatus, number) != ERROR_OK) + return ERROR_FAIL; + scratch_mem_t scratch; bool use_scratch = false; if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 && @@ -1275,6 +1334,10 @@ static int register_write_direct(struct target *target, unsigned number, return ERROR_FAIL; } + } else if (number == GDB_REGNO_VTYPE) { + riscv_program_insert(&program, csrr(S0, CSR_VL)); + riscv_program_insert(&program, vsetvli(ZERO, S0, value)); + } else { if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK) return ERROR_FAIL; @@ -1284,6 +1347,15 @@ static int register_write_direct(struct target *target, unsigned number, riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0)); else riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0)); + } else if (number == GDB_REGNO_VL) { + /* "The XLEN-bit-wide read-only vl CSR can only be updated by the + * vsetvli and vsetvl instructions, and the fault-only-first vector + * load instruction variants." */ + riscv_reg_t vtype; + if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK) + return ERROR_FAIL; + if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK) + return ERROR_FAIL; } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) { riscv_program_csrw(&program, S0, number); } else {
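The hunk above handles the two vector CSRs that cannot simply be written with csrw: vtype is set by loading the live vl into s0 and issuing vsetvli with the new vtype as its immediate, and vl (see the spec text quoted in the comment) is set by loading the requested vl into s0 and issuing vsetvli with the current vtype as its immediate. A condensed sketch of the vl case (new_vl is illustrative; note vsetvli clamps the request to VLMAX, and its 11-bit immediate assumes vtype stays in that range):

    riscv_reg_t vtype;
    if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
        return ERROR_FAIL;
    /* s0 <- requested vl, then "vsetvli x0, s0, <vtype>" recomputes vl. */
    if (register_write_direct(target, GDB_REGNO_S0, new_vl) != ERROR_OK)
        return ERROR_FAIL;
    riscv_program_insert(&program, vsetvli(ZERO, S0, vtype));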
@@ -1302,6 +1374,9 @@ static int register_write_direct(struct target *target, unsigned number, if (use_scratch) scratch_release(target, &scratch); + if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK) + return ERROR_FAIL; + /* Restore S0. */ if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK) return ERROR_FAIL; @@ -1326,14 +1401,6 @@ static int register_read(struct target *target, uint64_t *value, uint32_t number return ERROR_OK; } -static int is_fpu_reg(uint32_t gdb_regno) -{ - return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) || - (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) || - (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) || - (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR); -} - /** Actually read registers from the target right now. */ static int register_read_direct(struct target *target, uint64_t *value, uint32_t number) { @@ -1352,21 +1419,15 @@ static int register_read_direct(struct target *target, uint64_t *value, uint32_t scratch_mem_t scratch; bool use_scratch = false; - uint64_t s0; + riscv_reg_t s0; if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK) return ERROR_FAIL; /* Write program to move data into s0. */ uint64_t mstatus; - if (is_fpu_reg(number)) { - if (register_read(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK) - return ERROR_FAIL; - if ((mstatus & MSTATUS_FS) == 0) - if (register_write_direct(target, GDB_REGNO_MSTATUS, - set_field(mstatus, MSTATUS_FS, 1)) != ERROR_OK) - return ERROR_FAIL; - } + if (prep_for_register_access(target, &mstatus, number) != ERROR_OK) + return ERROR_FAIL; if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) { if (riscv_supports_extension(target, riscv_current_hartid(target), 'D') @@ -1394,7 +1455,7 @@ static int register_read_direct(struct target *target, uint64_t *value, uint32_t } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) { riscv_program_csrr(&program, S0, number); } else { - LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number); + LOG_ERROR("Unsupported register: %s", gdb_regno_name(number)); return ERROR_FAIL; } @@ -1413,9 +1474,8 @@ static int register_read_direct(struct target *target, uint64_t *value, uint32_t return ERROR_FAIL; } - if (is_fpu_reg(number) && (mstatus & MSTATUS_FS) == 0) - if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK) - return ERROR_FAIL; + if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK) + return ERROR_FAIL; /* Restore S0. */ if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK) @@ -1476,6 +1536,20 @@ static int set_haltgroup(struct target *target, bool *supported) return ERROR_OK; } +static int discover_vlenb(struct target *target, int hartid) +{ + RISCV_INFO(r); + riscv_reg_t vlenb; + + if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) + return ERROR_FAIL; + r->vlenb[hartid] = vlenb; + + LOG_INFO("hart %d: Vector support with vlenb=%u", hartid, r->vlenb[hartid]); + + return ERROR_OK; +} + static int examine(struct target *target) { /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
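discover_vlenb() caches vlenb (the vector register length in bytes, VLEN/8, CSR 0xc22) for each hart, since every vector size computed for gdb later in this patch derives from it. Worked numbers for one assumed configuration, VLEN=128 bits on an RV64 hart:

    /* vlenb    = VLEN/8 = 16 bytes (read once at examine time)
     * reg size = vlenb * 8 = 128 bits (reported to gdb per v register)
     * debug_vl = DIV_ROUND_UP(vlenb * 8, xlen) = DIV_ROUND_UP(128, 64) = 2,
     * i.e. with SEW set to XLEN (vsew encoding 3 on RV64), each vector
     * register is shuttled through s0 in two 64-bit chunks. */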
@@ -1654,6 +1728,11 @@ static int examine(struct target *target) return ERROR_FAIL; } + if (riscv_supports_extension(target, i, 'V')) { + if (discover_vlenb(target, i) != ERROR_OK) + return ERROR_FAIL; + } + /* Now init registers based on what we discovered. */ if (riscv_init_registers(target) != ERROR_OK) return ERROR_FAIL; @@ -1761,6 +1840,153 @@ static unsigned riscv013_data_bits(struct target *target) return riscv_xlen(target); } +static int prep_for_vector_access(struct target *target, uint64_t *vtype, + uint64_t *vl, unsigned *debug_vl) +{ + RISCV_INFO(r); + /* TODO: this continuous save/restore is terrible for performance. */ + /* Write vtype and vl. */ + unsigned encoded_vsew; + switch (riscv_xlen(target)) { + case 32: + encoded_vsew = 2; + break; + case 64: + encoded_vsew = 3; + break; + default: + LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target)); + return ERROR_FAIL; + } + + /* Save vtype and vl. */ + if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK) + return ERROR_FAIL; + if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK) + return ERROR_FAIL; + + if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 2) != ERROR_OK) + return ERROR_FAIL; + *debug_vl = DIV_ROUND_UP(r->vlenb[r->current_hartid] * 8, + riscv_xlen(target)); + if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK) + return ERROR_FAIL; + + return ERROR_OK; +} + +static int cleanup_after_vector_access(struct target *target, uint64_t vtype, + uint64_t vl) +{ + /* Restore vtype and vl. */ + if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK) + return ERROR_FAIL; + if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK) + return ERROR_FAIL; + return ERROR_OK; +} + +static int riscv013_get_register_buf(struct target *target, + uint8_t *value, int regno) +{ + assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31); + + riscv_reg_t s0; + if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK) + return ERROR_FAIL; + + uint64_t mstatus; + if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK) + return ERROR_FAIL; + + uint64_t vtype, vl; + unsigned debug_vl; + if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK) + return ERROR_FAIL; + + unsigned vnum = regno - GDB_REGNO_V0; + unsigned xlen = riscv_xlen(target); + + struct riscv_program program; + riscv_program_init(&program, target); + riscv_program_insert(&program, vmv_x_s(S0, vnum)); + riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true)); + + int result = ERROR_OK; + for (unsigned i = 0; i < debug_vl; i++) { + /* Executing the program might result in an exception if there is some + * issue with the vector implementation/instructions we're using. If that + * happens, attempt to restore as usual. We may have clobbered the + * vector register we tried to read already. + * For other failures, we just return an error because things are probably + * so messed up that attempting to restore isn't going to help.
*/ + result = riscv_program_exec(&program, target); + if (result == ERROR_OK) { + uint64_t v; + if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK) + return ERROR_FAIL; + buf_set_u64(value, xlen * i, xlen, v); + } else { + break; + } + } + + if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK) + return ERROR_FAIL; + + if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK) + return ERROR_FAIL; + if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK) + return ERROR_FAIL; + + return result; +} + +static int riscv013_set_register_buf(struct target *target, + int regno, const uint8_t *value) +{ + assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31); + + riscv_reg_t s0; + if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK) + return ERROR_FAIL; + + uint64_t mstatus; + if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK) + return ERROR_FAIL; + + uint64_t vtype, vl; + unsigned debug_vl; + if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK) + return ERROR_FAIL; + + unsigned vnum = regno - GDB_REGNO_V0; + unsigned xlen = riscv_xlen(target); + + struct riscv_program program; + riscv_program_init(&program, target); + riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true)); + int result = ERROR_OK; + for (unsigned i = 0; i < debug_vl; i++) { + if (register_write_direct(target, GDB_REGNO_S0, + buf_get_u64(value, xlen * i, xlen)) != ERROR_OK) + return ERROR_FAIL; + result = riscv_program_exec(&program, target); + if (result != ERROR_OK) + break; + } + + if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK) + return ERROR_FAIL; + + if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK) + return ERROR_FAIL; + if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK) + return ERROR_FAIL; + + return result; +} + static int init_target(struct command_context *cmd_ctx, struct target *target) { @@ -1769,6 +1995,8 @@ static int init_target(struct command_context *cmd_ctx, generic_info->get_register = &riscv013_get_register; generic_info->set_register = &riscv013_set_register; + generic_info->get_register_buf = &riscv013_get_register_buf; + generic_info->set_register_buf = &riscv013_set_register_buf; generic_info->select_current_hart = &riscv013_select_current_hart; generic_info->is_halted = &riscv013_is_halted; generic_info->resume_go = &riscv013_resume_go; @@ -3385,10 +3613,12 @@ static int riscv013_get_register(struct target *target, int result = ERROR_OK; if (rid == GDB_REGNO_PC) { + /* TODO: move this into riscv.c. */ result = register_read(target, value, GDB_REGNO_DPC); LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value); } else if (rid == GDB_REGNO_PRIV) { uint64_t dcsr; + /* TODO: move this into riscv.c. 
*/ result = register_read(target, &dcsr, GDB_REGNO_DCSR); *value = get_field(dcsr, CSR_DCSR_PRV); } else { diff --git a/src/target/riscv/riscv.c b/src/target/riscv/riscv.c index 7e7f8ba..35dbe77 100644 --- a/src/target/riscv/riscv.c +++ b/src/target/riscv/riscv.c @@ -3229,6 +3229,74 @@ const char *gdb_regno_name(enum gdb_regno regno) return "priv"; case GDB_REGNO_SATP: return "satp"; + case GDB_REGNO_VTYPE: + return "vtype"; + case GDB_REGNO_VL: + return "vl"; + case GDB_REGNO_V0: + return "v0"; + case GDB_REGNO_V1: + return "v1"; + case GDB_REGNO_V2: + return "v2"; + case GDB_REGNO_V3: + return "v3"; + case GDB_REGNO_V4: + return "v4"; + case GDB_REGNO_V5: + return "v5"; + case GDB_REGNO_V6: + return "v6"; + case GDB_REGNO_V7: + return "v7"; + case GDB_REGNO_V8: + return "v8"; + case GDB_REGNO_V9: + return "v9"; + case GDB_REGNO_V10: + return "v10"; + case GDB_REGNO_V11: + return "v11"; + case GDB_REGNO_V12: + return "v12"; + case GDB_REGNO_V13: + return "v13"; + case GDB_REGNO_V14: + return "v14"; + case GDB_REGNO_V15: + return "v15"; + case GDB_REGNO_V16: + return "v16"; + case GDB_REGNO_V17: + return "v17"; + case GDB_REGNO_V18: + return "v18"; + case GDB_REGNO_V19: + return "v19"; + case GDB_REGNO_V20: + return "v20"; + case GDB_REGNO_V21: + return "v21"; + case GDB_REGNO_V22: + return "v22"; + case GDB_REGNO_V23: + return "v23"; + case GDB_REGNO_V24: + return "v24"; + case GDB_REGNO_V25: + return "v25"; + case GDB_REGNO_V26: + return "v26"; + case GDB_REGNO_V27: + return "v27"; + case GDB_REGNO_V28: + return "v28"; + case GDB_REGNO_V29: + return "v29"; + case GDB_REGNO_V30: + return "v30"; + case GDB_REGNO_V31: + return "v31"; default: if (regno <= GDB_REGNO_XPR31) sprintf(buf, "x%d", regno - GDB_REGNO_ZERO); @@ -3246,20 +3314,35 @@ static int register_get(struct reg *reg) { riscv_reg_info_t *reg_info = reg->arch_info; struct target *target = reg_info->target; - uint64_t value; - int result = riscv_get_register(target, &value, reg->number); - if (result != ERROR_OK) - return result; - buf_set_u64(reg->value, 0, reg->size, value); + RISCV_INFO(r); + + if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) { + if (!r->get_register_buf) { + LOG_ERROR("Reading register %s not supported on this RISC-V target.", + gdb_regno_name(reg->number)); + return ERROR_FAIL; + } + + if (r->get_register_buf(target, reg->value, reg->number) != ERROR_OK) + return ERROR_FAIL; + } else { + uint64_t value; + int result = riscv_get_register(target, &value, reg->number); + if (result != ERROR_OK) + return result; + buf_set_u64(reg->value, 0, reg->size, value); + } /* CSRs (and possibly other extension) registers may change value at any * time. 
*/ if (reg->number <= GDB_REGNO_XPR31 || (reg->number >= GDB_REGNO_FPR0 && reg->number <= GDB_REGNO_FPR31) || + (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) || reg->number == GDB_REGNO_PC) reg->valid = true; - LOG_DEBUG("[%d]{%d} read 0x%" PRIx64 " from %s (valid=%d)", - target->coreid, riscv_current_hartid(target), value, reg->name, - reg->valid); + char *str = buf_to_str(reg->value, reg->size, 16); + LOG_DEBUG("[%d]{%d} read 0x%s from %s (valid=%d)", target->coreid, + riscv_current_hartid(target), str, reg->name, reg->valid); + free(str); return ERROR_OK; } @@ -3267,22 +3350,37 @@ static int register_set(struct reg *reg, uint8_t *buf) { riscv_reg_info_t *reg_info = reg->arch_info; struct target *target = reg_info->target; + RISCV_INFO(r); - uint64_t value = buf_get_u64(buf, 0, reg->size); + char *str = buf_to_str(buf, reg->size, 16); + LOG_DEBUG("[%d]{%d} write 0x%s to %s (valid=%d)", target->coreid, + riscv_current_hartid(target), str, reg->name, reg->valid); + free(str); - LOG_DEBUG("[%d]{%d} write 0x%" PRIx64 " to %s (valid=%d)", - target->coreid, riscv_current_hartid(target), value, reg->name, - reg->valid); - struct reg *r = &target->reg_cache->reg_list[reg->number]; + memcpy(reg->value, buf, DIV_ROUND_UP(reg->size, 8)); /* CSRs (and possibly other extension) registers may change value at any * time. */ if (reg->number <= GDB_REGNO_XPR31 || (reg->number >= GDB_REGNO_FPR0 && reg->number <= GDB_REGNO_FPR31) || + (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) || reg->number == GDB_REGNO_PC) - r->valid = true; - memcpy(r->value, buf, (r->size + 7) / 8); + reg->valid = true; + + if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) { + if (!r->set_register_buf) { + LOG_ERROR("Writing register %s not supported on this RISC-V target.", + gdb_regno_name(reg->number)); + return ERROR_FAIL; + } + + if (r->set_register_buf(target, reg->number, reg->value) != ERROR_OK) + return ERROR_FAIL; + } else { + uint64_t value = buf_get_u64(buf, 0, reg->size); + if (riscv_set_register(target, reg->number, value) != ERROR_OK) + return ERROR_FAIL; + } - riscv_set_register(target, reg->number, value); return ERROR_OK; } @@ -3333,6 +3431,8 @@ int riscv_init_registers(struct target *target) calloc(target->reg_cache->num_regs, max_reg_name_len); char *reg_name = info->reg_names; + int hartid = riscv_current_hartid(target); + static struct reg_feature feature_cpu = { .name = "org.gnu.gdb.riscv.cpu" }; @@ -3342,6 +3442,9 @@ int riscv_init_registers(struct target *target) static struct reg_feature feature_csr = { .name = "org.gnu.gdb.riscv.csr" }; + static struct reg_feature feature_vector = { + .name = "org.gnu.gdb.riscv.vector" + }; static struct reg_feature feature_virtual = { .name = "org.gnu.gdb.riscv.virtual" }; @@ -3349,14 +3452,104 @@ int riscv_init_registers(struct target *target) .name = "org.gnu.gdb.riscv.custom" }; - static struct reg_data_type type_ieee_single = { - .type = REG_TYPE_IEEE_SINGLE, - .id = "ieee_single" - }; - static struct reg_data_type type_ieee_double = { - .type = REG_TYPE_IEEE_DOUBLE, - .id = "ieee_double" - }; + /* These types are built into gdb. 
*/ + static struct reg_data_type type_ieee_single = { .type = REG_TYPE_IEEE_SINGLE, .id = "ieee_single" }; + static struct reg_data_type type_ieee_double = { .type = REG_TYPE_IEEE_DOUBLE, .id = "ieee_double" }; + static struct reg_data_type type_uint8 = { .type = REG_TYPE_UINT8, .id = "uint8" }; + static struct reg_data_type type_uint16 = { .type = REG_TYPE_UINT16, .id = "uint16" }; + static struct reg_data_type type_uint32 = { .type = REG_TYPE_UINT32, .id = "uint32" }; + static struct reg_data_type type_uint64 = { .type = REG_TYPE_UINT64, .id = "uint64" }; + static struct reg_data_type type_uint128 = { .type = REG_TYPE_UINT128, .id = "uint128" }; + + /* This is roughly the XML we want: + * <vector id="bytes" type="uint8" count="16"/> + * <vector id="shorts" type="uint16" count="8"/> + * <vector id="words" type="uint32" count="4"/> + * <vector id="longs" type="uint64" count="2"/> + * <vector id="quads" type="uint128" count="1"/> + * <union id="riscv_vector_type"> + * <field name="b" type="bytes"/> + * <field name="s" type="shorts"/> + * <field name="w" type="words"/> + * <field name="l" type="longs"/> + * <field name="q" type="quads"/> + * </union> + */ + + info->vector_uint8.type = &type_uint8; + info->vector_uint8.count = info->vlenb[hartid]; + info->type_uint8_vector.type = REG_TYPE_ARCH_DEFINED; + info->type_uint8_vector.id = "bytes"; + info->type_uint8_vector.type_class = REG_TYPE_CLASS_VECTOR; + info->type_uint8_vector.reg_type_vector = &info->vector_uint8; + + info->vector_uint16.type = &type_uint16; + info->vector_uint16.count = info->vlenb[hartid] / 2; + info->type_uint16_vector.type = REG_TYPE_ARCH_DEFINED; + info->type_uint16_vector.id = "shorts"; + info->type_uint16_vector.type_class = REG_TYPE_CLASS_VECTOR; + info->type_uint16_vector.reg_type_vector = &info->vector_uint16; + + info->vector_uint32.type = &type_uint32; + info->vector_uint32.count = info->vlenb[hartid] / 4; + info->type_uint32_vector.type = REG_TYPE_ARCH_DEFINED; + info->type_uint32_vector.id = "words"; + info->type_uint32_vector.type_class = REG_TYPE_CLASS_VECTOR; + info->type_uint32_vector.reg_type_vector = &info->vector_uint32; + + info->vector_uint64.type = &type_uint64; + info->vector_uint64.count = info->vlenb[hartid] / 8; + info->type_uint64_vector.type = REG_TYPE_ARCH_DEFINED; + info->type_uint64_vector.id = "longs"; + info->type_uint64_vector.type_class = REG_TYPE_CLASS_VECTOR; + info->type_uint64_vector.reg_type_vector = &info->vector_uint64; + + info->vector_uint128.type = &type_uint128; + info->vector_uint128.count = info->vlenb[hartid] / 16; + info->type_uint128_vector.type = REG_TYPE_ARCH_DEFINED; + info->type_uint128_vector.id = "quads"; + info->type_uint128_vector.type_class = REG_TYPE_CLASS_VECTOR; + info->type_uint128_vector.reg_type_vector = &info->vector_uint128; + + info->vector_fields[0].name = "b"; + info->vector_fields[0].type = &info->type_uint8_vector; + if (info->vlenb[hartid] >= 2) { + info->vector_fields[0].next = info->vector_fields + 1; + info->vector_fields[1].name = "s"; + info->vector_fields[1].type = &info->type_uint16_vector; + } else { + info->vector_fields[0].next = NULL; + } + if (info->vlenb[hartid] >= 4) { + info->vector_fields[1].next = info->vector_fields + 2; + info->vector_fields[2].name = "w"; + info->vector_fields[2].type = &info->type_uint32_vector; + } else { + info->vector_fields[1].next = NULL; + } + if (info->vlenb[hartid] >= 8) { + info->vector_fields[2].next = info->vector_fields + 3; + info->vector_fields[3].name = "l"; + info->vector_fields[3].type = 
&info->type_uint64_vector; + } else { + info->vector_fields[2].next = NULL; + } + if (info->vlenb[hartid] >= 16) { + info->vector_fields[3].next = info->vector_fields + 4; + info->vector_fields[4].name = "q"; + info->vector_fields[4].type = &info->type_uint128_vector; + } else { + info->vector_fields[3].next = NULL; + } + info->vector_fields[4].next = NULL; + + info->vector_union.fields = info->vector_fields; + + info->type_vector.type = REG_TYPE_ARCH_DEFINED; + info->type_vector.id = "riscv_vector"; + info->type_vector.type_class = REG_TYPE_CLASS_UNION; + info->type_vector.reg_type_union = &info->vector_union; + struct csr_info csr_info[] = { #define DECLARE_CSR(name, number) { number, #name }, #include "encoding.h" @@ -3372,8 +3565,6 @@ int riscv_init_registers(struct target *target) riscv_reg_info_t *shared_reg_info = calloc(1, sizeof(riscv_reg_info_t)); shared_reg_info->target = target; - int hartid = riscv_current_hartid(target); - /* When gdb requests register N, gdb_get_register_packet() assumes that this * is register at index N in reg_list. So if there are certain registers * that don't exist, we need to leave holes in the list (or renumber, but @@ -3731,6 +3922,15 @@ int riscv_init_registers(struct target *target) case CSR_MHPMCOUNTER31H: r->exist = riscv_xlen(target) == 32; break; + + case CSR_VSTART: + case CSR_VXSAT: + case CSR_VXRM: + case CSR_VL: + case CSR_VTYPE: + case CSR_VLENB: + r->exist = riscv_supports_extension(target, hartid, 'V'); + break; } if (!r->exist && expose_csr) { @@ -3749,6 +3949,15 @@ int riscv_init_registers(struct target *target) r->feature = &feature_virtual; r->size = 8; + } else if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31) { + r->caller_save = false; + r->exist = riscv_supports_extension(target, hartid, 'V'); + r->size = info->vlenb[hartid] * 8; + sprintf(reg_name, "v%d", number - GDB_REGNO_V0); + r->group = "vector"; + r->feature = &feature_vector; + r->reg_data_type = &info->type_vector; + } else if (number >= GDB_REGNO_COUNT) { /* Custom registers. */ assert(expose_custom); diff --git a/src/target/riscv/riscv.h b/src/target/riscv/riscv.h index 87975f5..9fa2138 100644 --- a/src/target/riscv/riscv.h +++ b/src/target/riscv/riscv.h @@ -7,6 +7,7 @@ struct riscv_program; #include "opcodes.h" #include "gdb_regs.h" #include "jtag/jtag.h" +#include "target/register.h" /* The register cache is statically allocated. */ #define RISCV_MAX_HARTS 1024 @@ -74,6 +75,7 @@ typedef struct { /* It's possible that each core has a different supported ISA set. */ int xlen[RISCV_MAX_HARTS]; riscv_reg_t misa[RISCV_MAX_HARTS]; + unsigned vlenb[RISCV_MAX_HARTS]; /* The number of triggers per hart. */ unsigned trigger_count[RISCV_MAX_HARTS]; @@ -110,6 +112,9 @@ typedef struct { riscv_reg_t *value, int hid, int rid); int (*set_register)(struct target *, int hartid, int regid, uint64_t value); + int (*get_register_buf)(struct target *target, uint8_t *buf, int regno); + int (*set_register_buf)(struct target *target, int regno, + const uint8_t *buf); int (*select_current_hart)(struct target *); bool (*is_halted)(struct target *target); /* Resume this target, as well as every other prepped target that can be @@ -148,6 +153,21 @@ typedef struct { /* How many harts are attached to the DM that this target is attached to? */ int (*hart_count)(struct target *target); unsigned (*data_bits)(struct target *target); + + /* Storage for vector register types. 
*/ + struct reg_data_type_vector vector_uint8; + struct reg_data_type_vector vector_uint16; + struct reg_data_type_vector vector_uint32; + struct reg_data_type_vector vector_uint64; + struct reg_data_type_vector vector_uint128; + struct reg_data_type type_uint8_vector; + struct reg_data_type type_uint16_vector; + struct reg_data_type type_uint32_vector; + struct reg_data_type type_uint64_vector; + struct reg_data_type type_uint128_vector; + struct reg_data_type_union_field vector_fields[5]; + struct reg_data_type_union vector_union; + struct reg_data_type type_vector; } riscv_info_t; typedef struct {
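Taken together, the access scheme rotates each vector register through s0. A condensed sketch of the debug program that riscv013_get_register_buf() runs, assuming mstatus.VS has been enabled and vtype/vl set up as described above:

    /* Repeated debug_vl times:
     *   vmv.x.s        s0, vN      ; element 0 of vN -> s0
     *   vslide1down.vx vN, vN, s0  ; shift vN down one element and
     *                              ; write s0 back into the top slot
     * after which s0 is read out through the debug module. With vl equal
     * to debug_vl and SEW equal to XLEN, debug_vl iterations observe every
     * element exactly once and leave vN rotated back to its original
     * contents, so a read is non-destructive unless an instruction traps
     * partway through. Writes reuse the vslide1down step alone, loading
     * s0 with the next chunk before each execution. */

On the gdb side these registers then appear with the union type built in riscv_init_registers(), so a command like "p/x $v0.w" should (assuming gdb's usual union-field syntax) display a 128-bit register as four 32-bit words.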