Diffstat (limited to 'src/target/riscv')
-rw-r--r--  src/target/riscv/asm.h                |    2
-rw-r--r--  src/target/riscv/batch.c              |  110
-rw-r--r--  src/target/riscv/batch.h              |   17
-rw-r--r--  src/target/riscv/debug_defines.h      | 2622
-rw-r--r--  src/target/riscv/encoding.h           | 2122
-rw-r--r--  src/target/riscv/gdb_regs.h           |   26
-rw-r--r--  src/target/riscv/opcodes.h            |   44
-rw-r--r--  src/target/riscv/program.c            |   29
-rw-r--r--  src/target/riscv/program.h            |    6
-rw-r--r--  src/target/riscv/riscv-011.c          |  135
-rw-r--r--  src/target/riscv/riscv-013.c          | 2523
-rw-r--r--  src/target/riscv/riscv.c              | 2043
-rw-r--r--  src/target/riscv/riscv.h              |  136
-rw-r--r--  src/target/riscv/riscv_semihosting.c  |   70
14 files changed, 7226 insertions, 2659 deletions
diff --git a/src/target/riscv/asm.h b/src/target/riscv/asm.h
index d81aa02..6ceb8c9 100644
--- a/src/target/riscv/asm.h
+++ b/src/target/riscv/asm.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#ifndef TARGET__RISCV__ASM_H
#define TARGET__RISCV__ASM_H
diff --git a/src/target/riscv/batch.c b/src/target/riscv/batch.c
index d041ed1..43f2ffb 100644
--- a/src/target/riscv/batch.c
+++ b/src/target/riscv/batch.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
@@ -9,21 +11,53 @@
#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
+#define DTM_DMI_MAX_ADDRESS_LENGTH ((1<<DTM_DTMCS_ABITS_LENGTH)-1)
+#define DMI_SCAN_MAX_BIT_LENGTH (DTM_DMI_MAX_ADDRESS_LENGTH + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH)
+#define DMI_SCAN_BUF_SIZE (DIV_ROUND_UP(DMI_SCAN_MAX_BIT_LENGTH, 8))
+
static void dump_field(int idle, const struct scan_field *field);
struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle)
{
scans += 4;
struct riscv_batch *out = calloc(1, sizeof(*out));
+ if (!out)
+ goto error0;
out->target = target;
out->allocated_scans = scans;
out->idle_count = idle;
- out->data_out = malloc(sizeof(*out->data_out) * (scans) * sizeof(uint64_t));
- out->data_in = malloc(sizeof(*out->data_in) * (scans) * sizeof(uint64_t));
+ out->data_out = malloc(sizeof(*out->data_out) * (scans) * DMI_SCAN_BUF_SIZE);
+ if (!out->data_out)
+ goto error1;
+ out->data_in = malloc(sizeof(*out->data_in) * (scans) * DMI_SCAN_BUF_SIZE);
+ if (!out->data_in)
+ goto error2;
out->fields = malloc(sizeof(*out->fields) * (scans));
+ if (!out->fields)
+ goto error3;
+ if (bscan_tunnel_ir_width != 0) {
+ out->bscan_ctxt = malloc(sizeof(*out->bscan_ctxt) * (scans));
+ if (!out->bscan_ctxt)
+ goto error4;
+ }
out->last_scan = RISCV_SCAN_TYPE_INVALID;
out->read_keys = malloc(sizeof(*out->read_keys) * (scans));
+ if (!out->read_keys)
+ goto error5;
return out;
+
+error5:
+ free(out->bscan_ctxt);
+error4:
+ free(out->fields);
+error3:
+ free(out->data_in);
+error2:
+ free(out->data_out);
+error1:
+ free(out);
+error0:
+ return NULL;
}
void riscv_batch_free(struct riscv_batch *batch)
@@ -31,6 +65,8 @@ void riscv_batch_free(struct riscv_batch *batch)
free(batch->data_in);
free(batch->data_out);
free(batch->fields);
+ free(batch->bscan_ctxt);
+ free(batch->read_keys);
free(batch);
}
@@ -51,7 +87,11 @@ int riscv_batch_run(struct riscv_batch *batch)
riscv_batch_add_nop(batch);
for (size_t i = 0; i < batch->used_scans; ++i) {
- jtag_add_dr_scan(batch->target->tap, 1, batch->fields + i, TAP_IDLE);
+ if (bscan_tunnel_ir_width != 0)
+ riscv_add_bscan_tunneled_scan(batch->target, batch->fields+i, batch->bscan_ctxt+i);
+ else
+ jtag_add_dr_scan(batch->target->tap, 1, batch->fields + i, TAP_IDLE);
+
if (batch->idle_count > 0)
jtag_add_runtest(batch->idle_count, TAP_IDLE);
}
@@ -61,6 +101,12 @@ int riscv_batch_run(struct riscv_batch *batch)
return ERROR_FAIL;
}
+ if (bscan_tunnel_ir_width != 0) {
+ /* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
+ for (size_t i = 0; i < batch->used_scans; ++i)
+ buffer_shr((batch->fields + i)->in_value, DMI_SCAN_BUF_SIZE, 1);
+ }
+
for (size_t i = 0; i < batch->used_scans; ++i)
dump_field(batch->idle_count, batch->fields + i);
@@ -72,8 +118,8 @@ void riscv_batch_add_dmi_write(struct riscv_batch *batch, unsigned address, uint
assert(batch->used_scans < batch->allocated_scans);
struct scan_field *field = batch->fields + batch->used_scans;
field->num_bits = riscv_dmi_write_u64_bits(batch->target);
- field->out_value = (void *)(batch->data_out + batch->used_scans * sizeof(uint64_t));
- field->in_value = (void *)(batch->data_in + batch->used_scans * sizeof(uint64_t));
+ field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
+ field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
riscv_fill_dmi_write_u64(batch->target, (char *)field->out_value, address, data);
riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
batch->last_scan = RISCV_SCAN_TYPE_WRITE;
@@ -85,35 +131,35 @@ size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, unsigned address)
assert(batch->used_scans < batch->allocated_scans);
struct scan_field *field = batch->fields + batch->used_scans;
field->num_bits = riscv_dmi_write_u64_bits(batch->target);
- field->out_value = (void *)(batch->data_out + batch->used_scans * sizeof(uint64_t));
- field->in_value = (void *)(batch->data_in + batch->used_scans * sizeof(uint64_t));
+ field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
+ field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
riscv_fill_dmi_read_u64(batch->target, (char *)field->out_value, address);
riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
batch->last_scan = RISCV_SCAN_TYPE_READ;
batch->used_scans++;
- /* FIXME We get the read response back on the next scan. For now I'm
- * just sticking a NOP in there, but this should be coalesced away. */
- riscv_batch_add_nop(batch);
-
- batch->read_keys[batch->read_keys_used] = batch->used_scans - 1;
+ batch->read_keys[batch->read_keys_used] = batch->used_scans;
return batch->read_keys_used++;
}
-uint64_t riscv_batch_get_dmi_read(struct riscv_batch *batch, size_t key)
+unsigned riscv_batch_get_dmi_read_op(struct riscv_batch *batch, size_t key)
{
assert(key < batch->read_keys_used);
size_t index = batch->read_keys[key];
assert(index <= batch->used_scans);
- uint8_t *base = batch->data_in + 8 * index;
- return base[0] |
- ((uint64_t) base[1]) << 8 |
- ((uint64_t) base[2]) << 16 |
- ((uint64_t) base[3]) << 24 |
- ((uint64_t) base[4]) << 32 |
- ((uint64_t) base[5]) << 40 |
- ((uint64_t) base[6]) << 48 |
- ((uint64_t) base[7]) << 56;
+ uint8_t *base = batch->data_in + DMI_SCAN_BUF_SIZE * index;
+ /* extract "op" field from the DMI read result */
+ return (unsigned)buf_get_u32(base, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
+}
+
+uint32_t riscv_batch_get_dmi_read_data(struct riscv_batch *batch, size_t key)
+{
+ assert(key < batch->read_keys_used);
+ size_t index = batch->read_keys[key];
+ assert(index <= batch->used_scans);
+ uint8_t *base = batch->data_in + DMI_SCAN_BUF_SIZE * index;
+ /* extract "data" field from the DMI read result */
+ return buf_get_u32(base, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
}
void riscv_batch_add_nop(struct riscv_batch *batch)
@@ -121,8 +167,8 @@ void riscv_batch_add_nop(struct riscv_batch *batch)
assert(batch->used_scans < batch->allocated_scans);
struct scan_field *field = batch->fields + batch->used_scans;
field->num_bits = riscv_dmi_write_u64_bits(batch->target);
- field->out_value = (void *)(batch->data_out + batch->used_scans * sizeof(uint64_t));
- field->in_value = (void *)(batch->data_in + batch->used_scans * sizeof(uint64_t));
+ field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
+ field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
riscv_fill_dmi_nop_u64(batch->target, (char *)field->out_value);
riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
batch->last_scan = RISCV_SCAN_TYPE_NOP;
@@ -151,13 +197,17 @@ void dump_field(int idle, const struct scan_field *field)
log_printf_lf(LOG_LVL_DEBUG,
__FILE__, __LINE__, __PRETTY_FUNCTION__,
- "%db %di %s %08x @%02x -> %s %08x @%02x",
- field->num_bits, idle,
- op_string[out_op], out_data, out_address,
- status_string[in_op], in_data, in_address);
+ "%db %s %08x @%02x -> %s %08x @%02x; %di",
+ field->num_bits, op_string[out_op], out_data, out_address,
+ status_string[in_op], in_data, in_address, idle);
} else {
log_printf_lf(LOG_LVL_DEBUG,
- __FILE__, __LINE__, __PRETTY_FUNCTION__, "%db %di %s %08x @%02x -> ?",
- field->num_bits, idle, op_string[out_op], out_data, out_address);
+ __FILE__, __LINE__, __PRETTY_FUNCTION__, "%db %s %08x @%02x -> ?; %di",
+ field->num_bits, op_string[out_op], out_data, out_address, idle);
}
}
+
+size_t riscv_batch_available_scans(struct riscv_batch *batch)
+{
+ return batch->allocated_scans - batch->used_scans - 4;
+}
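For reference, the scan-buffer sizing above works out as follows: with DTM_DTMCS_ABITS_LENGTH of 6 the DMI address can be at most 63 bits wide, so one scan is at most 63 + 32 + 2 = 97 bits and DMI_SCAN_BUF_SIZE rounds up to 13 bytes per scan. Below is a minimal sketch of decoding one captured scan the way the new riscv_batch_get_dmi_read_op()/riscv_batch_get_dmi_read_data() accessors do, assuming OpenOCD's buf_get_u32() helper from helper/binarybuffer.h and the DTM_DMI_* definitions from debug_defines.h; the function name is hypothetical and not part of the patch.

#include <stdint.h>
#include "helper/binarybuffer.h"	/* buf_get_u32() */
#include "debug_defines.h"		/* DTM_DMI_* offsets and lengths */

/* Decode the status and data fields of one captured DMI scan; 'in' points
 * at one DMI_SCAN_BUF_SIZE-byte slot of batch->data_in. */
static void decode_dmi_scan(const uint8_t *in, unsigned *op, uint32_t *data)
{
	*op = buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);		/* bits 1:0 */
	*data = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);	/* bits 33:2 */
}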
diff --git a/src/target/riscv/batch.h b/src/target/riscv/batch.h
index 70690a6..9c42ba8 100644
--- a/src/target/riscv/batch.h
+++ b/src/target/riscv/batch.h
@@ -1,8 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#ifndef TARGET__RISCV__SCANS_H
#define TARGET__RISCV__SCANS_H
#include "target/target.h"
#include "jtag/jtag.h"
+#include "riscv.h"
enum riscv_scan_type {
RISCV_SCAN_TYPE_INVALID,
@@ -27,6 +30,11 @@ struct riscv_batch {
uint8_t *data_in;
struct scan_field *fields;
+ /* If in BSCAN mode, this field will be allocated (one per scan),
+ and utilized to tunnel all the scans in the batch. If not in
+ BSCAN mode, this field is unallocated and stays NULL */
+ riscv_bscan_tunneled_scan_context_t *bscan_ctxt;
+
/* In JTAG we scan out the previous value's output when performing a
* scan. This is a pain for users, so we just provide them the
* illusion of not having to do this by eliding all but the last NOP.
@@ -54,11 +62,16 @@ int riscv_batch_run(struct riscv_batch *batch);
void riscv_batch_add_dmi_write(struct riscv_batch *batch, unsigned address, uint64_t data);
/* DMI reads must be handled in two parts: the first one schedules a read and
- * provides a key, the second one actually obtains the value of that read .*/
+ * provides a key, the second one actually obtains the result of the read -
+ * status (op) and the actual data. */
size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, unsigned address);
-uint64_t riscv_batch_get_dmi_read(struct riscv_batch *batch, size_t key);
+unsigned riscv_batch_get_dmi_read_op(struct riscv_batch *batch, size_t key);
+uint32_t riscv_batch_get_dmi_read_data(struct riscv_batch *batch, size_t key);
/* Scans in a NOP. */
void riscv_batch_add_nop(struct riscv_batch *batch);
+/* Returns the number of available scans. */
+size_t riscv_batch_available_scans(struct riscv_batch *batch);
+
#endif
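To illustrate the two-part read API declared above, a minimal caller could look like the sketch below. This is not taken from the patch: the DMI address 0x11 (dmstatus), the treatment of an op value of 0 as success, and the helper name are assumptions, and the idle-cycle count is left at 0 for brevity.

#include "batch.h"
#include "helper/log.h"		/* ERROR_OK, ERROR_FAIL */

/* Hypothetical helper: read one DM register (dmstatus, DMI address 0x11)
 * through a batch and return its 32-bit value. */
static int batch_read_dmstatus(struct target *target, uint32_t *value)
{
	/* riscv_batch_alloc() pads the allocation itself, so asking for a
	 * single scan is enough here. */
	struct riscv_batch *batch = riscv_batch_alloc(target, 1, 0);
	if (!batch)
		return ERROR_FAIL;

	/* Schedule the read and keep the key it returns... */
	size_t key = riscv_batch_add_dmi_read(batch, 0x11);

	/* ...then run the batch and fetch status (op) and data for that key. */
	int result = riscv_batch_run(batch);
	if (result == ERROR_OK) {
		if (riscv_batch_get_dmi_read_op(batch, key) != 0)	/* 0 = success */
			result = ERROR_FAIL;
		else
			*value = riscv_batch_get_dmi_read_data(batch, key);
	}

	riscv_batch_free(batch);
	return result;
}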
diff --git a/src/target/riscv/debug_defines.h b/src/target/riscv/debug_defines.h
index d6ddd4f..cb518a8 100644
--- a/src/target/riscv/debug_defines.h
+++ b/src/target/riscv/debug_defines.h
@@ -1,22 +1,27 @@
+/*
+ * This file is auto-generated by running 'make debug_defines.h' in
+ * https://github.com/riscv/riscv-debug-spec/ (30b1a97)
+ */
+
#define DTM_IDCODE 0x01
/*
-* Identifies the release version of this part.
+ * Identifies the release version of this part.
*/
#define DTM_IDCODE_VERSION_OFFSET 28
#define DTM_IDCODE_VERSION_LENGTH 4
#define DTM_IDCODE_VERSION (0xfU << DTM_IDCODE_VERSION_OFFSET)
/*
-* Identifies the designer's part number of this part.
+ * Identifies the designer's part number of this part.
*/
#define DTM_IDCODE_PARTNUMBER_OFFSET 12
#define DTM_IDCODE_PARTNUMBER_LENGTH 16
#define DTM_IDCODE_PARTNUMBER (0xffffU << DTM_IDCODE_PARTNUMBER_OFFSET)
/*
-* Identifies the designer/manufacturer of this part. Bits 6:0 must be
-* bits 6:0 of the designer/manufacturer's Identification Code as
-* assigned by JEDEC Standard JEP106. Bits 10:7 contain the modulo-16
-* count of the number of continuation characters (0x7f) in that same
-* Identification Code.
+ * Identifies the designer/manufacturer of this part. Bits 6:0 must be
+ * bits 6:0 of the designer/manufacturer's Identification Code as
+ * assigned by JEDEC Standard JEP106. Bits 10:7 contain the modulo-16
+ * count of the number of continuation characters (0x7f) in that same
+ * Identification Code.
*/
#define DTM_IDCODE_MANUFID_OFFSET 1
#define DTM_IDCODE_MANUFID_LENGTH 11
@@ -26,1389 +31,1892 @@
#define DTM_IDCODE_1 (0x1U << DTM_IDCODE_1_OFFSET)
#define DTM_DTMCS 0x10
/*
-* Writing 1 to this bit does a hard reset of the DTM,
-* causing the DTM to forget about any outstanding DMI transactions.
-* In general this should only be used when the Debugger has
-* reason to expect that the outstanding DMI transaction will never
-* complete (e.g. a reset condition caused an inflight DMI transaction to
-* be cancelled).
+ * Writing 1 to this bit does a hard reset of the DTM,
+ * causing the DTM to forget about any outstanding DMI transactions, and
+ * returning all registers and internal state to their reset value.
+ * In general this should only be used when the Debugger has
+ * reason to expect that the outstanding DMI transaction will never
+ * complete (e.g. a reset condition caused an inflight DMI transaction to
+ * be cancelled).
*/
#define DTM_DTMCS_DMIHARDRESET_OFFSET 17
#define DTM_DTMCS_DMIHARDRESET_LENGTH 1
#define DTM_DTMCS_DMIHARDRESET (0x1U << DTM_DTMCS_DMIHARDRESET_OFFSET)
/*
-* Writing 1 to this bit clears the sticky error state
-* and allows the DTM to retry or complete the previous
-* transaction.
+ * Writing 1 to this bit clears the sticky error state, but does
+ * not affect outstanding DMI transactions.
*/
#define DTM_DTMCS_DMIRESET_OFFSET 16
#define DTM_DTMCS_DMIRESET_LENGTH 1
#define DTM_DTMCS_DMIRESET (0x1U << DTM_DTMCS_DMIRESET_OFFSET)
/*
-* This is a hint to the debugger of the minimum number of
-* cycles a debugger should spend in
-* Run-Test/Idle after every DMI scan to avoid a `busy'
-* return code (\Fdmistat of 3). A debugger must still
-* check \Fdmistat when necessary.
-*
-* 0: It is not necessary to enter Run-Test/Idle at all.
-*
-* 1: Enter Run-Test/Idle and leave it immediately.
-*
-* 2: Enter Run-Test/Idle and stay there for 1 cycle before leaving.
-*
-* And so on.
+ * This is a hint to the debugger of the minimum number of
+ * cycles a debugger should spend in
+ * Run-Test/Idle after every DMI scan to avoid a `busy'
+ * return code (\FdtmDtmcsDmistat of 3). A debugger must still
+ * check \FdtmDtmcsDmistat when necessary.
+ *
+ * 0: It is not necessary to enter Run-Test/Idle at all.
+ *
+ * 1: Enter Run-Test/Idle and leave it immediately.
+ *
+ * 2: Enter Run-Test/Idle and stay there for 1 cycle before leaving.
+ *
+ * And so on.
*/
#define DTM_DTMCS_IDLE_OFFSET 12
#define DTM_DTMCS_IDLE_LENGTH 3
#define DTM_DTMCS_IDLE (0x7U << DTM_DTMCS_IDLE_OFFSET)
/*
-* 0: No error.
-*
-* 1: Reserved. Interpret the same as 2.
-*
-* 2: An operation failed (resulted in \Fop of 2).
-*
-* 3: An operation was attempted while a DMI access was still in
-* progress (resulted in \Fop of 3).
+ * 0: No error.
+ *
+ * 1: Reserved. Interpret the same as 2.
+ *
+ * 2: An operation failed (resulted in \FdtmDmiOp of 2).
+ *
+ * 3: An operation was attempted while a DMI access was still in
+ * progress (resulted in \FdtmDmiOp of 3).
*/
#define DTM_DTMCS_DMISTAT_OFFSET 10
#define DTM_DTMCS_DMISTAT_LENGTH 2
#define DTM_DTMCS_DMISTAT (0x3U << DTM_DTMCS_DMISTAT_OFFSET)
/*
-* The size of \Faddress in \Rdmi.
+ * The size of \FdmSbaddressZeroAddress in \RdtmDmi.
*/
#define DTM_DTMCS_ABITS_OFFSET 4
#define DTM_DTMCS_ABITS_LENGTH 6
#define DTM_DTMCS_ABITS (0x3fU << DTM_DTMCS_ABITS_OFFSET)
/*
-* 0: Version described in spec version 0.11.
-*
-* 1: Version described in spec version 0.13 (and later?), which
-* reduces the DMI data width to 32 bits.
-*
-* 15: Version not described in any available version of this spec.
+ * 0: Version described in spec version 0.11.
+ *
+ * 1: Version described in spec version 0.13.
+ *
+ * 15: Version not described in any available version of this spec.
*/
#define DTM_DTMCS_VERSION_OFFSET 0
#define DTM_DTMCS_VERSION_LENGTH 4
#define DTM_DTMCS_VERSION (0xfU << DTM_DTMCS_VERSION_OFFSET)
#define DTM_DMI 0x11
/*
-* Address used for DMI access. In Update-DR this value is used
-* to access the DM over the DMI.
+ * Address used for DMI access. In Update-DR this value is used
+ * to access the DM over the DMI.
*/
#define DTM_DMI_ADDRESS_OFFSET 34
#define DTM_DMI_ADDRESS_LENGTH abits
-#define DTM_DMI_ADDRESS (((1L<<abits)-1) << DTM_DMI_ADDRESS_OFFSET)
+#define DTM_DMI_ADDRESS (((1L << abits) - 1) << DTM_DMI_ADDRESS_OFFSET)
/*
-* The data to send to the DM over the DMI during Update-DR, and
-* the data returned from the DM as a result of the previous operation.
+ * The data to send to the DM over the DMI during Update-DR, and
+ * the data returned from the DM as a result of the previous operation.
*/
#define DTM_DMI_DATA_OFFSET 2
#define DTM_DMI_DATA_LENGTH 32
#define DTM_DMI_DATA (0xffffffffULL << DTM_DMI_DATA_OFFSET)
/*
-* When the debugger writes this field, it has the following meaning:
-*
-* 0: Ignore \Fdata and \Faddress. (nop)
-*
-* Don't send anything over the DMI during Update-DR.
-* This operation should never result in a busy or error response.
-* The address and data reported in the following Capture-DR
-* are undefined.
-*
-* 1: Read from \Faddress. (read)
-*
-* 2: Write \Fdata to \Faddress. (write)
-*
-* 3: Reserved.
-*
-* When the debugger reads this field, it means the following:
-*
-* 0: The previous operation completed successfully.
-*
-* 1: Reserved.
-*
-* 2: A previous operation failed. The data scanned into \Rdmi in
-* this access will be ignored. This status is sticky and can be
-* cleared by writing \Fdmireset in \Rdtmcs.
-*
-* This indicates that the DM itself responded with an error.
-* Note: there are no specified cases in which the DM would
-* respond with an error, and DMI is not required to support
-* returning errors.
-*
-* 3: An operation was attempted while a DMI request is still in
-* progress. The data scanned into \Rdmi in this access will be
-* ignored. This status is sticky and can be cleared by writing
-* \Fdmireset in \Rdtmcs. If a debugger sees this status, it
-* needs to give the target more TCK edges between Update-DR and
-* Capture-DR. The simplest way to do that is to add extra transitions
-* in Run-Test/Idle.
-*
-* (The DTM, DM, and/or component may be in different clock domains,
-* so synchronization may be required. Some relatively fixed number of
-* TCK ticks may be needed for the request to reach the DM, complete,
-* and for the response to be synchronized back into the TCK domain.)
+ * When the debugger writes this field, it has the following meaning:
+ *
+ * 0: Ignore \FdmSbdataZeroData and \FdmSbaddressZeroAddress. (nop)
+ *
+ * Don't send anything over the DMI during Update-DR.
+ * This operation should never result in a busy or error response.
+ * The address and data reported in the following Capture-DR
+ * are undefined.
+ *
+ * 1: Read from \FdmSbaddressZeroAddress. (read)
+ *
+ * 2: Write \FdmSbdataZeroData to \FdmSbaddressZeroAddress. (write)
+ *
+ * 3: Reserved.
+ *
+ * When the debugger reads this field, it means the following:
+ *
+ * 0: The previous operation completed successfully.
+ *
+ * 1: Reserved.
+ *
+ * 2: A previous operation failed. The data scanned into \RdtmDmi in
+ * this access will be ignored. This status is sticky and can be
+ * cleared by writing \FdtmDtmcsDmireset in \RdtmDtmcs.
+ *
+ * This indicates that the DM itself responded with an error.
+ * There are no specified cases in which the DM would
+ * respond with an error, and DMI is not required to support
+ * returning errors.
+ *
+ * 3: An operation was attempted while a DMI request is still in
+ * progress. The data scanned into \RdtmDmi in this access will be
+ * ignored. This status is sticky and can be cleared by writing
+ * \FdtmDtmcsDmireset in \RdtmDtmcs. If a debugger sees this status, it
+ * needs to give the target more TCK edges between Update-DR and
+ * Capture-DR. The simplest way to do that is to add extra transitions
+ * in Run-Test/Idle.
*/
#define DTM_DMI_OP_OFFSET 0
#define DTM_DMI_OP_LENGTH 2
#define DTM_DMI_OP (0x3ULL << DTM_DMI_OP_OFFSET)
#define CSR_DCSR 0x7b0
/*
-* 0: There is no external debug support.
-*
-* 4: External debug support exists as it is described in this document.
-*
-* 15: There is external debug support, but it does not conform to any
-* available version of this spec.
+ * 0: There is no external debug support.
+ *
+ * 4: External debug support exists as it is described in this document.
+ *
+ * 15: There is external debug support, but it does not conform to any
+ * available version of this spec.
*/
#define CSR_DCSR_XDEBUGVER_OFFSET 28
#define CSR_DCSR_XDEBUGVER_LENGTH 4
#define CSR_DCSR_XDEBUGVER (0xfU << CSR_DCSR_XDEBUGVER_OFFSET)
/*
-* When 1, {\tt ebreak} instructions in Machine Mode enter Debug Mode.
+ * 0: {\tt ebreak} instructions in M-mode behave as described in the
+ * Privileged Spec.
+ *
+ * 1: {\tt ebreak} instructions in M-mode enter Debug Mode.
*/
#define CSR_DCSR_EBREAKM_OFFSET 15
#define CSR_DCSR_EBREAKM_LENGTH 1
#define CSR_DCSR_EBREAKM (0x1U << CSR_DCSR_EBREAKM_OFFSET)
/*
-* When 1, {\tt ebreak} instructions in Supervisor Mode enter Debug Mode.
+ * 0: {\tt ebreak} instructions in S-mode behave as described in the
+ * Privileged Spec.
+ *
+ * 1: {\tt ebreak} instructions in S-mode enter Debug Mode.
+ *
+ * This bit is hardwired to 0 if the hart does not support S mode.
*/
#define CSR_DCSR_EBREAKS_OFFSET 13
#define CSR_DCSR_EBREAKS_LENGTH 1
#define CSR_DCSR_EBREAKS (0x1U << CSR_DCSR_EBREAKS_OFFSET)
/*
-* When 1, {\tt ebreak} instructions in User/Application Mode enter
-* Debug Mode.
+ * 0: {\tt ebreak} instructions in U-mode behave as described in the
+ * Privileged Spec.
+ *
+ * 1: {\tt ebreak} instructions in U-mode enter Debug Mode.
+ *
+ * This bit is hardwired to 0 if the hart does not support U mode.
*/
#define CSR_DCSR_EBREAKU_OFFSET 12
#define CSR_DCSR_EBREAKU_LENGTH 1
#define CSR_DCSR_EBREAKU (0x1U << CSR_DCSR_EBREAKU_OFFSET)
/*
-* 0: Interrupts are disabled during single stepping.
-*
-* 1: Interrupts are enabled during single stepping.
-*
-* Implementations may hard wire this bit to 0.
-* The debugger must read back the value it
-* writes to check whether the feature is supported. If not
-* supported, interrupt behavior can be emulated by the debugger.
+ * 0: Interrupts (including NMI) are disabled during single stepping.
+ *
+ * 1: Interrupts (including NMI) are enabled during single stepping.
+ *
+ * Implementations may hard wire this bit to 0.
+ * In that case interrupt behavior can be emulated by the debugger.
+ *
+ * The debugger must not change the value of this bit while the hart
+ * is running.
*/
#define CSR_DCSR_STEPIE_OFFSET 11
#define CSR_DCSR_STEPIE_LENGTH 1
#define CSR_DCSR_STEPIE (0x1U << CSR_DCSR_STEPIE_OFFSET)
/*
-* 0: Increment counters as usual.
-*
-* 1: Don't increment any counters while in Debug Mode or on {\tt
-* ebreak} instructions that cause entry into Debug Mode. These
-* counters include the {\tt cycle} and {\tt instret} CSRs. This is
-* preferred for most debugging scenarios.
-*
-* An implementation may choose not to support writing to this bit.
-* The debugger must read back the value it writes to check whether
-* the feature is supported.
+ * 0: Increment counters as usual.
+ *
+ * 1: Don't increment any hart-local counters while in Debug Mode or
+ * on {\tt ebreak} instructions that cause entry into Debug Mode.
+ * These counters include the {\tt instret} CSR. On single-hart cores
+ * {\tt cycle} should be stopped, but on multi-hart cores it must keep
+ * incrementing.
+ *
+ * An implementation may hardwire this bit to 0 or 1.
*/
#define CSR_DCSR_STOPCOUNT_OFFSET 10
#define CSR_DCSR_STOPCOUNT_LENGTH 1
#define CSR_DCSR_STOPCOUNT (0x1U << CSR_DCSR_STOPCOUNT_OFFSET)
/*
-* 0: Increment timers as usual.
-*
-* 1: Don't increment any hart-local timers while in Debug Mode.
-*
-* An implementation may choose not to support writing to this bit.
-* The debugger must read back the value it writes to check whether
-* the feature is supported.
+ * 0: Increment timers as usual.
+ *
+ * 1: Don't increment any hart-local timers while in Debug Mode.
+ *
+ * An implementation may hardwire this bit to 0 or 1.
*/
#define CSR_DCSR_STOPTIME_OFFSET 9
#define CSR_DCSR_STOPTIME_LENGTH 1
#define CSR_DCSR_STOPTIME (0x1U << CSR_DCSR_STOPTIME_OFFSET)
/*
-* Explains why Debug Mode was entered.
-*
-* When there are multiple reasons to enter Debug Mode in a single
-* cycle, hardware should set \Fcause to the cause with the highest
-* priority.
-*
-* 1: An {\tt ebreak} instruction was executed. (priority 3)
-*
-* 2: The Trigger Module caused a breakpoint exception. (priority 4)
-*
-* 3: The debugger requested entry to Debug Mode. (priority 2)
-*
-* 4: The hart single stepped because \Fstep was set. (priority 1)
-*
-* Other values are reserved for future use.
+ * Explains why Debug Mode was entered.
+ *
+ * When there are multiple reasons to enter Debug Mode in a single
+ * cycle, hardware should set \FcsrDcsrCause to the cause with the highest
+ * priority.
+ *
+ * 1: An {\tt ebreak} instruction was executed. (priority 3)
+ *
+ * 2: The Trigger Module caused a breakpoint exception. (priority 4)
+ *
+ * 3: The debugger requested entry to Debug Mode using \FdmDmcontrolHaltreq.
+ * (priority 1)
+ *
+ * 4: The hart single stepped because \FcsrDcsrStep was set. (priority 0, lowest)
+ *
+ * 5: The hart halted directly out of reset due to \Fresethaltreq. It
+ * is also acceptable to report 3 when this happens. (priority 2)
+ *
+ * 6: The hart halted because it's part of a halt group. (priority 5,
+ * highest) Harts may report 3 for this cause instead.
+ *
+ * Other values are reserved for future use.
*/
#define CSR_DCSR_CAUSE_OFFSET 6
#define CSR_DCSR_CAUSE_LENGTH 3
#define CSR_DCSR_CAUSE (0x7U << CSR_DCSR_CAUSE_OFFSET)
/*
-* When 1, \Fmprv in \Rmstatus takes effect during debug mode.
-* When 0, it is ignored during debug mode.
-* Implementing this bit is optional.
-* If not implemented it should be tied to 0.
+ * 0: \FcsrMcontrolMprv in \Rmstatus is ignored in Debug Mode.
+ *
+ * 1: \FcsrMcontrolMprv in \Rmstatus takes effect in Debug Mode.
+ *
+ * Implementing this bit is optional. It may be tied to either 0 or 1.
*/
#define CSR_DCSR_MPRVEN_OFFSET 4
#define CSR_DCSR_MPRVEN_LENGTH 1
#define CSR_DCSR_MPRVEN (0x1U << CSR_DCSR_MPRVEN_OFFSET)
/*
-* When set, there is a Non-Maskable-Interrupt (NMI) pending for the hart.
-*
-* Since an NMI can indicate a hardware error condition,
-* reliable debugging may no longer be possible once this bit becomes set.
-* This is implementation-dependent.
+ * When set, there is a Non-Maskable-Interrupt (NMI) pending for the hart.
+ *
+ * Since an NMI can indicate a hardware error condition,
+ * reliable debugging may no longer be possible once this bit becomes set.
+ * This is implementation-dependent.
*/
#define CSR_DCSR_NMIP_OFFSET 3
#define CSR_DCSR_NMIP_LENGTH 1
#define CSR_DCSR_NMIP (0x1U << CSR_DCSR_NMIP_OFFSET)
/*
-* When set and not in Debug Mode, the hart will only execute a single
-* instruction and then enter Debug Mode.
-* If the instruction does not complete due to an exception,
-* the hart will immediately enter Debug Mode before executing
-* the trap handler, with appropriate exception registers set.
+ * When set and not in Debug Mode, the hart will only execute a single
+ * instruction and then enter Debug Mode. See Section~\ref{stepBit}
+ * for details.
+ *
+ * The debugger must not change the value of this bit while the hart
+ * is running.
*/
#define CSR_DCSR_STEP_OFFSET 2
#define CSR_DCSR_STEP_LENGTH 1
#define CSR_DCSR_STEP (0x1U << CSR_DCSR_STEP_OFFSET)
/*
-* Contains the privilege level the hart was operating in when Debug
-* Mode was entered. The encoding is described in Table
-* \ref{tab:privlevel}. A debugger can change this value to change
-* the hart's privilege level when exiting Debug Mode.
-*
-* Not all privilege levels are supported on all harts. If the
-* encoding written is not supported or the debugger is not allowed to
-* change to it, the hart may change to any supported privilege level.
+ * Contains the privilege level the hart was operating in when Debug
+ * Mode was entered. The encoding is described in Table
+ * \ref{tab:privlevel}. A debugger can change this value to change
+ * the hart's privilege level when exiting Debug Mode.
+ *
+ * Not all privilege levels are supported on all harts. If the
+ * encoding written is not supported or the debugger is not allowed to
+ * change to it, the hart may change to any supported privilege level.
*/
#define CSR_DCSR_PRV_OFFSET 0
#define CSR_DCSR_PRV_LENGTH 2
#define CSR_DCSR_PRV (0x3U << CSR_DCSR_PRV_OFFSET)
#define CSR_DPC 0x7b1
#define CSR_DPC_DPC_OFFSET 0
-#define CSR_DPC_DPC_LENGTH MXLEN
-#define CSR_DPC_DPC (((1L<<MXLEN)-1) << CSR_DPC_DPC_OFFSET)
+#define CSR_DPC_DPC_LENGTH DXLEN
+#define CSR_DPC_DPC (((1L << DXLEN) - 1) << CSR_DPC_DPC_OFFSET)
#define CSR_DSCRATCH0 0x7b2
#define CSR_DSCRATCH1 0x7b3
#define CSR_TSELECT 0x7a0
#define CSR_TSELECT_INDEX_OFFSET 0
-#define CSR_TSELECT_INDEX_LENGTH MXLEN
-#define CSR_TSELECT_INDEX (((1L<<MXLEN)-1) << CSR_TSELECT_INDEX_OFFSET)
+#define CSR_TSELECT_INDEX_LENGTH XLEN
+#define CSR_TSELECT_INDEX (((1L << XLEN) - 1) << CSR_TSELECT_INDEX_OFFSET)
#define CSR_TDATA1 0x7a1
/*
-* 0: There is no trigger at this \Rtselect.
-*
-* 1: The trigger is a legacy SiFive address match trigger. These
-* should not be implemented and aren't further documented here.
-*
-* 2: The trigger is an address/data match trigger. The remaining bits
-* in this register act as described in \Rmcontrol.
-*
-* 3: The trigger is an instruction count trigger. The remaining bits
-* in this register act as described in \Ricount.
-*
-* 4: The trigger is an interrupt trigger. The remaining bits
-* in this register act as described in \Ritrigger.
-*
-* 5: The trigger is an exception trigger. The remaining bits
-* in this register act as described in \Retrigger.
-*
-* 15: This trigger exists (so enumeration shouldn't terminate), but
-* is not currently available.
-*
-* Other values are reserved for future use.
-*
-* When this field is written to an unsupported value, it takes on its
-* reset value instead. The reset value is any one of the types
-* supported by the trigger selected by \Rtselect.
- */
-#define CSR_TDATA1_TYPE_OFFSET (MXLEN-4)
+ * 0: There is no trigger at this \RcsrTselect.
+ *
+ * 1: The trigger is a legacy SiFive address match trigger. These
+ * should not be implemented and aren't further documented here.
+ *
+ * 2: The trigger is an address/data match trigger. The remaining bits
+ * in this register act as described in \RcsrMcontrol.
+ *
+ * 3: The trigger is an instruction count trigger. The remaining bits
+ * in this register act as described in \RcsrIcount.
+ *
+ * 4: The trigger is an interrupt trigger. The remaining bits
+ * in this register act as described in \RcsrItrigger.
+ *
+ * 5: The trigger is an exception trigger. The remaining bits
+ * in this register act as described in \RcsrEtrigger.
+ *
+ * 12--14: These trigger types are available for non-standard use.
+ *
+ * 15: This trigger exists (so enumeration shouldn't terminate), but
+ * is not currently available.
+ *
+ * Other values are reserved for future use.
+ */
+#define CSR_TDATA1_TYPE_OFFSET (XLEN-4)
#define CSR_TDATA1_TYPE_LENGTH 4
#define CSR_TDATA1_TYPE (0xfULL << CSR_TDATA1_TYPE_OFFSET)
/*
-* 0: Both Debug and M Mode can write the {\tt tdata} registers at the
-* selected \Rtselect.
-*
-* 1: Only Debug Mode can write the {\tt tdata} registers at the
-* selected \Rtselect. Writes from other modes are ignored.
-*
-* This bit is only writable from Debug Mode.
- */
-#define CSR_TDATA1_DMODE_OFFSET (MXLEN-5)
+ * If \FcsrTdataOneType is 0, then this bit is hard-wired to 0.
+ *
+ * 0: Both Debug and M-mode can write the {\tt tdata} registers at the
+ * selected \RcsrTselect.
+ *
+ * 1: Only Debug Mode can write the {\tt tdata} registers at the
+ * selected \RcsrTselect. Writes from other modes are ignored.
+ *
+ * This bit is only writable from Debug Mode.
+ * When clearing this bit, the debugger should also clear the action field
+ * (whose location depends on \FcsrTdataOneType).
+ */
+#define CSR_TDATA1_DMODE_OFFSET (XLEN-5)
#define CSR_TDATA1_DMODE_LENGTH 1
#define CSR_TDATA1_DMODE (0x1ULL << CSR_TDATA1_DMODE_OFFSET)
/*
-* Trigger-specific data.
+ * If \FcsrTdataOneType is 0, then this field is hard-wired to 0.
+ *
+ * Trigger-specific data.
*/
#define CSR_TDATA1_DATA_OFFSET 0
-#define CSR_TDATA1_DATA_LENGTH (MXLEN - 5)
-#define CSR_TDATA1_DATA (((1L<<MXLEN - 5)-1) << CSR_TDATA1_DATA_OFFSET)
+#define CSR_TDATA1_DATA_LENGTH (XLEN - 5)
+#define CSR_TDATA1_DATA (((1L << XLEN - 5) - 1) << CSR_TDATA1_DATA_OFFSET)
#define CSR_TDATA2 0x7a2
#define CSR_TDATA2_DATA_OFFSET 0
-#define CSR_TDATA2_DATA_LENGTH MXLEN
-#define CSR_TDATA2_DATA (((1L<<MXLEN)-1) << CSR_TDATA2_DATA_OFFSET)
+#define CSR_TDATA2_DATA_LENGTH XLEN
+#define CSR_TDATA2_DATA (((1L << XLEN) - 1) << CSR_TDATA2_DATA_OFFSET)
#define CSR_TDATA3 0x7a3
#define CSR_TDATA3_DATA_OFFSET 0
-#define CSR_TDATA3_DATA_LENGTH MXLEN
-#define CSR_TDATA3_DATA (((1L<<MXLEN)-1) << CSR_TDATA3_DATA_OFFSET)
+#define CSR_TDATA3_DATA_LENGTH XLEN
+#define CSR_TDATA3_DATA (((1L << XLEN) - 1) << CSR_TDATA3_DATA_OFFSET)
#define CSR_TINFO 0x7a4
/*
-* One bit for each possible \Ftype enumerated in \Rtdataone. Bit N
-* corresponds to type N. If the bit is set, then that type is
-* supported by the currently selected trigger.
-*
-* If the currently selected trigger doesn't exist, this field
-* contains 1.
-*
-* If \Ftype is not writable, this register may be unimplemented, in
-* which case reading it causes an illegal instruction exception. In
-* this case the debugger can read the only supported type from
-* \Rtdataone.
+ * One bit for each possible \FcsrTdataOneType enumerated in \RcsrTdataOne. Bit N
+ * corresponds to type N. If the bit is set, then that type is
+ * supported by the currently selected trigger.
+ *
+ * If the currently selected trigger doesn't exist, this field
+ * contains 1.
*/
#define CSR_TINFO_INFO_OFFSET 0
#define CSR_TINFO_INFO_LENGTH 16
#define CSR_TINFO_INFO (0xffffULL << CSR_TINFO_INFO_OFFSET)
+#define CSR_TCONTROL 0x7a5
+/*
+ * M-mode previous trigger enable field.
+ *
+ * When a trap into M-mode is taken, \FcsrTcontrolMpte is set to the value of
+ * \FcsrTcontrolMte.
+ */
+#define CSR_TCONTROL_MPTE_OFFSET 7
+#define CSR_TCONTROL_MPTE_LENGTH 1
+#define CSR_TCONTROL_MPTE (0x1ULL << CSR_TCONTROL_MPTE_OFFSET)
+/*
+ * M-mode trigger enable field.
+ *
+ * 0: Triggers with action=0 do not match/fire while the hart is in M-mode.
+ *
+ * 1: Triggers do match/fire while the hart is in M-mode.
+ *
+ * When a trap into M-mode is taken, \FcsrTcontrolMte is set to 0. When {\tt
+ * mret} is executed, \FcsrTcontrolMte is set to the value of \FcsrTcontrolMpte.
+ */
+#define CSR_TCONTROL_MTE_OFFSET 3
+#define CSR_TCONTROL_MTE_LENGTH 1
+#define CSR_TCONTROL_MTE (0x1ULL << CSR_TCONTROL_MTE_OFFSET)
+#define CSR_MCONTEXT 0x7a8
+/*
+ * Machine mode software can write a context number to this register,
+ * which can be used to set triggers that only fire in that specific
+ * context.
+ *
+ * An implementation may tie any number of upper bits in this field to
+ * 0. It's recommended to implement no more than 6 bits on RV32, and
+ * 13 on RV64.
+ */
+#define CSR_MCONTEXT_MCONTEXT_OFFSET 0
+#define CSR_MCONTEXT_MCONTEXT_LENGTH XLEN
+#define CSR_MCONTEXT_MCONTEXT (((1L << XLEN) - 1) << CSR_MCONTEXT_MCONTEXT_OFFSET)
+#define CSR_SCONTEXT 0x7aa
+/*
+ * Supervisor mode software can write a context number to this
+ * register, which can be used to set triggers that only fire in that
+ * specific context.
+ *
+ * An implementation may tie any number of high bits in this field to
+ * 0. It's recommended to implement no more than 16 bits on RV32, and
+ * 34 on RV64.
+ */
+#define CSR_SCONTEXT_DATA_OFFSET 0
+#define CSR_SCONTEXT_DATA_LENGTH XLEN
+#define CSR_SCONTEXT_DATA (((1L << XLEN) - 1) << CSR_SCONTEXT_DATA_OFFSET)
#define CSR_MCONTROL 0x7a1
-#define CSR_MCONTROL_TYPE_OFFSET (MXLEN-4)
+#define CSR_MCONTROL_TYPE_OFFSET (XLEN-4)
#define CSR_MCONTROL_TYPE_LENGTH 4
#define CSR_MCONTROL_TYPE (0xfULL << CSR_MCONTROL_TYPE_OFFSET)
-#define CSR_MCONTROL_DMODE_OFFSET (MXLEN-5)
+#define CSR_MCONTROL_DMODE_OFFSET (XLEN-5)
#define CSR_MCONTROL_DMODE_LENGTH 1
#define CSR_MCONTROL_DMODE (0x1ULL << CSR_MCONTROL_DMODE_OFFSET)
/*
-* Specifies the largest naturally aligned powers-of-two (NAPOT) range
-* supported by the hardware when \Fmatch is 1. The value is the
-* logarithm base 2 of the
-* number of bytes in that range. A value of 0 indicates that only
-* exact value matches are supported (one byte range). A value of 63
-* corresponds to the maximum NAPOT range, which is $2^{63}$ bytes in
-* size.
+ * Specifies the largest naturally aligned powers-of-two (NAPOT) range
+ * supported by the hardware when \FcsrMcontrolMatch is 1. The value is the
+ * logarithm base 2 of the
+ * number of bytes in that range. A value of 0 indicates that only
+ * exact value matches are supported (one byte range). A value of 63
+ * corresponds to the maximum NAPOT range, which is $2^{63}$ bytes in
+ * size.
*/
-#define CSR_MCONTROL_MASKMAX_OFFSET (MXLEN-11)
+#define CSR_MCONTROL_MASKMAX_OFFSET (XLEN-11)
#define CSR_MCONTROL_MASKMAX_LENGTH 6
#define CSR_MCONTROL_MASKMAX (0x3fULL << CSR_MCONTROL_MASKMAX_OFFSET)
/*
-* If this optional bit is implemented, the hardware sets it when this
-* trigger matches. The trigger's user can set or clear it at any
-* time. The trigger's user can use this bit to determine which
-* trigger(s) matched. If the bit is not implemented, it is always 0
-* and writing it has no effect.
+ * This field only exists when XLEN is at least 64.
+ * It contains the 2 high bits of the access size. The low bits
+ * come from \FcsrMcontrolSizelo. See \FcsrMcontrolSizelo for how this
+ * is used.
+ */
+#define CSR_MCONTROL_SIZEHI_OFFSET 21
+#define CSR_MCONTROL_SIZEHI_LENGTH 2
+#define CSR_MCONTROL_SIZEHI (0x3ULL << CSR_MCONTROL_SIZEHI_OFFSET)
+/*
+ * If this bit is implemented, the hardware sets it when this
+ * trigger matches. The trigger's user can set or clear it at any
+ * time. It is used to determine which
+ * trigger(s) matched. If the bit is not implemented, it is always 0
+ * and writing it has no effect.
*/
#define CSR_MCONTROL_HIT_OFFSET 20
#define CSR_MCONTROL_HIT_LENGTH 1
#define CSR_MCONTROL_HIT (0x1ULL << CSR_MCONTROL_HIT_OFFSET)
/*
-* 0: Perform a match on the virtual address.
-*
-* 1: Perform a match on the data value loaded/stored, or the
-* instruction executed.
+ * 0: Perform a match on the lowest virtual address of the access. In
+ * addition, it is recommended that the trigger also fires if any of
+ * the other accessed virtual addresses match.
+ * (E.g. on a 32-bit read from 0x4000, the lowest address is 0x4000
+ * and the other addresses are 0x4001, 0x4002, and 0x4003.)
+ *
+ * 1: Perform a match on the data value loaded or stored, or the
+ * instruction executed.
*/
#define CSR_MCONTROL_SELECT_OFFSET 19
#define CSR_MCONTROL_SELECT_LENGTH 1
#define CSR_MCONTROL_SELECT (0x1ULL << CSR_MCONTROL_SELECT_OFFSET)
/*
-* 0: The action for this trigger will be taken just before the
-* instruction that triggered it is executed, but after all preceding
-* instructions are are committed.
-*
-* 1: The action for this trigger will be taken after the instruction
-* that triggered it is executed. It should be taken before the next
-* instruction is executed, but it is better to implement triggers and
-* not implement that suggestion than to not implement them at all.
-*
-* Most hardware will only implement one timing or the other, possibly
-* dependent on \Fselect, \Fexecute, \Fload, and \Fstore. This bit
-* primarily exists for the hardware to communicate to the debugger
-* what will happen. Hardware may implement the bit fully writable, in
-* which case the debugger has a little more control.
-*
-* Data load triggers with \Ftiming of 0 will result in the same load
-* happening again when the debugger lets the hart run. For data load
-* triggers, debuggers must first attempt to set the breakpoint with
-* \Ftiming of 1.
-*
-* A chain of triggers that don't all have the same \Ftiming value
-* will never fire (unless consecutive instructions match the
-* appropriate triggers).
+ * 0: The action for this trigger will be taken just before the
+ * instruction that triggered it is executed, but after all preceding
+ * instructions are committed. \Rmepc or \RcsrDpc (depending on
+ * \FcsrMcontrolAction) must be set to the virtual address of the
+ * instruction that matched.
+ *
+ * If this is combined with \FcsrMcontrolLoad then a memory access will be
+ * performed (including any side effects of performing such an access) even
+ * though the load will not update its destination register. Debuggers
+ * should consider this when setting such breakpoints on, for example,
+ * memory-mapped I/O addresses.
+ *
+ * 1: The action for this trigger will be taken after the instruction
+ * that triggered it is executed. It should be taken before the next
+ * instruction is executed, but it is better to implement triggers imprecisely
+ * than to not implement them at all.
+ * \Rmepc or \RcsrDpc (depending on \FcsrMcontrolAction) must be set to
+ * the virtual address of the next instruction that must be executed to
+ * preserve the program flow.
+ *
+ * Most hardware will only implement one timing or the other, possibly
+ * dependent on \FcsrMcontrolSelect, \FcsrMcontrolExecute,
+ * \FcsrMcontrolLoad, and \FcsrMcontrolStore. This bit
+ * primarily exists for the hardware to communicate to the debugger
+ * what will happen. Hardware may implement the bit fully writable, in
+ * which case the debugger has a little more control.
+ *
+ * Data load triggers with \FcsrMcontrolTiming of 0 will result in the same load
+ * happening again when the debugger lets the hart run. For data load
+ * triggers, debuggers must first attempt to set the breakpoint with
+ * \FcsrMcontrolTiming of 1.
+ *
+ * If a trigger with \FcsrMcontrolTiming of 0 matches, it is
+ * implementation-dependent whether that prevents a trigger with
+ * \FcsrMcontrolTiming of 1 matching as well.
*/
#define CSR_MCONTROL_TIMING_OFFSET 18
#define CSR_MCONTROL_TIMING_LENGTH 1
#define CSR_MCONTROL_TIMING (0x1ULL << CSR_MCONTROL_TIMING_OFFSET)
/*
-* The action to take when the trigger fires. The values are explained
-* in Table~\ref{tab:action}.
+ * This field contains the 2 low bits of the access size. The high bits come
+ * from \FcsrMcontrolSizehi. The combined value is interpreted as follows:
+ *
+ * 0: The trigger will attempt to match against an access of any size.
+ * The behavior is only well-defined if $|select|=0$, or if the access
+ * size is XLEN.
+ *
+ * 1: The trigger will only match against 8-bit memory accesses.
+ *
+ * 2: The trigger will only match against 16-bit memory accesses or
+ * execution of 16-bit instructions.
+ *
+ * 3: The trigger will only match against 32-bit memory accesses or
+ * execution of 32-bit instructions.
+ *
+ * 4: The trigger will only match against execution of 48-bit instructions.
+ *
+ * 5: The trigger will only match against 64-bit memory accesses or
+ * execution of 64-bit instructions.
+ *
+ * 6: The trigger will only match against execution of 80-bit instructions.
+ *
+ * 7: The trigger will only match against execution of 96-bit instructions.
+ *
+ * 8: The trigger will only match against execution of 112-bit instructions.
+ *
+ * 9: The trigger will only match against 128-bit memory accesses or
+ * execution of 128-bit instructions.
+ *
+ * An implementation must support the value of 0, but all other values
+ * are optional. It is recommended to support triggers for every
+ * access size the hart supports, as well as for every instruction
+ * size the hart supports.
+ */
+#define CSR_MCONTROL_SIZELO_OFFSET 16
+#define CSR_MCONTROL_SIZELO_LENGTH 2
+#define CSR_MCONTROL_SIZELO (0x3ULL << CSR_MCONTROL_SIZELO_OFFSET)
+/*
+ * The action to take when the trigger fires. The values are explained
+ * in Table~\ref{tab:action}.
*/
#define CSR_MCONTROL_ACTION_OFFSET 12
-#define CSR_MCONTROL_ACTION_LENGTH 6
-#define CSR_MCONTROL_ACTION (0x3fULL << CSR_MCONTROL_ACTION_OFFSET)
-/*
-* 0: When this trigger matches, the configured action is taken.
-*
-* 1: While this trigger does not match, it prevents the trigger with
-* the next index from matching.
-*
-* Because \Fchain affects the next trigger, hardware must zero it in
-* writes to \Rmcontrol that set \Fdmode to 0 if the next trigger has
-* \Fdmode of 1.
-* In addition hardware should ignore writes to \Rmcontrol that set
-* \Fdmode to 1 if the previous trigger has both \Fdmode of 0 and
-* \Fchain of 1. Debuggers must avoid the latter case by checking
-* \Fchain on the previous trigger if they're writing \Rmcontrol.
-*
-* Implementations that wish to limit the maximum length of a trigger
-* chain (eg. to meet timing requirements) may do so by zeroing
-* \Fchain in writes to \Rmcontrol that would make the chain too long.
+#define CSR_MCONTROL_ACTION_LENGTH 4
+#define CSR_MCONTROL_ACTION (0xfULL << CSR_MCONTROL_ACTION_OFFSET)
+/*
+ * 0: When this trigger matches, the configured action is taken.
+ *
+ * 1: While this trigger does not match, it prevents the trigger with
+ * the next index from matching.
+ *
+ * A trigger chain starts on the first trigger with $|chain|=1$ after
+ * a trigger with $|chain|=0$, or simply on the first trigger if that
+ * has $|chain|=1$. It ends on the first trigger after that which has
+ * $|chain|=0$. This final trigger is part of the chain. The action
+ * on all but the final trigger is ignored. The action on that final
+ * trigger will be taken if and only if all the triggers in the chain
+ * match at the same time.
+ *
+ * Because \FcsrMcontrolChain affects the next trigger, hardware must zero it in
+ * writes to \RcsrMcontrol that set \FcsrTdataOneDmode to 0 if the next trigger has
+ * \FcsrTdataOneDmode of 1.
+ * In addition hardware should ignore writes to \RcsrMcontrol that set
+ * \FcsrTdataOneDmode to 1 if the previous trigger has both \FcsrTdataOneDmode of 0 and
+ * \FcsrMcontrolChain of 1. Debuggers must avoid the latter case by checking
+ * \FcsrMcontrolChain on the previous trigger if they're writing \RcsrMcontrol.
+ *
+ * Implementations that wish to limit the maximum length of a trigger
+ * chain (eg. to meet timing requirements) may do so by zeroing
+ * \FcsrMcontrolChain in writes to \RcsrMcontrol that would make the chain too long.
*/
#define CSR_MCONTROL_CHAIN_OFFSET 11
#define CSR_MCONTROL_CHAIN_LENGTH 1
#define CSR_MCONTROL_CHAIN (0x1ULL << CSR_MCONTROL_CHAIN_OFFSET)
/*
-* 0: Matches when the value equals \Rtdatatwo.
-*
-* 1: Matches when the top M bits of the value match the top M bits of
-* \Rtdatatwo. M is MXLEN-1 minus the index of the least-significant
-* bit containing 0 in \Rtdatatwo.
-*
-* 2: Matches when the value is greater than (unsigned) or equal to
-* \Rtdatatwo.
-*
-* 3: Matches when the value is less than (unsigned) \Rtdatatwo.
-*
-* 4: Matches when the lower half of the value equals the lower half
-* of \Rtdatatwo after the lower half of the value is ANDed with the
-* upper half of \Rtdatatwo.
-*
-* 5: Matches when the upper half of the value equals the lower half
-* of \Rtdatatwo after the upper half of the value is ANDed with the
-* upper half of \Rtdatatwo.
-*
-* Other values are reserved for future use.
+ * 0: Matches when the value equals \RcsrTdataTwo.
+ *
+ * 1: Matches when the top M bits of the value match the top M bits of
+ * \RcsrTdataTwo. M is XLEN-1 minus the index of the least-significant
+ * bit containing 0 in \RcsrTdataTwo. Debuggers should only write values
+ * to \RcsrTdataTwo such that M + \FcsrMcontrolMaskmax $\geq$ XLEN, otherwise it's
+ * undefined on what conditions the trigger will fire.
+ *
+ * 2: Matches when the value is greater than (unsigned) or equal to
+ * \RcsrTdataTwo.
+ *
+ * 3: Matches when the value is less than (unsigned) \RcsrTdataTwo.
+ *
+ * 4: Matches when the lower half of the value equals the lower half
+ * of \RcsrTdataTwo after the lower half of the value is ANDed with the
+ * upper half of \RcsrTdataTwo.
+ *
+ * 5: Matches when the upper half of the value equals the lower half
+ * of \RcsrTdataTwo after the upper half of the value is ANDed with the
+ * upper half of \RcsrTdataTwo.
+ *
+ * 8: Matches when \FcsrMcontrolMatch$=0$ would not match.
+ *
+ * 9: Matches when \FcsrMcontrolMatch$=1$ would not match.
+ *
+ * 12: Matches when \FcsrMcontrolMatch$=4$ would not match.
+ *
+ * 13: Matches when \FcsrMcontrolMatch$=5$ would not match.
+ *
+ * Other values are reserved for future use.
*/
#define CSR_MCONTROL_MATCH_OFFSET 7
#define CSR_MCONTROL_MATCH_LENGTH 4
#define CSR_MCONTROL_MATCH (0xfULL << CSR_MCONTROL_MATCH_OFFSET)
/*
-* When set, enable this trigger in M mode.
+ * When set, enable this trigger in M-mode.
*/
#define CSR_MCONTROL_M_OFFSET 6
#define CSR_MCONTROL_M_LENGTH 1
#define CSR_MCONTROL_M (0x1ULL << CSR_MCONTROL_M_OFFSET)
/*
-* When set, enable this trigger in S mode.
+ * When set, enable this trigger in S-mode.
*/
#define CSR_MCONTROL_S_OFFSET 4
#define CSR_MCONTROL_S_LENGTH 1
#define CSR_MCONTROL_S (0x1ULL << CSR_MCONTROL_S_OFFSET)
/*
-* When set, enable this trigger in U mode.
+ * When set, enable this trigger in U-mode.
*/
#define CSR_MCONTROL_U_OFFSET 3
#define CSR_MCONTROL_U_LENGTH 1
#define CSR_MCONTROL_U (0x1ULL << CSR_MCONTROL_U_OFFSET)
/*
-* When set, the trigger fires on the virtual address or opcode of an
-* instruction that is executed.
+ * When set, the trigger fires on the virtual address or opcode of an
+ * instruction that is executed.
*/
#define CSR_MCONTROL_EXECUTE_OFFSET 2
#define CSR_MCONTROL_EXECUTE_LENGTH 1
#define CSR_MCONTROL_EXECUTE (0x1ULL << CSR_MCONTROL_EXECUTE_OFFSET)
/*
-* When set, the trigger fires on the virtual address or data of a store.
+ * When set, the trigger fires on the virtual address or data of any
+ * store.
*/
#define CSR_MCONTROL_STORE_OFFSET 1
#define CSR_MCONTROL_STORE_LENGTH 1
#define CSR_MCONTROL_STORE (0x1ULL << CSR_MCONTROL_STORE_OFFSET)
/*
-* When set, the trigger fires on the virtual address or data of a load.
+ * When set, the trigger fires on the virtual address or data of any
+ * load.
*/
#define CSR_MCONTROL_LOAD_OFFSET 0
#define CSR_MCONTROL_LOAD_LENGTH 1
#define CSR_MCONTROL_LOAD (0x1ULL << CSR_MCONTROL_LOAD_OFFSET)
#define CSR_ICOUNT 0x7a1
-#define CSR_ICOUNT_TYPE_OFFSET (MXLEN-4)
+#define CSR_ICOUNT_TYPE_OFFSET (XLEN-4)
#define CSR_ICOUNT_TYPE_LENGTH 4
#define CSR_ICOUNT_TYPE (0xfULL << CSR_ICOUNT_TYPE_OFFSET)
-#define CSR_ICOUNT_DMODE_OFFSET (MXLEN-5)
+#define CSR_ICOUNT_DMODE_OFFSET (XLEN-5)
#define CSR_ICOUNT_DMODE_LENGTH 1
#define CSR_ICOUNT_DMODE (0x1ULL << CSR_ICOUNT_DMODE_OFFSET)
/*
-* If this optional bit is implemented, the hardware sets it when this
-* trigger matches. The trigger's user can set or clear it at any
-* time. The trigger's user can use this bit to determine which
-* trigger(s) matched. If the bit is not implemented, it is always 0
-* and writing it has no effect.
+ * If this bit is implemented, the hardware sets it when this
+ * trigger matches. The trigger's user can set or clear it at any
+ * time. It is used to determine which
+ * trigger(s) matched. If the bit is not implemented, it is always 0
+ * and writing it has no effect.
*/
#define CSR_ICOUNT_HIT_OFFSET 24
#define CSR_ICOUNT_HIT_LENGTH 1
#define CSR_ICOUNT_HIT (0x1ULL << CSR_ICOUNT_HIT_OFFSET)
/*
-* When count is decremented to 0, the trigger fires. Instead of
-* changing \Fcount from 1 to 0, it is also acceptable for hardware to
-* clear \Fm, \Fs, and \Fu. This allows \Fcount to be hard-wired
-* to 1 if this register just exists for single step.
+ * When count is decremented to 0, the trigger fires. Instead of
+ * changing \FcsrIcountCount from 1 to 0, it is also acceptable for hardware to
+ * clear \FcsrMcontrolM, \FcsrMcontrolS, and \FcsrMcontrolU. This allows \FcsrIcountCount to be hard-wired
+ * to 1 if this register just exists for single step.
*/
#define CSR_ICOUNT_COUNT_OFFSET 10
#define CSR_ICOUNT_COUNT_LENGTH 14
#define CSR_ICOUNT_COUNT (0x3fffULL << CSR_ICOUNT_COUNT_OFFSET)
/*
-* When set, every instruction completed or exception taken in M mode decrements \Fcount
-* by 1.
+ * When set, every instruction completed in or trap taken from
+ * M-mode decrements \FcsrIcountCount by 1.
*/
#define CSR_ICOUNT_M_OFFSET 9
#define CSR_ICOUNT_M_LENGTH 1
#define CSR_ICOUNT_M (0x1ULL << CSR_ICOUNT_M_OFFSET)
/*
-* When set, every instruction completed or exception taken in S mode decrements \Fcount
-* by 1.
+ * When set, every instruction completed in or trap taken from
+ * S-mode decrements \FcsrIcountCount by 1.
*/
#define CSR_ICOUNT_S_OFFSET 7
#define CSR_ICOUNT_S_LENGTH 1
#define CSR_ICOUNT_S (0x1ULL << CSR_ICOUNT_S_OFFSET)
/*
-* When set, every instruction completed or exception taken in U mode decrements \Fcount
-* by 1.
+ * When set, every instruction completed in or trap taken from
+ * U-mode decrements \FcsrIcountCount by 1.
*/
#define CSR_ICOUNT_U_OFFSET 6
#define CSR_ICOUNT_U_LENGTH 1
#define CSR_ICOUNT_U (0x1ULL << CSR_ICOUNT_U_OFFSET)
/*
-* The action to take when the trigger fires. The values are explained
-* in Table~\ref{tab:action}.
+ * The action to take when the trigger fires. The values are explained
+ * in Table~\ref{tab:action}.
*/
#define CSR_ICOUNT_ACTION_OFFSET 0
#define CSR_ICOUNT_ACTION_LENGTH 6
#define CSR_ICOUNT_ACTION (0x3fULL << CSR_ICOUNT_ACTION_OFFSET)
#define CSR_ITRIGGER 0x7a1
-#define CSR_ITRIGGER_TYPE_OFFSET (MXLEN-4)
+#define CSR_ITRIGGER_TYPE_OFFSET (XLEN-4)
#define CSR_ITRIGGER_TYPE_LENGTH 4
#define CSR_ITRIGGER_TYPE (0xfULL << CSR_ITRIGGER_TYPE_OFFSET)
-#define CSR_ITRIGGER_DMODE_OFFSET (MXLEN-5)
+#define CSR_ITRIGGER_DMODE_OFFSET (XLEN-5)
#define CSR_ITRIGGER_DMODE_LENGTH 1
#define CSR_ITRIGGER_DMODE (0x1ULL << CSR_ITRIGGER_DMODE_OFFSET)
/*
-* If this optional bit is implemented, the hardware sets it when this
-* trigger matches. The trigger's user can set or clear it at any
-* time. The trigger's user can use this bit to determine which
-* trigger(s) matched. If the bit is not implemented, it is always 0
-* and writing it has no effect.
+ * If this bit is implemented, the hardware sets it when this
+ * trigger matches. The trigger's user can set or clear it at any
+ * time. It is used to determine which
+ * trigger(s) matched. If the bit is not implemented, it is always 0
+ * and writing it has no effect.
*/
-#define CSR_ITRIGGER_HIT_OFFSET (MXLEN-6)
+#define CSR_ITRIGGER_HIT_OFFSET (XLEN-6)
#define CSR_ITRIGGER_HIT_LENGTH 1
#define CSR_ITRIGGER_HIT (0x1ULL << CSR_ITRIGGER_HIT_OFFSET)
/*
-* When set, enable this trigger for interrupts that are taken from M
-* mode.
+ * When set, enable this trigger for interrupts that are taken from M
+ * mode.
*/
#define CSR_ITRIGGER_M_OFFSET 9
#define CSR_ITRIGGER_M_LENGTH 1
#define CSR_ITRIGGER_M (0x1ULL << CSR_ITRIGGER_M_OFFSET)
/*
-* When set, enable this trigger for interrupts that are taken from S
-* mode.
+ * When set, enable this trigger for interrupts that are taken from S
+ * mode.
*/
#define CSR_ITRIGGER_S_OFFSET 7
#define CSR_ITRIGGER_S_LENGTH 1
#define CSR_ITRIGGER_S (0x1ULL << CSR_ITRIGGER_S_OFFSET)
/*
-* When set, enable this trigger for interrupts that are taken from U
-* mode.
+ * When set, enable this trigger for interrupts that are taken from U
+ * mode.
*/
#define CSR_ITRIGGER_U_OFFSET 6
#define CSR_ITRIGGER_U_LENGTH 1
#define CSR_ITRIGGER_U (0x1ULL << CSR_ITRIGGER_U_OFFSET)
/*
-* The action to take when the trigger fires. The values are explained
-* in Table~\ref{tab:action}.
+ * The action to take when the trigger fires. The values are explained
+ * in Table~\ref{tab:action}.
*/
#define CSR_ITRIGGER_ACTION_OFFSET 0
#define CSR_ITRIGGER_ACTION_LENGTH 6
#define CSR_ITRIGGER_ACTION (0x3fULL << CSR_ITRIGGER_ACTION_OFFSET)
#define CSR_ETRIGGER 0x7a1
-#define CSR_ETRIGGER_TYPE_OFFSET (MXLEN-4)
+#define CSR_ETRIGGER_TYPE_OFFSET (XLEN-4)
#define CSR_ETRIGGER_TYPE_LENGTH 4
#define CSR_ETRIGGER_TYPE (0xfULL << CSR_ETRIGGER_TYPE_OFFSET)
-#define CSR_ETRIGGER_DMODE_OFFSET (MXLEN-5)
+#define CSR_ETRIGGER_DMODE_OFFSET (XLEN-5)
#define CSR_ETRIGGER_DMODE_LENGTH 1
#define CSR_ETRIGGER_DMODE (0x1ULL << CSR_ETRIGGER_DMODE_OFFSET)
/*
-* If this optional bit is implemented, the hardware sets it when this
-* trigger matches. The trigger's user can set or clear it at any
-* time. The trigger's user can use this bit to determine which
-* trigger(s) matched. If the bit is not implemented, it is always 0
-* and writing it has no effect.
+ * If this bit is implemented, the hardware sets it when this
+ * trigger matches. The trigger's user can set or clear it at any
+ * time. It is used to determine which
+ * trigger(s) matched. If the bit is not implemented, it is always 0
+ * and writing it has no effect.
*/
-#define CSR_ETRIGGER_HIT_OFFSET (MXLEN-6)
+#define CSR_ETRIGGER_HIT_OFFSET (XLEN-6)
#define CSR_ETRIGGER_HIT_LENGTH 1
#define CSR_ETRIGGER_HIT (0x1ULL << CSR_ETRIGGER_HIT_OFFSET)
/*
-* When set, enable this trigger for exceptions that are taken from M
-* mode.
+ * When set, non-maskable interrupts cause this trigger to fire,
+ * regardless of the values of \FcsrMcontrolM, \FcsrMcontrolS, and
+ * \FcsrMcontrolU.
+ */
+#define CSR_ETRIGGER_NMI_OFFSET 10
+#define CSR_ETRIGGER_NMI_LENGTH 1
+#define CSR_ETRIGGER_NMI (0x1ULL << CSR_ETRIGGER_NMI_OFFSET)
+/*
+ * When set, enable this trigger for exceptions that are taken from M
+ * mode.
*/
#define CSR_ETRIGGER_M_OFFSET 9
#define CSR_ETRIGGER_M_LENGTH 1
#define CSR_ETRIGGER_M (0x1ULL << CSR_ETRIGGER_M_OFFSET)
/*
-* When set, enable this trigger for exceptions that are taken from S
-* mode.
+ * When set, enable this trigger for exceptions that are taken from S
+ * mode.
*/
#define CSR_ETRIGGER_S_OFFSET 7
#define CSR_ETRIGGER_S_LENGTH 1
#define CSR_ETRIGGER_S (0x1ULL << CSR_ETRIGGER_S_OFFSET)
/*
-* When set, enable this trigger for exceptions that are taken from U
-* mode.
+ * When set, enable this trigger for exceptions that are taken from U
+ * mode.
*/
#define CSR_ETRIGGER_U_OFFSET 6
#define CSR_ETRIGGER_U_LENGTH 1
#define CSR_ETRIGGER_U (0x1ULL << CSR_ETRIGGER_U_OFFSET)
/*
-* The action to take when the trigger fires. The values are explained
-* in Table~\ref{tab:action}.
+ * The action to take when the trigger fires. The values are explained
+ * in Table~\ref{tab:action}.
*/
#define CSR_ETRIGGER_ACTION_OFFSET 0
#define CSR_ETRIGGER_ACTION_LENGTH 6
#define CSR_ETRIGGER_ACTION (0x3fULL << CSR_ETRIGGER_ACTION_OFFSET)
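/*
 * Usage sketch: composing an etrigger tdata1 value that fires on
 * exceptions taken from M-mode and enters Debug Mode.  The type (5)
 * and action (1) encodings are taken from the trigger chapter of the
 * spec; XLEN must be supplied by the including code because the field
 * offsets above depend on it, and it is assumed to be 64 here.
 */
#include <stdint.h>

#ifndef XLEN
#define XLEN 64	/* assumption for this sketch only */
#endif

static uint64_t etrigger_m_exceptions(void)
{
	return (5ULL << CSR_ETRIGGER_TYPE_OFFSET)	/* type 5: etrigger */
		| CSR_ETRIGGER_DMODE			/* writable only from Debug Mode */
		| CSR_ETRIGGER_M			/* exceptions taken from M-mode */
		| (1ULL << CSR_ETRIGGER_ACTION_OFFSET);	/* action 1: enter Debug Mode */
}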
-#define DMI_DMSTATUS 0x11
-/*
-* If 1, then there is an implicit {\tt ebreak} instruction at the
-* non-existent word immediately after the Program Buffer. This saves
-* the debugger from having to write the {\tt ebreak} itself, and
-* allows the Program Buffer to be one word smaller.
-*
-* This must be 1 when \Fprogbufsize is 1.
- */
-#define DMI_DMSTATUS_IMPEBREAK_OFFSET 22
-#define DMI_DMSTATUS_IMPEBREAK_LENGTH 1
-#define DMI_DMSTATUS_IMPEBREAK (0x1U << DMI_DMSTATUS_IMPEBREAK_OFFSET)
-/*
-* This field is 1 when all currently selected harts have been reset but the reset has not been acknowledged.
- */
-#define DMI_DMSTATUS_ALLHAVERESET_OFFSET 19
-#define DMI_DMSTATUS_ALLHAVERESET_LENGTH 1
-#define DMI_DMSTATUS_ALLHAVERESET (0x1U << DMI_DMSTATUS_ALLHAVERESET_OFFSET)
-/*
-* This field is 1 when any currently selected hart has been reset but the reset has not been acknowledged.
- */
-#define DMI_DMSTATUS_ANYHAVERESET_OFFSET 18
-#define DMI_DMSTATUS_ANYHAVERESET_LENGTH 1
-#define DMI_DMSTATUS_ANYHAVERESET (0x1U << DMI_DMSTATUS_ANYHAVERESET_OFFSET)
-/*
-* This field is 1 when all currently selected harts have acknowledged
-* the previous resume request.
- */
-#define DMI_DMSTATUS_ALLRESUMEACK_OFFSET 17
-#define DMI_DMSTATUS_ALLRESUMEACK_LENGTH 1
-#define DMI_DMSTATUS_ALLRESUMEACK (0x1U << DMI_DMSTATUS_ALLRESUMEACK_OFFSET)
-/*
-* This field is 1 when any currently selected hart has acknowledged
-* the previous resume request.
- */
-#define DMI_DMSTATUS_ANYRESUMEACK_OFFSET 16
-#define DMI_DMSTATUS_ANYRESUMEACK_LENGTH 1
-#define DMI_DMSTATUS_ANYRESUMEACK (0x1U << DMI_DMSTATUS_ANYRESUMEACK_OFFSET)
-/*
-* This field is 1 when all currently selected harts do not exist in this system.
- */
-#define DMI_DMSTATUS_ALLNONEXISTENT_OFFSET 15
-#define DMI_DMSTATUS_ALLNONEXISTENT_LENGTH 1
-#define DMI_DMSTATUS_ALLNONEXISTENT (0x1U << DMI_DMSTATUS_ALLNONEXISTENT_OFFSET)
-/*
-* This field is 1 when any currently selected hart does not exist in this system.
- */
-#define DMI_DMSTATUS_ANYNONEXISTENT_OFFSET 14
-#define DMI_DMSTATUS_ANYNONEXISTENT_LENGTH 1
-#define DMI_DMSTATUS_ANYNONEXISTENT (0x1U << DMI_DMSTATUS_ANYNONEXISTENT_OFFSET)
+#define CSR_TEXTRA32 0x7a3
+/*
+ * Data used together with \FcsrTextraThirtytwoMselect.
+ */
+#define CSR_TEXTRA32_MVALUE_OFFSET 26
+#define CSR_TEXTRA32_MVALUE_LENGTH 6
+#define CSR_TEXTRA32_MVALUE (0x3fU << CSR_TEXTRA32_MVALUE_OFFSET)
+/*
+ * 0: Ignore \FcsrTextraThirtytwoMvalue.
+ *
+ * 1: This trigger will only match if the low bits of
+ * \RcsrMcontext equal \FcsrTextraThirtytwoMvalue.
+ */
+#define CSR_TEXTRA32_MSELECT_OFFSET 25
+#define CSR_TEXTRA32_MSELECT_LENGTH 1
+#define CSR_TEXTRA32_MSELECT (0x1U << CSR_TEXTRA32_MSELECT_OFFSET)
+/*
+ * Data used together with \FcsrTextraThirtytwoSselect.
+ *
+ * This field should be tied to 0 when S-mode is not supported.
+ */
+#define CSR_TEXTRA32_SVALUE_OFFSET 2
+#define CSR_TEXTRA32_SVALUE_LENGTH 16
+#define CSR_TEXTRA32_SVALUE (0xffffU << CSR_TEXTRA32_SVALUE_OFFSET)
+/*
+ * 0: Ignore \FcsrTextraThirtytwoSvalue.
+ *
+ * 1: This trigger will only match if the low bits of
+ * \RcsrScontext equal \FcsrTextraThirtytwoSvalue.
+ *
+ * 2: This trigger will only match if \Fasid in \Rsatp
+ * equals the lower ASIDMAX (defined in the Privileged Spec) bits of
+ * \FcsrTextraThirtytwoSvalue.
+ *
+ * This field should be tied to 0 when S-mode is not supported.
+ */
+#define CSR_TEXTRA32_SSELECT_OFFSET 0
+#define CSR_TEXTRA32_SSELECT_LENGTH 2
+#define CSR_TEXTRA32_SSELECT (0x3U << CSR_TEXTRA32_SSELECT_OFFSET)
+#define CSR_TEXTRA64 0x7a3
+#define CSR_TEXTRA64_MVALUE_OFFSET 51
+#define CSR_TEXTRA64_MVALUE_LENGTH 13
+#define CSR_TEXTRA64_MVALUE (0x1fffULL << CSR_TEXTRA64_MVALUE_OFFSET)
+#define CSR_TEXTRA64_MSELECT_OFFSET 50
+#define CSR_TEXTRA64_MSELECT_LENGTH 1
+#define CSR_TEXTRA64_MSELECT (0x1ULL << CSR_TEXTRA64_MSELECT_OFFSET)
+#define CSR_TEXTRA64_SVALUE_OFFSET 2
+#define CSR_TEXTRA64_SVALUE_LENGTH 34
+#define CSR_TEXTRA64_SVALUE (0x3ffffffffULL << CSR_TEXTRA64_SVALUE_OFFSET)
+#define CSR_TEXTRA64_SSELECT_OFFSET 0
+#define CSR_TEXTRA64_SSELECT_LENGTH 2
+#define CSR_TEXTRA64_SSELECT (0x3ULL << CSR_TEXTRA64_SSELECT_OFFSET)
+#define DM_DMSTATUS 0x11
+/*
+ * If 1, then there is an implicit {\tt ebreak} instruction at the
+ * non-existent word immediately after the Program Buffer. This saves
+ * the debugger from having to write the {\tt ebreak} itself, and
+ * allows the Program Buffer to be one word smaller.
+ *
+ * This must be 1 when \FdmAbstractcsProgbufsize is 1.
+ */
+#define DM_DMSTATUS_IMPEBREAK_OFFSET 22
+#define DM_DMSTATUS_IMPEBREAK_LENGTH 1
+#define DM_DMSTATUS_IMPEBREAK (0x1U << DM_DMSTATUS_IMPEBREAK_OFFSET)
+/*
+ * This field is 1 when all currently selected harts have been reset
+ * and reset has not been acknowledged for any of them.
+ */
+#define DM_DMSTATUS_ALLHAVERESET_OFFSET 19
+#define DM_DMSTATUS_ALLHAVERESET_LENGTH 1
+#define DM_DMSTATUS_ALLHAVERESET (0x1U << DM_DMSTATUS_ALLHAVERESET_OFFSET)
+/*
+ * This field is 1 when at least one currently selected hart has been
+ * reset and reset has not been acknowledged for that hart.
+ */
+#define DM_DMSTATUS_ANYHAVERESET_OFFSET 18
+#define DM_DMSTATUS_ANYHAVERESET_LENGTH 1
+#define DM_DMSTATUS_ANYHAVERESET (0x1U << DM_DMSTATUS_ANYHAVERESET_OFFSET)
+/*
+ * This field is 1 when all currently selected harts have acknowledged
+ * their last resume request.
+ */
+#define DM_DMSTATUS_ALLRESUMEACK_OFFSET 17
+#define DM_DMSTATUS_ALLRESUMEACK_LENGTH 1
+#define DM_DMSTATUS_ALLRESUMEACK (0x1U << DM_DMSTATUS_ALLRESUMEACK_OFFSET)
+/*
+ * This field is 1 when any currently selected hart has acknowledged
+ * its last resume request.
+ */
+#define DM_DMSTATUS_ANYRESUMEACK_OFFSET 16
+#define DM_DMSTATUS_ANYRESUMEACK_LENGTH 1
+#define DM_DMSTATUS_ANYRESUMEACK (0x1U << DM_DMSTATUS_ANYRESUMEACK_OFFSET)
+/*
+ * This field is 1 when all currently selected harts do not exist in
+ * this platform.
+ */
+#define DM_DMSTATUS_ALLNONEXISTENT_OFFSET 15
+#define DM_DMSTATUS_ALLNONEXISTENT_LENGTH 1
+#define DM_DMSTATUS_ALLNONEXISTENT (0x1U << DM_DMSTATUS_ALLNONEXISTENT_OFFSET)
+/*
+ * This field is 1 when any currently selected hart does not exist in
+ * this platform.
+ */
+#define DM_DMSTATUS_ANYNONEXISTENT_OFFSET 14
+#define DM_DMSTATUS_ANYNONEXISTENT_LENGTH 1
+#define DM_DMSTATUS_ANYNONEXISTENT (0x1U << DM_DMSTATUS_ANYNONEXISTENT_OFFSET)
+/*
+ * This field is 1 when all currently selected harts are unavailable.
+ */
+#define DM_DMSTATUS_ALLUNAVAIL_OFFSET 13
+#define DM_DMSTATUS_ALLUNAVAIL_LENGTH 1
+#define DM_DMSTATUS_ALLUNAVAIL (0x1U << DM_DMSTATUS_ALLUNAVAIL_OFFSET)
+/*
+ * This field is 1 when any currently selected hart is unavailable.
+ */
+#define DM_DMSTATUS_ANYUNAVAIL_OFFSET 12
+#define DM_DMSTATUS_ANYUNAVAIL_LENGTH 1
+#define DM_DMSTATUS_ANYUNAVAIL (0x1U << DM_DMSTATUS_ANYUNAVAIL_OFFSET)
+/*
+ * This field is 1 when all currently selected harts are running.
+ */
+#define DM_DMSTATUS_ALLRUNNING_OFFSET 11
+#define DM_DMSTATUS_ALLRUNNING_LENGTH 1
+#define DM_DMSTATUS_ALLRUNNING (0x1U << DM_DMSTATUS_ALLRUNNING_OFFSET)
+/*
+ * This field is 1 when any currently selected hart is running.
+ */
+#define DM_DMSTATUS_ANYRUNNING_OFFSET 10
+#define DM_DMSTATUS_ANYRUNNING_LENGTH 1
+#define DM_DMSTATUS_ANYRUNNING (0x1U << DM_DMSTATUS_ANYRUNNING_OFFSET)
+/*
+ * This field is 1 when all currently selected harts are halted.
+ */
+#define DM_DMSTATUS_ALLHALTED_OFFSET 9
+#define DM_DMSTATUS_ALLHALTED_LENGTH 1
+#define DM_DMSTATUS_ALLHALTED (0x1U << DM_DMSTATUS_ALLHALTED_OFFSET)
+/*
+ * This field is 1 when any currently selected hart is halted.
+ */
+#define DM_DMSTATUS_ANYHALTED_OFFSET 8
+#define DM_DMSTATUS_ANYHALTED_LENGTH 1
+#define DM_DMSTATUS_ANYHALTED (0x1U << DM_DMSTATUS_ANYHALTED_OFFSET)
+/*
+ * 0: Authentication is required before using the DM.
+ *
+ * 1: The authentication check has passed.
+ *
+ * On components that don't implement authentication, this bit must be
+ * preset as 1.
+ */
+#define DM_DMSTATUS_AUTHENTICATED_OFFSET 7
+#define DM_DMSTATUS_AUTHENTICATED_LENGTH 1
+#define DM_DMSTATUS_AUTHENTICATED (0x1U << DM_DMSTATUS_AUTHENTICATED_OFFSET)
+/*
+ * 0: The authentication module is ready to process the next
+ * read/write to \RdmAuthdata.
+ *
+ * 1: The authentication module is busy. Accessing \RdmAuthdata results
+ * in unspecified behavior.
+ *
+ * \FdmDmstatusAuthbusy only becomes set in immediate response to an access to
+ * \RdmAuthdata.
+ */
+#define DM_DMSTATUS_AUTHBUSY_OFFSET 6
+#define DM_DMSTATUS_AUTHBUSY_LENGTH 1
+#define DM_DMSTATUS_AUTHBUSY (0x1U << DM_DMSTATUS_AUTHBUSY_OFFSET)
+/*
+ * 1 if this Debug Module supports halt-on-reset functionality
+ * controllable by the \FdmDmcontrolSetresethaltreq and \FdmDmcontrolClrresethaltreq bits.
+ * 0 otherwise.
+ */
+#define DM_DMSTATUS_HASRESETHALTREQ_OFFSET 5
+#define DM_DMSTATUS_HASRESETHALTREQ_LENGTH 1
+#define DM_DMSTATUS_HASRESETHALTREQ (0x1U << DM_DMSTATUS_HASRESETHALTREQ_OFFSET)
+/*
+ * 0: \RdmConfstrptrZero--\RdmConfstrptrThree hold information which
+ * is not relevant to the configuration string.
+ *
+ * 1: \RdmConfstrptrZero--\RdmConfstrptrThree hold the address of the
+ * configuration string.
+ */
+#define DM_DMSTATUS_CONFSTRPTRVALID_OFFSET 4
+#define DM_DMSTATUS_CONFSTRPTRVALID_LENGTH 1
+#define DM_DMSTATUS_CONFSTRPTRVALID (0x1U << DM_DMSTATUS_CONFSTRPTRVALID_OFFSET)
+/*
+ * 0: There is no Debug Module present.
+ *
+ * 1: There is a Debug Module and it conforms to version 0.11 of this
+ * specification.
+ *
+ * 2: There is a Debug Module and it conforms to version 0.13 of this
+ * specification.
+ *
+ * 3: There is a Debug Module and it conforms to version 0.14 of this
+ * specification.
+ *
+ * 15: There is a Debug Module but it does not conform to any
+ * available version of this spec.
+ */
+#define DM_DMSTATUS_VERSION_OFFSET 0
+#define DM_DMSTATUS_VERSION_LENGTH 4
+#define DM_DMSTATUS_VERSION (0xfU << DM_DMSTATUS_VERSION_OFFSET)
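/*
 * Usage sketch: decoding a raw dmstatus value with the field macros
 * above.  The GET_FIELD() helper and the printed summary are local to
 * this example; they are not provided by this header.
 */
#include <stdint.h>
#include <stdio.h>

#define GET_FIELD(reg, mask, offset) (((reg) & (mask)) >> (offset))

static void dmstatus_summary(uint32_t dmstatus)
{
	unsigned version = GET_FIELD(dmstatus, DM_DMSTATUS_VERSION,
			DM_DMSTATUS_VERSION_OFFSET);
	int authenticated = (dmstatus & DM_DMSTATUS_AUTHENTICATED) != 0;
	int allhalted = (dmstatus & DM_DMSTATUS_ALLHALTED) != 0;

	printf("dmstatus: version=%u authenticated=%d allhalted=%d\n",
			version, authenticated, allhalted);
}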
+#define DM_DMCONTROL 0x10
+/*
+ * Writing 0 clears the halt request bit for all currently selected
+ * harts. This may cancel outstanding halt requests for those harts.
+ *
+ * Writing 1 sets the halt request bit for all currently selected
+ * harts. Running harts will halt whenever their halt request bit is
+ * set.
+ *
+ * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel.
+ */
+#define DM_DMCONTROL_HALTREQ_OFFSET 31
+#define DM_DMCONTROL_HALTREQ_LENGTH 1
+#define DM_DMCONTROL_HALTREQ (0x1U << DM_DMCONTROL_HALTREQ_OFFSET)
+/*
+ * Writing 1 causes the currently selected harts to resume once, if
+ * they are halted when the write occurs. It also clears the resume
+ * ack bit for those harts.
+ *
+ * \FdmDmcontrolResumereq is ignored if \FdmDmcontrolHaltreq is set.
+ *
+ * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel.
+ */
+#define DM_DMCONTROL_RESUMEREQ_OFFSET 30
+#define DM_DMCONTROL_RESUMEREQ_LENGTH 1
+#define DM_DMCONTROL_RESUMEREQ (0x1U << DM_DMCONTROL_RESUMEREQ_OFFSET)
+/*
+ * This optional field writes the reset bit for all the currently
+ * selected harts. To perform a reset the debugger writes 1, and then
+ * writes 0 to deassert the reset signal.
+ *
+ * While this bit is 1, the debugger must not change which harts are
+ * selected.
+ *
+ * If this feature is not implemented, the bit always stays 0, so
+ * after writing 1 the debugger can read the register back to see if
+ * the feature is supported.
+ *
+ * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel.
+ */
+#define DM_DMCONTROL_HARTRESET_OFFSET 29
+#define DM_DMCONTROL_HARTRESET_LENGTH 1
+#define DM_DMCONTROL_HARTRESET (0x1U << DM_DMCONTROL_HARTRESET_OFFSET)
+/*
+ * 0: No effect.
+ *
+ * 1: Clears {\tt havereset} for any selected harts.
+ *
+ * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel.
+ */
+#define DM_DMCONTROL_ACKHAVERESET_OFFSET 28
+#define DM_DMCONTROL_ACKHAVERESET_LENGTH 1
+#define DM_DMCONTROL_ACKHAVERESET (0x1U << DM_DMCONTROL_ACKHAVERESET_OFFSET)
+/*
+ * Selects the definition of currently selected harts.
+ *
+ * 0: There is a single currently selected hart, that is selected by \Fhartsel.
+ *
+ * 1: There may be multiple currently selected harts -- the hart
+ * selected by \Fhartsel, plus those selected by the hart array mask
+ * register.
+ *
+ * An implementation which does not implement the hart array mask register
+ * must tie this field to 0. A debugger which wishes to use the hart array
+ * mask register feature should set this bit and read back to see if the functionality
+ * is supported.
+ */
+#define DM_DMCONTROL_HASEL_OFFSET 26
+#define DM_DMCONTROL_HASEL_LENGTH 1
+#define DM_DMCONTROL_HASEL (0x1U << DM_DMCONTROL_HASEL_OFFSET)
+/*
+ * The low 10 bits of \Fhartsel: the DM-specific index of the hart to
+ * select. This hart is always part of the currently selected harts.
+ */
+#define DM_DMCONTROL_HARTSELLO_OFFSET 16
+#define DM_DMCONTROL_HARTSELLO_LENGTH 10
+#define DM_DMCONTROL_HARTSELLO (0x3ffU << DM_DMCONTROL_HARTSELLO_OFFSET)
+/*
+ * The high 10 bits of \Fhartsel: the DM-specific index of the hart to
+ * select. This hart is always part of the currently selected harts.
+ */
+#define DM_DMCONTROL_HARTSELHI_OFFSET 6
+#define DM_DMCONTROL_HARTSELHI_LENGTH 10
+#define DM_DMCONTROL_HARTSELHI (0x3ffU << DM_DMCONTROL_HARTSELHI_OFFSET)
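/*
 * Usage sketch: \Fhartsel is split across the two 10-bit fields
 * above.  This helper packs a 20-bit hart index into the
 * corresponding dmcontrol bits; it is illustrative only.
 */
#include <stdint.h>

static uint32_t dmcontrol_set_hartsel(uint32_t dmcontrol, uint32_t hartsel)
{
	dmcontrol &= ~(DM_DMCONTROL_HARTSELLO | DM_DMCONTROL_HARTSELHI);
	/* Low 10 bits go to hartsello, the next 10 bits to hartselhi. */
	dmcontrol |= (hartsel << DM_DMCONTROL_HARTSELLO_OFFSET)
			& DM_DMCONTROL_HARTSELLO;
	dmcontrol |= ((hartsel >> 10) << DM_DMCONTROL_HARTSELHI_OFFSET)
			& DM_DMCONTROL_HARTSELHI;
	return dmcontrol;
}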
+/*
+ * This optional field writes the halt-on-reset request bit for all
+ * currently selected harts, unless \FdmDmcontrolClrresethaltreq is
+ * simultaneously set to 1.
+ * When set to 1, each selected hart will halt upon the next deassertion
+ * of its reset. The halt-on-reset request bit is not automatically
+ * cleared. The debugger must write to \FdmDmcontrolClrresethaltreq to clear it.
+ *
+ * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel.
+ *
+ * If \FdmDmstatusHasresethaltreq is 0, this field is not implemented.
+ */
+#define DM_DMCONTROL_SETRESETHALTREQ_OFFSET 3
+#define DM_DMCONTROL_SETRESETHALTREQ_LENGTH 1
+#define DM_DMCONTROL_SETRESETHALTREQ (0x1U << DM_DMCONTROL_SETRESETHALTREQ_OFFSET)
+/*
+ * This optional field clears the halt-on-reset request bit for all
+ * currently selected harts.
+ *
+ * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel.
+ */
+#define DM_DMCONTROL_CLRRESETHALTREQ_OFFSET 2
+#define DM_DMCONTROL_CLRRESETHALTREQ_LENGTH 1
+#define DM_DMCONTROL_CLRRESETHALTREQ (0x1U << DM_DMCONTROL_CLRRESETHALTREQ_OFFSET)
+/*
+ * This bit controls the reset signal from the DM to the rest of the
+ * system. The signal should reset every part of the system, including
+ * every hart, except for the DM and any logic required to access the
+ * DM.
+ * To perform a system reset the debugger writes 1,
+ * and then writes 0
+ * to deassert the reset.
+ */
+#define DM_DMCONTROL_NDMRESET_OFFSET 1
+#define DM_DMCONTROL_NDMRESET_LENGTH 1
+#define DM_DMCONTROL_NDMRESET (0x1U << DM_DMCONTROL_NDMRESET_OFFSET)
+/*
+ * This bit serves as a reset signal for the Debug Module itself.
+ *
+ * 0: The module's state, including authentication mechanism,
+ * takes its reset values (the \FdmDmcontrolDmactive bit is the only bit which can
+ * be written to something other than its reset value). Any accesses
+ * to the module may fail. Specifically, \FdmDmstatusVersion may not return
+ * correct data.
+ *
+ * 1: The module functions normally. After writing 1, the debugger should
+ * poll \RdmDmcontrol until \FdmDmcontrolDmactive is high. Hardware may
+ * take an arbitrarily long time to initialize and will indicate completion
+ * by setting dmactive to 1.
+ *
+ * No other mechanism should exist that may result in resetting the
+ * Debug Module after power up.
+ *
+ * A debugger may pulse this bit low to get the Debug Module into a
+ * known state.
+ *
+ * Implementations may pay attention to this bit to further aid
+ * debugging, for example by preventing the Debug Module from being
+ * power gated while debugging is active.
+ */
+#define DM_DMCONTROL_DMACTIVE_OFFSET 0
+#define DM_DMCONTROL_DMACTIVE_LENGTH 1
+#define DM_DMCONTROL_DMACTIVE (0x1U << DM_DMCONTROL_DMACTIVE_OFFSET)
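/*
 * Usage sketch: the activation sequence described above -- pulse
 * \FdmDmcontrolDmactive low, set it, then poll until it reads back as
 * 1.  dmi_read()/dmi_write() are assumed placeholders for whatever
 * DMI transport the debugger uses; they are not declared by this
 * header.
 */
#include <stdint.h>

extern uint32_t dmi_read(uint32_t address);		/* placeholder */
extern void dmi_write(uint32_t address, uint32_t value);	/* placeholder */

static void dm_activate(void)
{
	/* Pulse dmactive low to put the DM into a known state. */
	dmi_write(DM_DMCONTROL, 0);
	dmi_write(DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
	/* Hardware may take arbitrarily long to initialize; completion is
	 * signalled by dmactive reading back as 1. */
	while (!(dmi_read(DM_DMCONTROL) & DM_DMCONTROL_DMACTIVE))
		;
}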
+#define DM_HARTINFO 0x12
+/*
+ * Number of {\tt dscratch} registers available for the debugger
+ * to use during program buffer execution, starting from \RcsrDscratchZero.
+ * The debugger can make no assumptions about the contents of these
+ * registers between commands.
+ */
+#define DM_HARTINFO_NSCRATCH_OFFSET 20
+#define DM_HARTINFO_NSCRATCH_LENGTH 4
+#define DM_HARTINFO_NSCRATCH (0xfU << DM_HARTINFO_NSCRATCH_OFFSET)
+/*
+ * 0: The {\tt data} registers are shadowed in the hart by CSRs.
+ * Each CSR is DXLEN bits in size, and corresponds
+ * to a single argument, per Table~\ref{tab:datareg}.
+ *
+ * 1: The {\tt data} registers are shadowed in the hart's memory map.
+ * Each register takes up 4 bytes in the memory map.
+ */
+#define DM_HARTINFO_DATAACCESS_OFFSET 16
+#define DM_HARTINFO_DATAACCESS_LENGTH 1
+#define DM_HARTINFO_DATAACCESS (0x1U << DM_HARTINFO_DATAACCESS_OFFSET)
+/*
+ * If \FdmHartinfoDataaccess is 0: Number of CSRs dedicated to
+ * shadowing the {\tt data} registers.
+ *
+ * If \FdmHartinfoDataaccess is 1: Number of 32-bit words in the memory map
+ * dedicated to shadowing the {\tt data} registers.
+ *
+ * Since there are at most 12 {\tt data} registers, the value in this
+ * register must be 12 or smaller.
+ */
+#define DM_HARTINFO_DATASIZE_OFFSET 12
+#define DM_HARTINFO_DATASIZE_LENGTH 4
+#define DM_HARTINFO_DATASIZE (0xfU << DM_HARTINFO_DATASIZE_OFFSET)
+/*
+ * If \FdmHartinfoDataaccess is 0: The number of the first CSR dedicated to
+ * shadowing the {\tt data} registers.
+ *
+ * If \FdmHartinfoDataaccess is 1: Address of RAM where the data
+ * registers are shadowed. This address is sign extended giving a
+ * range of -2048 to 2047, easily addressed with a load or store using
+ * \Xzero as the address register.
+ */
+#define DM_HARTINFO_DATAADDR_OFFSET 0
+#define DM_HARTINFO_DATAADDR_LENGTH 12
+#define DM_HARTINFO_DATAADDR (0xfffU << DM_HARTINFO_DATAADDR_OFFSET)
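/*
 * Usage sketch: when \FdmHartinfoDataaccess is 1, the 12-bit
 * \FdmHartinfoDataaddr field above is a sign-extended address in the
 * range -2048 to 2047.  This helper recovers the signed value; it is
 * illustrative only.
 */
#include <stdint.h>

static int32_t hartinfo_dataaddr(uint32_t hartinfo)
{
	uint32_t field = (hartinfo & DM_HARTINFO_DATAADDR)
			>> DM_HARTINFO_DATAADDR_OFFSET;
	/* Sign-extend from 12 bits without relying on implementation-defined
	 * shifts: flip the sign bit, then subtract its weight. */
	return (int32_t)(field ^ 0x800) - 0x800;
}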
+#define DM_HAWINDOWSEL 0x14
+/*
+ * The high bits of this field may be tied to 0, depending on how large
+ * the array mask register is. E.g.\ on a system with 48 harts only bit 0
+ * of this field may actually be writable.
+ */
+#define DM_HAWINDOWSEL_HAWINDOWSEL_OFFSET 0
+#define DM_HAWINDOWSEL_HAWINDOWSEL_LENGTH 15
+#define DM_HAWINDOWSEL_HAWINDOWSEL (0x7fffU << DM_HAWINDOWSEL_HAWINDOWSEL_OFFSET)
+#define DM_HAWINDOW 0x15
+#define DM_HAWINDOW_MASKDATA_OFFSET 0
+#define DM_HAWINDOW_MASKDATA_LENGTH 32
+#define DM_HAWINDOW_MASKDATA (0xffffffffU << DM_HAWINDOW_MASKDATA_OFFSET)
+#define DM_ABSTRACTCS 0x16
+/*
+ * Size of the Program Buffer, in 32-bit words. Valid sizes are 0 - 16.
+ */
+#define DM_ABSTRACTCS_PROGBUFSIZE_OFFSET 24
+#define DM_ABSTRACTCS_PROGBUFSIZE_LENGTH 5
+#define DM_ABSTRACTCS_PROGBUFSIZE (0x1fU << DM_ABSTRACTCS_PROGBUFSIZE_OFFSET)
+/*
+ * 1: An abstract command is currently being executed.
+ *
+ * This bit is set as soon as \RdmCommand is written, and is
+ * not cleared until that command has completed.
+ */
+#define DM_ABSTRACTCS_BUSY_OFFSET 12
+#define DM_ABSTRACTCS_BUSY_LENGTH 1
+#define DM_ABSTRACTCS_BUSY (0x1U << DM_ABSTRACTCS_BUSY_OFFSET)
+/*
+ * This optional bit controls whether program buffer and abstract
+ * memory accesses are performed with the exact and full set of
+ * permission checks that apply based on the current architectural
+ * state of the hart performing the access, or with a relaxed set of
+ * permission checks (e.g. PMP restrictions are ignored). The
+ * details of the latter are implementation-specific. When set to 0,
+ * full permissions apply; when set to 1, relaxed permissions apply.
+ */
+#define DM_ABSTRACTCS_RELAXEDPRIV_OFFSET 11
+#define DM_ABSTRACTCS_RELAXEDPRIV_LENGTH 1
+#define DM_ABSTRACTCS_RELAXEDPRIV (0x1U << DM_ABSTRACTCS_RELAXEDPRIV_OFFSET)
+/*
+ * Gets set if an abstract command fails. The bits in this field remain set until
+ * they are cleared by writing 1 to them. No abstract command is
+ * started until the value is reset to 0.
+ *
+ * This field only contains a valid value if \FdmAbstractcsBusy is 0.
+ *
+ * 0 (none): No error.
+ *
+ * 1 (busy): An abstract command was executing while \RdmCommand,
+ * \RdmAbstractcs, or \RdmAbstractauto was written, or when one
+ * of the {\tt data} or {\tt progbuf} registers was read or written.
+ * This status is only written if \FdmAbstractcsCmderr contains 0.
+ *
+ * 2 (not supported): The command in \RdmCommand is not supported. It
+ * may be supported with different options set, but it will not be
+ * supported at a later time when the hart or system state are
+ * different.
+ *
+ * 3 (exception): An exception occurred while executing the command
+ * (e.g.\ while executing the Program Buffer).
+ *
+ * 4 (halt/resume): The abstract command couldn't execute because the
+ * hart wasn't in the required state (running/halted), or unavailable.
+ *
+ * 5 (bus): The abstract command failed due to a bus error (e.g.\
+ * alignment, access size, or timeout).
+ *
+ * 6: Reserved for future use.
+ *
+ * 7 (other): The command failed for another reason.
+ */
+#define DM_ABSTRACTCS_CMDERR_OFFSET 8
+#define DM_ABSTRACTCS_CMDERR_LENGTH 3
+#define DM_ABSTRACTCS_CMDERR (0x7U << DM_ABSTRACTCS_CMDERR_OFFSET)
+/*
+ * Number of {\tt data} registers that are implemented as part of the
+ * abstract command interface. Valid sizes are 1 -- 12.
+ */
+#define DM_ABSTRACTCS_DATACOUNT_OFFSET 0
+#define DM_ABSTRACTCS_DATACOUNT_LENGTH 4
+#define DM_ABSTRACTCS_DATACOUNT (0xfU << DM_ABSTRACTCS_DATACOUNT_OFFSET)
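/*
 * Usage sketch: \FdmAbstractcsCmderr is write-1-to-clear.  This
 * helper reads the current error code and, if non-zero, clears it by
 * writing ones back to the field.  dmi_read()/dmi_write() are assumed
 * placeholders for the debugger's DMI transport.
 */
#include <stdint.h>

extern uint32_t dmi_read(uint32_t address);
extern void dmi_write(uint32_t address, uint32_t value);

static unsigned abstractcs_clear_cmderr(void)
{
	uint32_t abstractcs = dmi_read(DM_ABSTRACTCS);
	unsigned cmderr = (abstractcs & DM_ABSTRACTCS_CMDERR)
			>> DM_ABSTRACTCS_CMDERR_OFFSET;
	if (cmderr != 0)
		dmi_write(DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
	return cmderr;
}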
+#define DM_COMMAND 0x17
+/*
+ * The type determines the overall functionality of this
+ * abstract command.
+ */
+#define DM_COMMAND_CMDTYPE_OFFSET 24
+#define DM_COMMAND_CMDTYPE_LENGTH 8
+#define DM_COMMAND_CMDTYPE (0xffU << DM_COMMAND_CMDTYPE_OFFSET)
+/*
+ * This field is interpreted in a command-specific manner,
+ * described for each abstract command.
+ */
+#define DM_COMMAND_CONTROL_OFFSET 0
+#define DM_COMMAND_CONTROL_LENGTH 24
+#define DM_COMMAND_CONTROL (0xffffffU << DM_COMMAND_CONTROL_OFFSET)
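/*
 * Usage sketch: packing the two fields above into a \RdmCommand
 * value.  The meaning of the 24 control bits depends on the command
 * type, so this only shows the generic field packing, not a specific
 * command encoding.
 */
#include <stdint.h>

static uint32_t dm_command(uint8_t cmdtype, uint32_t control)
{
	return ((uint32_t)cmdtype << DM_COMMAND_CMDTYPE_OFFSET)
			| (control & DM_COMMAND_CONTROL);
}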
+#define DM_ABSTRACTAUTO 0x18
+/*
+ * When a bit in this field is 1, read or write accesses to the
+ * corresponding {\tt progbuf} word cause the command in \RdmCommand to
+ * be executed again.
+ */
+#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET 16
+#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF_LENGTH 16
+#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF (0xffffU << DM_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET)
+/*
+ * When a bit in this field is 1, read or write accesses to the
+ * corresponding {\tt data} word cause the command in \RdmCommand to be
+ * executed again.
+ */
+#define DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET 0
+#define DM_ABSTRACTAUTO_AUTOEXECDATA_LENGTH 12
+#define DM_ABSTRACTAUTO_AUTOEXECDATA (0xfffU << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET)
+#define DM_CONFSTRPTR0 0x19
+#define DM_CONFSTRPTR0_ADDR_OFFSET 0
+#define DM_CONFSTRPTR0_ADDR_LENGTH 32
+#define DM_CONFSTRPTR0_ADDR (0xffffffffU << DM_CONFSTRPTR0_ADDR_OFFSET)
+#define DM_CONFSTRPTR1 0x1a
+#define DM_CONFSTRPTR1_ADDR_OFFSET 0
+#define DM_CONFSTRPTR1_ADDR_LENGTH 32
+#define DM_CONFSTRPTR1_ADDR (0xffffffffU << DM_CONFSTRPTR1_ADDR_OFFSET)
+#define DM_CONFSTRPTR2 0x1b
+#define DM_CONFSTRPTR2_ADDR_OFFSET 0
+#define DM_CONFSTRPTR2_ADDR_LENGTH 32
+#define DM_CONFSTRPTR2_ADDR (0xffffffffU << DM_CONFSTRPTR2_ADDR_OFFSET)
+#define DM_CONFSTRPTR3 0x1c
+#define DM_CONFSTRPTR3_ADDR_OFFSET 0
+#define DM_CONFSTRPTR3_ADDR_LENGTH 32
+#define DM_CONFSTRPTR3_ADDR (0xffffffffU << DM_CONFSTRPTR3_ADDR_OFFSET)
+#define DM_NEXTDM 0x1d
+#define DM_NEXTDM_ADDR_OFFSET 0
+#define DM_NEXTDM_ADDR_LENGTH 32
+#define DM_NEXTDM_ADDR (0xffffffffU << DM_NEXTDM_ADDR_OFFSET)
+#define DM_DATA0 0x04
+#define DM_DATA0_DATA_OFFSET 0
+#define DM_DATA0_DATA_LENGTH 32
+#define DM_DATA0_DATA (0xffffffffU << DM_DATA0_DATA_OFFSET)
+#define DM_DATA11 0x0f
+#define DM_PROGBUF0 0x20
+#define DM_PROGBUF0_DATA_OFFSET 0
+#define DM_PROGBUF0_DATA_LENGTH 32
+#define DM_PROGBUF0_DATA (0xffffffffU << DM_PROGBUF0_DATA_OFFSET)
+#define DM_PROGBUF15 0x2f
+#define DM_AUTHDATA 0x30
+#define DM_AUTHDATA_DATA_OFFSET 0
+#define DM_AUTHDATA_DATA_LENGTH 32
+#define DM_AUTHDATA_DATA (0xffffffffU << DM_AUTHDATA_DATA_OFFSET)
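/*
 * Usage sketch: one round of the authentication handshake.
 * \FdmDmstatusAuthbusy is polled before touching \RdmAuthdata, as
 * described for that field above.  dmi_read()/dmi_write() are assumed
 * placeholders for the debugger's DMI transport.
 */
#include <stdint.h>

extern uint32_t dmi_read(uint32_t address);
extern void dmi_write(uint32_t address, uint32_t value);

static void authdata_write_when_idle(uint32_t challenge_response)
{
	/* Wait until the authentication module can accept another access. */
	while (dmi_read(DM_DMSTATUS) & DM_DMSTATUS_AUTHBUSY)
		;
	dmi_write(DM_AUTHDATA, challenge_response);
}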
+#define DM_DMCS2 0x32
+/*
+ * 0: The remaining fields in this register configure halt groups.
+ *
+ * 1: The remaining fields in this register configure resume groups.
+ */
+#define DM_DMCS2_GROUPTYPE_OFFSET 11
+#define DM_DMCS2_GROUPTYPE_LENGTH 1
+#define DM_DMCS2_GROUPTYPE (0x1U << DM_DMCS2_GROUPTYPE_OFFSET)
+/*
+ * This field contains the currently selected external trigger.
+ *
+ * If a non-existent trigger value is written here, the hardware will
+ * change it to a valid one or 0 if no external triggers exist.
+ */
+#define DM_DMCS2_EXTTRIGGER_OFFSET 7
+#define DM_DMCS2_EXTTRIGGER_LENGTH 4
+#define DM_DMCS2_EXTTRIGGER (0xfU << DM_DMCS2_EXTTRIGGER_OFFSET)
+/*
+ * When \FdmDmcsTwoHgselect is 0, contains the group of the hart
+ * specified by \Fhartsel.
+ *
+ * When \FdmDmcsTwoHgselect is 1, contains the group of the external
+ * trigger selected by \FdmDmcsTwoExttrigger.
+ *
+ * Writes only have an effect if \FdmDmcsTwoHgwrite is also written 1.
+ *
+ * Group numbers are contiguous starting at 0, with the highest number
+ * being implementation-dependent, and possibly different between
+ * different group types. Debuggers should read back this field after
+ * writing to confirm they are using a hart group that is supported.
+ *
+ * If groups aren't implemented, then this entire field is 0.
+ */
+#define DM_DMCS2_GROUP_OFFSET 2
+#define DM_DMCS2_GROUP_LENGTH 5
+#define DM_DMCS2_GROUP (0x1fU << DM_DMCS2_GROUP_OFFSET)
+/*
+ * When 1 is written and \FdmDmcsTwoHgselect is 0, for every selected
+ * hart the DM will change its group to the value written to \FdmDmcsTwoGroup,
+ * if the hardware supports that group for that hart.
+ *
+ * When 1 is written and \FdmDmcsTwoHgselect is 1, the DM will change
+ * the group of the external trigger selected by \FdmDmcsTwoExttrigger
+ * to the value written to \FdmDmcsTwoGroup, if the hardware supports
+ * that group for that trigger.
+ *
+ * Writing 0 has no effect.
+ */
+#define DM_DMCS2_HGWRITE_OFFSET 1
+#define DM_DMCS2_HGWRITE_LENGTH 1
+#define DM_DMCS2_HGWRITE (0x1U << DM_DMCS2_HGWRITE_OFFSET)
+/*
+ * 0: Operate on harts.
+ *
+ * 1: Operate on external triggers.
+ *
+ * If there are no external triggers, this field must be tied to 0.
+ */
+#define DM_DMCS2_HGSELECT_OFFSET 0
+#define DM_DMCS2_HGSELECT_LENGTH 1
+#define DM_DMCS2_HGSELECT (0x1U << DM_DMCS2_HGSELECT_OFFSET)
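/*
 * Usage sketch: placing the currently selected harts into a halt
 * group, following the hgwrite/group description above (grouptype=0
 * and hgselect=0 select halt groups and harts).  The read-back check
 * follows the suggestion to confirm the group is supported.
 * dmi_read()/dmi_write() are assumed placeholders for the debugger's
 * DMI transport.
 */
#include <stdbool.h>
#include <stdint.h>

extern uint32_t dmi_read(uint32_t address);
extern void dmi_write(uint32_t address, uint32_t value);

static bool dm_set_halt_group(unsigned group)
{
	dmi_write(DM_DMCS2, DM_DMCS2_HGWRITE
			| ((group << DM_DMCS2_GROUP_OFFSET) & DM_DMCS2_GROUP));
	uint32_t dmcs2 = dmi_read(DM_DMCS2);
	return ((dmcs2 & DM_DMCS2_GROUP) >> DM_DMCS2_GROUP_OFFSET) == group;
}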
+#define DM_HALTSUM0 0x40
+#define DM_HALTSUM0_HALTSUM0_OFFSET 0
+#define DM_HALTSUM0_HALTSUM0_LENGTH 32
+#define DM_HALTSUM0_HALTSUM0 (0xffffffffU << DM_HALTSUM0_HALTSUM0_OFFSET)
+#define DM_HALTSUM1 0x13
+#define DM_HALTSUM1_HALTSUM1_OFFSET 0
+#define DM_HALTSUM1_HALTSUM1_LENGTH 32
+#define DM_HALTSUM1_HALTSUM1 (0xffffffffU << DM_HALTSUM1_HALTSUM1_OFFSET)
+#define DM_HALTSUM2 0x34
+#define DM_HALTSUM2_HALTSUM2_OFFSET 0
+#define DM_HALTSUM2_HALTSUM2_LENGTH 32
+#define DM_HALTSUM2_HALTSUM2 (0xffffffffU << DM_HALTSUM2_HALTSUM2_OFFSET)
+#define DM_HALTSUM3 0x35
+#define DM_HALTSUM3_HALTSUM3_OFFSET 0
+#define DM_HALTSUM3_HALTSUM3_LENGTH 32
+#define DM_HALTSUM3_HALTSUM3 (0xffffffffU << DM_HALTSUM3_HALTSUM3_OFFSET)
+#define DM_SBCS 0x38
+/*
+ * 0: The System Bus interface conforms to mainline drafts of this
+ * spec older than 1 January, 2018.
+ *
+ * 1: The System Bus interface conforms to this version of the spec.
+ *
+ * Other values are reserved for future versions.
+ */
+#define DM_SBCS_SBVERSION_OFFSET 29
+#define DM_SBCS_SBVERSION_LENGTH 3
+#define DM_SBCS_SBVERSION (0x7U << DM_SBCS_SBVERSION_OFFSET)
+/*
+ * Set when the debugger attempts to read data while a read is in
+ * progress, or when the debugger initiates a new access while one is
+ * already in progress (while \FdmSbcsSbbusy is set). It remains set until
+ * it's explicitly cleared by the debugger.
+ *
+ * While this field is set, no more system bus accesses can be
+ * initiated by the Debug Module.
+ */
+#define DM_SBCS_SBBUSYERROR_OFFSET 22
+#define DM_SBCS_SBBUSYERROR_LENGTH 1
+#define DM_SBCS_SBBUSYERROR (0x1U << DM_SBCS_SBBUSYERROR_OFFSET)
+/*
+ * When 1, indicates the system bus master is busy. (Whether the
+ * system bus itself is busy is related, but not the same thing.) This
+ * bit goes high immediately when a read or write is requested for any
+ * reason, and does not go low until the access is fully completed.
+ *
+ * Writes to \RdmSbcs while \FdmSbcsSbbusy is high result in undefined
+ * behavior. A debugger must not write to \RdmSbcs until it reads
+ * \FdmSbcsSbbusy as 0.
+ */
+#define DM_SBCS_SBBUSY_OFFSET 21
+#define DM_SBCS_SBBUSY_LENGTH 1
+#define DM_SBCS_SBBUSY (0x1U << DM_SBCS_SBBUSY_OFFSET)
+/*
+ * When 1, every write to \RdmSbaddressZero automatically triggers a
+ * system bus read at the new address.
+ */
+#define DM_SBCS_SBREADONADDR_OFFSET 20
+#define DM_SBCS_SBREADONADDR_LENGTH 1
+#define DM_SBCS_SBREADONADDR (0x1U << DM_SBCS_SBREADONADDR_OFFSET)
+/*
+ * Select the access size to use for system bus accesses.
+ *
+ * 0: 8-bit
+ *
+ * 1: 16-bit
+ *
+ * 2: 32-bit
+ *
+ * 3: 64-bit
+ *
+ * 4: 128-bit
+ *
+ * If \FdmSbcsSbaccess has an unsupported value when the DM starts a bus
+ * access, the access is not performed and \FdmSbcsSberror is set to 4.
+ */
+#define DM_SBCS_SBACCESS_OFFSET 17
+#define DM_SBCS_SBACCESS_LENGTH 3
+#define DM_SBCS_SBACCESS (0x7U << DM_SBCS_SBACCESS_OFFSET)
+/*
+ * When 1, {\tt sbaddress} is incremented by the access size (in
+ * bytes) selected in \FdmSbcsSbaccess after every system bus access.
+ */
+#define DM_SBCS_SBAUTOINCREMENT_OFFSET 16
+#define DM_SBCS_SBAUTOINCREMENT_LENGTH 1
+#define DM_SBCS_SBAUTOINCREMENT (0x1U << DM_SBCS_SBAUTOINCREMENT_OFFSET)
+/*
+ * When 1, every read from \RdmSbdataZero automatically triggers a
+ * system bus read at the (possibly auto-incremented) address.
+ */
+#define DM_SBCS_SBREADONDATA_OFFSET 15
+#define DM_SBCS_SBREADONDATA_LENGTH 1
+#define DM_SBCS_SBREADONDATA (0x1U << DM_SBCS_SBREADONDATA_OFFSET)
+/*
+ * When the Debug Module's system bus
+ * master encounters an error, this field gets set. The bits in this
+ * field remain set until they are cleared by writing 1 to them.
+ * While this field is non-zero, no more system bus accesses can be
+ * initiated by the Debug Module.
+ *
+ * An implementation may report ``Other'' (7) for any error condition.
+ *
+ * 0: There was no bus error.
+ *
+ * 1: There was a timeout.
+ *
+ * 2: A bad address was accessed.
+ *
+ * 3: There was an alignment error.
+ *
+ * 4: An access of unsupported size was requested.
+ *
+ * 7: Other.
+ */
+#define DM_SBCS_SBERROR_OFFSET 12
+#define DM_SBCS_SBERROR_LENGTH 3
+#define DM_SBCS_SBERROR (0x7U << DM_SBCS_SBERROR_OFFSET)
+/*
+ * Width of system bus addresses in bits. (0 indicates there is no bus
+ * access support.)
+ */
+#define DM_SBCS_SBASIZE_OFFSET 5
+#define DM_SBCS_SBASIZE_LENGTH 7
+#define DM_SBCS_SBASIZE (0x7fU << DM_SBCS_SBASIZE_OFFSET)
+/*
+ * 1 when 128-bit system bus accesses are supported.
+ */
+#define DM_SBCS_SBACCESS128_OFFSET 4
+#define DM_SBCS_SBACCESS128_LENGTH 1
+#define DM_SBCS_SBACCESS128 (0x1U << DM_SBCS_SBACCESS128_OFFSET)
+/*
+ * 1 when 64-bit system bus accesses are supported.
+ */
+#define DM_SBCS_SBACCESS64_OFFSET 3
+#define DM_SBCS_SBACCESS64_LENGTH 1
+#define DM_SBCS_SBACCESS64 (0x1U << DM_SBCS_SBACCESS64_OFFSET)
+/*
+ * 1 when 32-bit system bus accesses are supported.
+ */
+#define DM_SBCS_SBACCESS32_OFFSET 2
+#define DM_SBCS_SBACCESS32_LENGTH 1
+#define DM_SBCS_SBACCESS32 (0x1U << DM_SBCS_SBACCESS32_OFFSET)
+/*
+ * 1 when 16-bit system bus accesses are supported.
+ */
+#define DM_SBCS_SBACCESS16_OFFSET 1
+#define DM_SBCS_SBACCESS16_LENGTH 1
+#define DM_SBCS_SBACCESS16 (0x1U << DM_SBCS_SBACCESS16_OFFSET)
+/*
+ * 1 when 8-bit system bus accesses are supported.
+ */
+#define DM_SBCS_SBACCESS8_OFFSET 0
+#define DM_SBCS_SBACCESS8_LENGTH 1
+#define DM_SBCS_SBACCESS8 (0x1U << DM_SBCS_SBACCESS8_OFFSET)
+#define DM_SBADDRESS0 0x39
+/*
+ * Accesses bits 31:0 of the physical address in {\tt sbaddress}.
+ */
+#define DM_SBADDRESS0_ADDRESS_OFFSET 0
+#define DM_SBADDRESS0_ADDRESS_LENGTH 32
+#define DM_SBADDRESS0_ADDRESS (0xffffffffU << DM_SBADDRESS0_ADDRESS_OFFSET)
+#define DM_SBADDRESS1 0x3a
+/*
+ * Accesses bits 63:32 of the physical address in {\tt sbaddress} (if
+ * the system address bus is that wide).
+ */
+#define DM_SBADDRESS1_ADDRESS_OFFSET 0
+#define DM_SBADDRESS1_ADDRESS_LENGTH 32
+#define DM_SBADDRESS1_ADDRESS (0xffffffffU << DM_SBADDRESS1_ADDRESS_OFFSET)
+#define DM_SBADDRESS2 0x3b
+/*
+ * Accesses bits 95:64 of the physical address in {\tt sbaddress} (if
+ * the system address bus is that wide).
+ */
+#define DM_SBADDRESS2_ADDRESS_OFFSET 0
+#define DM_SBADDRESS2_ADDRESS_LENGTH 32
+#define DM_SBADDRESS2_ADDRESS (0xffffffffU << DM_SBADDRESS2_ADDRESS_OFFSET)
+#define DM_SBADDRESS3 0x37
+/*
+ * Accesses bits 127:96 of the physical address in {\tt sbaddress} (if
+ * the system address bus is that wide).
+ */
+#define DM_SBADDRESS3_ADDRESS_OFFSET 0
+#define DM_SBADDRESS3_ADDRESS_LENGTH 32
+#define DM_SBADDRESS3_ADDRESS (0xffffffffU << DM_SBADDRESS3_ADDRESS_OFFSET)
+#define DM_SBDATA0 0x3c
+/*
+ * Accesses bits 31:0 of {\tt sbdata}.
+ */
+#define DM_SBDATA0_DATA_OFFSET 0
+#define DM_SBDATA0_DATA_LENGTH 32
+#define DM_SBDATA0_DATA (0xffffffffU << DM_SBDATA0_DATA_OFFSET)
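/*
 * Usage sketch: a block of 32-bit system bus reads using the
 * sbreadonaddr/sbreadondata/sbautoincrement bits described above.
 * dmi_read()/dmi_write() are assumed placeholders for the debugger's
 * DMI transport; sbbusy/sberror handling is omitted for brevity.
 */
#include <stddef.h>
#include <stdint.h>

extern uint32_t dmi_read(uint32_t address);
extern void dmi_write(uint32_t address, uint32_t value);

static void sb_read32_block(uint32_t address, uint32_t *buf, size_t count)
{
	/* sbaccess=2 selects 32-bit accesses. */
	dmi_write(DM_SBCS, DM_SBCS_SBREADONADDR | DM_SBCS_SBREADONDATA
			| DM_SBCS_SBAUTOINCREMENT
			| (2U << DM_SBCS_SBACCESS_OFFSET));
	/* Writing the address triggers the first bus read (sbreadonaddr);
	 * each sbdata0 read then triggers the next one (sbreadondata). */
	dmi_write(DM_SBADDRESS0, address);
	for (size_t i = 0; i < count; i++) {
		if (i + 1 == count)
			/* Clear sbreadondata so the final sbdata0 read does not
			 * start a bus access past the requested range. */
			dmi_write(DM_SBCS, 2U << DM_SBCS_SBACCESS_OFFSET);
		buf[i] = dmi_read(DM_SBDATA0);
	}
}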
+#define DM_SBDATA1 0x3d
+/*
+ * Accesses bits 63:32 of {\tt sbdata} (if the system bus is that
+ * wide).
+ */
+#define DM_SBDATA1_DATA_OFFSET 0
+#define DM_SBDATA1_DATA_LENGTH 32
+#define DM_SBDATA1_DATA (0xffffffffU << DM_SBDATA1_DATA_OFFSET)
+#define DM_SBDATA2 0x3e
/*
-* This field is 1 when all currently selected harts are unavailable.
+ * Accesses bits 95:64 of {\tt sbdata} (if the system bus is that
+ * wide).
*/
-#define DMI_DMSTATUS_ALLUNAVAIL_OFFSET 13
-#define DMI_DMSTATUS_ALLUNAVAIL_LENGTH 1
-#define DMI_DMSTATUS_ALLUNAVAIL (0x1U << DMI_DMSTATUS_ALLUNAVAIL_OFFSET)
+#define DM_SBDATA2_DATA_OFFSET 0
+#define DM_SBDATA2_DATA_LENGTH 32
+#define DM_SBDATA2_DATA (0xffffffffU << DM_SBDATA2_DATA_OFFSET)
+#define DM_SBDATA3 0x3f
/*
-* This field is 1 when any currently selected hart is unavailable.
+ * Accesses bits 127:96 of {\tt sbdata} (if the system bus is that
+ * wide).
*/
-#define DMI_DMSTATUS_ANYUNAVAIL_OFFSET 12
-#define DMI_DMSTATUS_ANYUNAVAIL_LENGTH 1
-#define DMI_DMSTATUS_ANYUNAVAIL (0x1U << DMI_DMSTATUS_ANYUNAVAIL_OFFSET)
-/*
-* This field is 1 when all currently selected harts are running.
- */
-#define DMI_DMSTATUS_ALLRUNNING_OFFSET 11
-#define DMI_DMSTATUS_ALLRUNNING_LENGTH 1
-#define DMI_DMSTATUS_ALLRUNNING (0x1U << DMI_DMSTATUS_ALLRUNNING_OFFSET)
-/*
-* This field is 1 when any currently selected hart is running.
- */
-#define DMI_DMSTATUS_ANYRUNNING_OFFSET 10
-#define DMI_DMSTATUS_ANYRUNNING_LENGTH 1
-#define DMI_DMSTATUS_ANYRUNNING (0x1U << DMI_DMSTATUS_ANYRUNNING_OFFSET)
-/*
-* This field is 1 when all currently selected harts are halted.
- */
-#define DMI_DMSTATUS_ALLHALTED_OFFSET 9
-#define DMI_DMSTATUS_ALLHALTED_LENGTH 1
-#define DMI_DMSTATUS_ALLHALTED (0x1U << DMI_DMSTATUS_ALLHALTED_OFFSET)
-/*
-* This field is 1 when any currently selected hart is halted.
- */
-#define DMI_DMSTATUS_ANYHALTED_OFFSET 8
-#define DMI_DMSTATUS_ANYHALTED_LENGTH 1
-#define DMI_DMSTATUS_ANYHALTED (0x1U << DMI_DMSTATUS_ANYHALTED_OFFSET)
-/*
-* 0 when authentication is required before using the DM. 1 when the
-* authentication check has passed. On components that don't implement
-* authentication, this bit must be preset as 1.
- */
-#define DMI_DMSTATUS_AUTHENTICATED_OFFSET 7
-#define DMI_DMSTATUS_AUTHENTICATED_LENGTH 1
-#define DMI_DMSTATUS_AUTHENTICATED (0x1U << DMI_DMSTATUS_AUTHENTICATED_OFFSET)
-/*
-* 0: The authentication module is ready to process the next
-* read/write to \Rauthdata.
-*
-* 1: The authentication module is busy. Accessing \Rauthdata results
-* in unspecified behavior.
-*
-* \Fauthbusy only becomes set in immediate response to an access to
-* \Rauthdata.
- */
-#define DMI_DMSTATUS_AUTHBUSY_OFFSET 6
-#define DMI_DMSTATUS_AUTHBUSY_LENGTH 1
-#define DMI_DMSTATUS_AUTHBUSY (0x1U << DMI_DMSTATUS_AUTHBUSY_OFFSET)
-/*
-* 1 if this Debug Module supports halt-on-reset functionality
-* controllable by the \Fsetresethaltreq and \Fclrresethaltreq bits.
-* 0 otherwise.
- */
-#define DMI_DMSTATUS_HASRESETHALTREQ_OFFSET 5
-#define DMI_DMSTATUS_HASRESETHALTREQ_LENGTH 1
-#define DMI_DMSTATUS_HASRESETHALTREQ (0x1U << DMI_DMSTATUS_HASRESETHALTREQ_OFFSET)
-/*
-* 0: \Rdevtreeaddrzero--\Rdevtreeaddrthree hold information which
-* is not relevant to the Device Tree.
-*
-* 1: \Rdevtreeaddrzero--\Rdevtreeaddrthree registers hold the address of the
-* Device Tree.
- */
-#define DMI_DMSTATUS_DEVTREEVALID_OFFSET 4
-#define DMI_DMSTATUS_DEVTREEVALID_LENGTH 1
-#define DMI_DMSTATUS_DEVTREEVALID (0x1U << DMI_DMSTATUS_DEVTREEVALID_OFFSET)
-/*
-* 0: There is no Debug Module present.
-*
-* 1: There is a Debug Module and it conforms to version 0.11 of this
-* specification.
-*
-* 2: There is a Debug Module and it conforms to version 0.13 of this
-* specification.
-*
-* 15: There is a Debug Module but it does not conform to any
-* available version of this spec.
- */
-#define DMI_DMSTATUS_VERSION_OFFSET 0
-#define DMI_DMSTATUS_VERSION_LENGTH 4
-#define DMI_DMSTATUS_VERSION (0xfU << DMI_DMSTATUS_VERSION_OFFSET)
-#define DMI_DMCONTROL 0x10
-/*
-* Writes the halt request bit for all currently selected harts.
-* When set to 1, each selected hart will halt if it is not currently
-* halted.
-*
-* Writing 1 or 0 has no effect on a hart which is already halted, but
-* the bit must be cleared to 0 before the hart is resumed.
-*
-* Writes apply to the new value of \Fhartsel and \Fhasel.
- */
-#define DMI_DMCONTROL_HALTREQ_OFFSET 31
-#define DMI_DMCONTROL_HALTREQ_LENGTH 1
-#define DMI_DMCONTROL_HALTREQ (0x1U << DMI_DMCONTROL_HALTREQ_OFFSET)
-/*
-* Writes the resume request bit for all currently selected harts.
-* When set to 1, each selected hart will resume if it is currently
-* halted.
-*
-* The resume request bit is ignored while the halt request bit is
-* set.
-*
-* Writes apply to the new value of \Fhartsel and \Fhasel.
- */
-#define DMI_DMCONTROL_RESUMEREQ_OFFSET 30
-#define DMI_DMCONTROL_RESUMEREQ_LENGTH 1
-#define DMI_DMCONTROL_RESUMEREQ (0x1U << DMI_DMCONTROL_RESUMEREQ_OFFSET)
-/*
-* This optional field writes the reset bit for all the currently
-* selected harts. To perform a reset the debugger writes 1, and then
-* writes 0 to deassert the reset signal.
-*
-* If this feature is not implemented, the bit always stays 0, so
-* after writing 1 the debugger can read the register back to see if
-* the feature is supported.
-*
-* Writes apply to the new value of \Fhartsel and \Fhasel.
- */
-#define DMI_DMCONTROL_HARTRESET_OFFSET 29
-#define DMI_DMCONTROL_HARTRESET_LENGTH 1
-#define DMI_DMCONTROL_HARTRESET (0x1U << DMI_DMCONTROL_HARTRESET_OFFSET)
-/*
-* Writing 1 to this bit clears the {\tt havereset} bits for
-* any selected harts.
-*
-* Writes apply to the new value of \Fhartsel and \Fhasel.
- */
-#define DMI_DMCONTROL_ACKHAVERESET_OFFSET 28
-#define DMI_DMCONTROL_ACKHAVERESET_LENGTH 1
-#define DMI_DMCONTROL_ACKHAVERESET (0x1U << DMI_DMCONTROL_ACKHAVERESET_OFFSET)
-/*
-* Selects the definition of currently selected harts.
-*
-* 0: There is a single currently selected hart, that selected by \Fhartsel.
-*
-* 1: There may be multiple currently selected harts -- that selected by \Fhartsel,
-* plus those selected by the hart array mask register.
-*
-* An implementation which does not implement the hart array mask register
-* must tie this field to 0. A debugger which wishes to use the hart array
-* mask register feature should set this bit and read back to see if the functionality
-* is supported.
- */
-#define DMI_DMCONTROL_HASEL_OFFSET 26
-#define DMI_DMCONTROL_HASEL_LENGTH 1
-#define DMI_DMCONTROL_HASEL (0x1U << DMI_DMCONTROL_HASEL_OFFSET)
-/*
-* The low 10 bits of \Fhartsel: the DM-specific index of the hart to
-* select. This hart is always part of the currently selected harts.
- */
-#define DMI_DMCONTROL_HARTSELLO_OFFSET 16
-#define DMI_DMCONTROL_HARTSELLO_LENGTH 10
-#define DMI_DMCONTROL_HARTSELLO (0x3ffU << DMI_DMCONTROL_HARTSELLO_OFFSET)
-/*
-* The high 10 bits of \Fhartsel: the DM-specific index of the hart to
-* select. This hart is always part of the currently selected harts.
- */
-#define DMI_DMCONTROL_HARTSELHI_OFFSET 6
-#define DMI_DMCONTROL_HARTSELHI_LENGTH 10
-#define DMI_DMCONTROL_HARTSELHI (0x3ffU << DMI_DMCONTROL_HARTSELHI_OFFSET)
-/*
-* This optional field writes the halt-on-reset request bit for all
-* currently selected harts.
-* When set to 1, each selected hart will halt upon the next deassertion
-* of its reset. The halt-on-reset request bit is not automatically
-* cleared. The debugger must write to \Fclrresethaltreq to clear it.
-*
-* Writes apply to the new value of \Fhartsel and \Fhasel.
-*
-* If \Fhasresethaltreq is 0, this field is not implemented.
- */
-#define DMI_DMCONTROL_SETRESETHALTREQ_OFFSET 3
-#define DMI_DMCONTROL_SETRESETHALTREQ_LENGTH 1
-#define DMI_DMCONTROL_SETRESETHALTREQ (0x1U << DMI_DMCONTROL_SETRESETHALTREQ_OFFSET)
-/*
-* This optional field clears the halt-on-reset request bit for all
-* currently selected harts.
-*
-* Writes apply to the new value of \Fhartsel and \Fhasel.
- */
-#define DMI_DMCONTROL_CLRRESETHALTREQ_OFFSET 2
-#define DMI_DMCONTROL_CLRRESETHALTREQ_LENGTH 1
-#define DMI_DMCONTROL_CLRRESETHALTREQ (0x1U << DMI_DMCONTROL_CLRRESETHALTREQ_OFFSET)
-/*
-* This bit controls the reset signal from the DM to the rest of the
-* system. The signal should reset every part of the system, including
-* every hart, except for the DM and any logic required to access the
-* DM.
-* To perform a system reset the debugger writes 1,
-* and then writes 0
-* to deassert the reset.
- */
-#define DMI_DMCONTROL_NDMRESET_OFFSET 1
-#define DMI_DMCONTROL_NDMRESET_LENGTH 1
-#define DMI_DMCONTROL_NDMRESET (0x1U << DMI_DMCONTROL_NDMRESET_OFFSET)
-/*
-* This bit serves as a reset signal for the Debug Module itself.
-*
-* 0: The module's state, including authentication mechanism,
-* takes its reset values (the \Fdmactive bit is the only bit which can
-* be written to something other than its reset value).
-*
-* 1: The module functions normally.
-*
-* No other mechanism should exist that may result in resetting the
-* Debug Module after power up, including the platform's system reset
-* or Debug Transport reset signals.
-*
-* A debugger may pulse this bit low to get the Debug Module into a
-* known state.
-*
-* Implementations may use this bit to aid debugging, for example by
-* preventing the Debug Module from being power gated while debugging
-* is active.
- */
-#define DMI_DMCONTROL_DMACTIVE_OFFSET 0
-#define DMI_DMCONTROL_DMACTIVE_LENGTH 1
-#define DMI_DMCONTROL_DMACTIVE (0x1U << DMI_DMCONTROL_DMACTIVE_OFFSET)
-#define DMI_HARTINFO 0x12
-/*
-* Number of {\tt dscratch} registers available for the debugger
-* to use during program buffer execution, starting from \Rdscratchzero.
-* The debugger can make no assumptions about the contents of these
-* registers between commands.
- */
-#define DMI_HARTINFO_NSCRATCH_OFFSET 20
-#define DMI_HARTINFO_NSCRATCH_LENGTH 4
-#define DMI_HARTINFO_NSCRATCH (0xfU << DMI_HARTINFO_NSCRATCH_OFFSET)
-/*
-* 0: The {\tt data} registers are shadowed in the hart by CSR
-* registers. Each CSR register is MXLEN bits in size, and corresponds
-* to a single argument, per Table~\ref{tab:datareg}.
-*
-* 1: The {\tt data} registers are shadowed in the hart's memory map.
-* Each register takes up 4 bytes in the memory map.
- */
-#define DMI_HARTINFO_DATAACCESS_OFFSET 16
-#define DMI_HARTINFO_DATAACCESS_LENGTH 1
-#define DMI_HARTINFO_DATAACCESS (0x1U << DMI_HARTINFO_DATAACCESS_OFFSET)
-/*
-* If \Fdataaccess is 0: Number of CSR registers dedicated to
-* shadowing the {\tt data} registers.
-*
-* If \Fdataaccess is 1: Number of 32-bit words in the memory map
-* dedicated to shadowing the {\tt data} registers.
-*
-* Since there are at most 12 {\tt data} registers, the value in this
-* register must be 12 or smaller.
- */
-#define DMI_HARTINFO_DATASIZE_OFFSET 12
-#define DMI_HARTINFO_DATASIZE_LENGTH 4
-#define DMI_HARTINFO_DATASIZE (0xfU << DMI_HARTINFO_DATASIZE_OFFSET)
-/*
-* If \Fdataaccess is 0: The number of the first CSR dedicated to
-* shadowing the {\tt data} registers.
-*
-* If \Fdataaccess is 1: Signed address of RAM where the {\tt data}
-* registers are shadowed, to be used to access relative to \Rzero.
- */
-#define DMI_HARTINFO_DATAADDR_OFFSET 0
-#define DMI_HARTINFO_DATAADDR_LENGTH 12
-#define DMI_HARTINFO_DATAADDR (0xfffU << DMI_HARTINFO_DATAADDR_OFFSET)
-#define DMI_HAWINDOWSEL 0x14
-/*
-* The high bits of this field may be tied to 0, depending on how large
-* the array mask register is. Eg. on a system with 48 harts only bit 0
-* of this field may actually be writable.
- */
-#define DMI_HAWINDOWSEL_HAWINDOWSEL_OFFSET 0
-#define DMI_HAWINDOWSEL_HAWINDOWSEL_LENGTH 15
-#define DMI_HAWINDOWSEL_HAWINDOWSEL (0x7fffU << DMI_HAWINDOWSEL_HAWINDOWSEL_OFFSET)
-#define DMI_HAWINDOW 0x15
-#define DMI_HAWINDOW_MASKDATA_OFFSET 0
-#define DMI_HAWINDOW_MASKDATA_LENGTH 32
-#define DMI_HAWINDOW_MASKDATA (0xffffffffU << DMI_HAWINDOW_MASKDATA_OFFSET)
-#define DMI_ABSTRACTCS 0x16
-/*
-* Size of the Program Buffer, in 32-bit words. Valid sizes are 0 - 16.
- */
-#define DMI_ABSTRACTCS_PROGBUFSIZE_OFFSET 24
-#define DMI_ABSTRACTCS_PROGBUFSIZE_LENGTH 5
-#define DMI_ABSTRACTCS_PROGBUFSIZE (0x1fU << DMI_ABSTRACTCS_PROGBUFSIZE_OFFSET)
-/*
-* 1: An abstract command is currently being executed.
-*
-* This bit is set as soon as \Rcommand is written, and is
-* not cleared until that command has completed.
- */
-#define DMI_ABSTRACTCS_BUSY_OFFSET 12
-#define DMI_ABSTRACTCS_BUSY_LENGTH 1
-#define DMI_ABSTRACTCS_BUSY (0x1U << DMI_ABSTRACTCS_BUSY_OFFSET)
-/*
-* Gets set if an abstract command fails. The bits in this field remain set until
-* they are cleared by writing 1 to them. No abstract command is
-* started until the value is reset to 0.
-*
-* 0 (none): No error.
-*
-* 1 (busy): An abstract command was executing while \Rcommand,
-* \Rabstractcs, \Rabstractauto was written, or when one
-* of the {\tt data} or {\tt progbuf} registers was read or written.
-*
-* 2 (not supported): The requested command is not supported,
-* regardless of whether the hart is running or not.
-*
-* 3 (exception): An exception occurred while executing the command
-* (eg. while executing the Program Buffer).
-*
-* 4 (halt/resume): The abstract command couldn't execute because the
-* hart wasn't in the required state (running/halted).
-*
-* 7 (other): The command failed for another reason.
- */
-#define DMI_ABSTRACTCS_CMDERR_OFFSET 8
-#define DMI_ABSTRACTCS_CMDERR_LENGTH 3
-#define DMI_ABSTRACTCS_CMDERR (0x7U << DMI_ABSTRACTCS_CMDERR_OFFSET)
-/*
-* Number of {\tt data} registers that are implemented as part of the
-* abstract command interface. Valid sizes are 0 - 12.
- */
-#define DMI_ABSTRACTCS_DATACOUNT_OFFSET 0
-#define DMI_ABSTRACTCS_DATACOUNT_LENGTH 4
-#define DMI_ABSTRACTCS_DATACOUNT (0xfU << DMI_ABSTRACTCS_DATACOUNT_OFFSET)
-#define DMI_COMMAND 0x17
-/*
-* The type determines the overall functionality of this
-* abstract command.
- */
-#define DMI_COMMAND_CMDTYPE_OFFSET 24
-#define DMI_COMMAND_CMDTYPE_LENGTH 8
-#define DMI_COMMAND_CMDTYPE (0xffU << DMI_COMMAND_CMDTYPE_OFFSET)
-/*
-* This field is interpreted in a command-specific manner,
-* described for each abstract command.
- */
-#define DMI_COMMAND_CONTROL_OFFSET 0
-#define DMI_COMMAND_CONTROL_LENGTH 24
-#define DMI_COMMAND_CONTROL (0xffffffU << DMI_COMMAND_CONTROL_OFFSET)
-#define DMI_ABSTRACTAUTO 0x18
-/*
-* When a bit in this field is 1, read or write accesses to the corresponding {\tt progbuf} word
-* cause the command in \Rcommand to be executed again.
- */
-#define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET 16
-#define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_LENGTH 16
-#define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF (0xffffU << DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET)
-/*
-* When a bit in this field is 1, read or write accesses to the corresponding {\tt data} word
-* cause the command in \Rcommand to be executed again.
- */
-#define DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET 0
-#define DMI_ABSTRACTAUTO_AUTOEXECDATA_LENGTH 12
-#define DMI_ABSTRACTAUTO_AUTOEXECDATA (0xfffU << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET)
-#define DMI_DEVTREEADDR0 0x19
-#define DMI_DEVTREEADDR0_ADDR_OFFSET 0
-#define DMI_DEVTREEADDR0_ADDR_LENGTH 32
-#define DMI_DEVTREEADDR0_ADDR (0xffffffffU << DMI_DEVTREEADDR0_ADDR_OFFSET)
-#define DMI_DEVTREEADDR1 0x1a
-#define DMI_DEVTREEADDR2 0x1b
-#define DMI_DEVTREEADDR3 0x1c
-#define DMI_NEXTDM 0x1d
-#define DMI_NEXTDM_ADDR_OFFSET 0
-#define DMI_NEXTDM_ADDR_LENGTH 32
-#define DMI_NEXTDM_ADDR (0xffffffffU << DMI_NEXTDM_ADDR_OFFSET)
-#define DMI_DATA0 0x04
-#define DMI_DATA0_DATA_OFFSET 0
-#define DMI_DATA0_DATA_LENGTH 32
-#define DMI_DATA0_DATA (0xffffffffU << DMI_DATA0_DATA_OFFSET)
-#define DMI_DATA11 0x0f
-#define DMI_PROGBUF0 0x20
-#define DMI_PROGBUF0_DATA_OFFSET 0
-#define DMI_PROGBUF0_DATA_LENGTH 32
-#define DMI_PROGBUF0_DATA (0xffffffffU << DMI_PROGBUF0_DATA_OFFSET)
-#define DMI_PROGBUF15 0x2f
-#define DMI_AUTHDATA 0x30
-#define DMI_AUTHDATA_DATA_OFFSET 0
-#define DMI_AUTHDATA_DATA_LENGTH 32
-#define DMI_AUTHDATA_DATA (0xffffffffU << DMI_AUTHDATA_DATA_OFFSET)
-#define DMI_HALTSUM0 0x40
-#define DMI_HALTSUM0_HALTSUM0_OFFSET 0
-#define DMI_HALTSUM0_HALTSUM0_LENGTH 32
-#define DMI_HALTSUM0_HALTSUM0 (0xffffffffU << DMI_HALTSUM0_HALTSUM0_OFFSET)
-#define DMI_HALTSUM1 0x13
-#define DMI_HALTSUM1_HALTSUM1_OFFSET 0
-#define DMI_HALTSUM1_HALTSUM1_LENGTH 32
-#define DMI_HALTSUM1_HALTSUM1 (0xffffffffU << DMI_HALTSUM1_HALTSUM1_OFFSET)
-#define DMI_HALTSUM2 0x34
-#define DMI_HALTSUM2_HALTSUM2_OFFSET 0
-#define DMI_HALTSUM2_HALTSUM2_LENGTH 32
-#define DMI_HALTSUM2_HALTSUM2 (0xffffffffU << DMI_HALTSUM2_HALTSUM2_OFFSET)
-#define DMI_HALTSUM3 0x35
-#define DMI_HALTSUM3_HALTSUM3_OFFSET 0
-#define DMI_HALTSUM3_HALTSUM3_LENGTH 32
-#define DMI_HALTSUM3_HALTSUM3 (0xffffffffU << DMI_HALTSUM3_HALTSUM3_OFFSET)
-#define DMI_SBADDRESS3 0x37
-/*
-* Accesses bits 127:96 of the physical address in {\tt sbaddress} (if
-* the system address bus is that wide).
- */
-#define DMI_SBADDRESS3_ADDRESS_OFFSET 0
-#define DMI_SBADDRESS3_ADDRESS_LENGTH 32
-#define DMI_SBADDRESS3_ADDRESS (0xffffffffU << DMI_SBADDRESS3_ADDRESS_OFFSET)
-#define DMI_SBCS 0x38
-/*
-* 0: The System Bus interface conforms to mainline drafts of this
-* spec older than 1 January, 2018.
-*
-* 1: The System Bus interface conforms to this version of the spec.
-*
-* Other values are reserved for future versions.
- */
-#define DMI_SBCS_SBVERSION_OFFSET 29
-#define DMI_SBCS_SBVERSION_LENGTH 3
-#define DMI_SBCS_SBVERSION (0x7U << DMI_SBCS_SBVERSION_OFFSET)
-/*
-* Set when the debugger attempts to read data while a read is in
-* progress, or when the debugger initiates a new access while one is
-* already in progress (while \Fsbbusy is set). It remains set until
-* it's explicitly cleared by the debugger.
-*
-* While this field is non-zero, no more system bus accesses can be
-* initiated by the Debug Module.
- */
-#define DMI_SBCS_SBBUSYERROR_OFFSET 22
-#define DMI_SBCS_SBBUSYERROR_LENGTH 1
-#define DMI_SBCS_SBBUSYERROR (0x1U << DMI_SBCS_SBBUSYERROR_OFFSET)
-/*
-* When 1, indicates the system bus master is busy. (Whether the
-* system bus itself is busy is related, but not the same thing.) This
-* bit goes high immediately when a read or write is requested for any
-* reason, and does not go low until the access is fully completed.
-*
-* Writes to \Rsbcs while \Fsbbusy is high result in undefined
-* behavior. A debugger must not write to \Rsbcs until it reads
-* \Fsbbusy as 0.
- */
-#define DMI_SBCS_SBBUSY_OFFSET 21
-#define DMI_SBCS_SBBUSY_LENGTH 1
-#define DMI_SBCS_SBBUSY (0x1U << DMI_SBCS_SBBUSY_OFFSET)
-/*
-* When 1, every write to \Rsbaddresszero automatically triggers a
-* system bus read at the new address.
- */
-#define DMI_SBCS_SBREADONADDR_OFFSET 20
-#define DMI_SBCS_SBREADONADDR_LENGTH 1
-#define DMI_SBCS_SBREADONADDR (0x1U << DMI_SBCS_SBREADONADDR_OFFSET)
-/*
-* Select the access size to use for system bus accesses.
-*
-* 0: 8-bit
-*
-* 1: 16-bit
-*
-* 2: 32-bit
-*
-* 3: 64-bit
-*
-* 4: 128-bit
-*
-* If \Fsbaccess has an unsupported value when the DM starts a bus
-* access, the access is not performed and \Fsberror is set to 3.
- */
-#define DMI_SBCS_SBACCESS_OFFSET 17
-#define DMI_SBCS_SBACCESS_LENGTH 3
-#define DMI_SBCS_SBACCESS (0x7U << DMI_SBCS_SBACCESS_OFFSET)
-/*
-* When 1, {\tt sbaddress} is incremented by the access size (in
-* bytes) selected in \Fsbaccess after every system bus access.
- */
-#define DMI_SBCS_SBAUTOINCREMENT_OFFSET 16
-#define DMI_SBCS_SBAUTOINCREMENT_LENGTH 1
-#define DMI_SBCS_SBAUTOINCREMENT (0x1U << DMI_SBCS_SBAUTOINCREMENT_OFFSET)
-/*
-* When 1, every read from \Rsbdatazero automatically triggers a
-* system bus read at the (possibly auto-incremented) address.
- */
-#define DMI_SBCS_SBREADONDATA_OFFSET 15
-#define DMI_SBCS_SBREADONDATA_LENGTH 1
-#define DMI_SBCS_SBREADONDATA (0x1U << DMI_SBCS_SBREADONDATA_OFFSET)
-/*
-* When the Debug Module's system bus
-* master causes a bus error, this field gets set. The bits in this
-* field remain set until they are cleared by writing 1 to them.
-* While this field is non-zero, no more system bus accesses can be
-* initiated by the Debug Module.
-*
-* An implementation may report "Other" (7) for any error condition.
-*
-* 0: There was no bus error.
-*
-* 1: There was a timeout.
-*
-* 2: A bad address was accessed.
-*
-* 3: There was an alignment error.
-*
-* 4: An access of unsupported size was requested.
-*
-* 7: Other.
- */
-#define DMI_SBCS_SBERROR_OFFSET 12
-#define DMI_SBCS_SBERROR_LENGTH 3
-#define DMI_SBCS_SBERROR (0x7U << DMI_SBCS_SBERROR_OFFSET)
-/*
-* Width of system bus addresses in bits. (0 indicates there is no bus
-* access support.)
- */
-#define DMI_SBCS_SBASIZE_OFFSET 5
-#define DMI_SBCS_SBASIZE_LENGTH 7
-#define DMI_SBCS_SBASIZE (0x7fU << DMI_SBCS_SBASIZE_OFFSET)
-/*
-* 1 when 128-bit system bus accesses are supported.
- */
-#define DMI_SBCS_SBACCESS128_OFFSET 4
-#define DMI_SBCS_SBACCESS128_LENGTH 1
-#define DMI_SBCS_SBACCESS128 (0x1U << DMI_SBCS_SBACCESS128_OFFSET)
-/*
-* 1 when 64-bit system bus accesses are supported.
- */
-#define DMI_SBCS_SBACCESS64_OFFSET 3
-#define DMI_SBCS_SBACCESS64_LENGTH 1
-#define DMI_SBCS_SBACCESS64 (0x1U << DMI_SBCS_SBACCESS64_OFFSET)
-/*
-* 1 when 32-bit system bus accesses are supported.
- */
-#define DMI_SBCS_SBACCESS32_OFFSET 2
-#define DMI_SBCS_SBACCESS32_LENGTH 1
-#define DMI_SBCS_SBACCESS32 (0x1U << DMI_SBCS_SBACCESS32_OFFSET)
-/*
-* 1 when 16-bit system bus accesses are supported.
- */
-#define DMI_SBCS_SBACCESS16_OFFSET 1
-#define DMI_SBCS_SBACCESS16_LENGTH 1
-#define DMI_SBCS_SBACCESS16 (0x1U << DMI_SBCS_SBACCESS16_OFFSET)
-/*
-* 1 when 8-bit system bus accesses are supported.
- */
-#define DMI_SBCS_SBACCESS8_OFFSET 0
-#define DMI_SBCS_SBACCESS8_LENGTH 1
-#define DMI_SBCS_SBACCESS8 (0x1U << DMI_SBCS_SBACCESS8_OFFSET)
-#define DMI_SBADDRESS0 0x39
-/*
-* Accesses bits 31:0 of the physical address in {\tt sbaddress}.
- */
-#define DMI_SBADDRESS0_ADDRESS_OFFSET 0
-#define DMI_SBADDRESS0_ADDRESS_LENGTH 32
-#define DMI_SBADDRESS0_ADDRESS (0xffffffffU << DMI_SBADDRESS0_ADDRESS_OFFSET)
-#define DMI_SBADDRESS1 0x3a
-/*
-* Accesses bits 63:32 of the physical address in {\tt sbaddress} (if
-* the system address bus is that wide).
- */
-#define DMI_SBADDRESS1_ADDRESS_OFFSET 0
-#define DMI_SBADDRESS1_ADDRESS_LENGTH 32
-#define DMI_SBADDRESS1_ADDRESS (0xffffffffU << DMI_SBADDRESS1_ADDRESS_OFFSET)
-#define DMI_SBADDRESS2 0x3b
-/*
-* Accesses bits 95:64 of the physical address in {\tt sbaddress} (if
-* the system address bus is that wide).
- */
-#define DMI_SBADDRESS2_ADDRESS_OFFSET 0
-#define DMI_SBADDRESS2_ADDRESS_LENGTH 32
-#define DMI_SBADDRESS2_ADDRESS (0xffffffffU << DMI_SBADDRESS2_ADDRESS_OFFSET)
-#define DMI_SBDATA0 0x3c
-/*
-* Accesses bits 31:0 of {\tt sbdata}.
- */
-#define DMI_SBDATA0_DATA_OFFSET 0
-#define DMI_SBDATA0_DATA_LENGTH 32
-#define DMI_SBDATA0_DATA (0xffffffffU << DMI_SBDATA0_DATA_OFFSET)
-#define DMI_SBDATA1 0x3d
-/*
-* Accesses bits 63:32 of {\tt sbdata} (if the system bus is that
-* wide).
- */
-#define DMI_SBDATA1_DATA_OFFSET 0
-#define DMI_SBDATA1_DATA_LENGTH 32
-#define DMI_SBDATA1_DATA (0xffffffffU << DMI_SBDATA1_DATA_OFFSET)
-#define DMI_SBDATA2 0x3e
-/*
-* Accesses bits 95:64 of {\tt sbdata} (if the system bus is that
-* wide).
- */
-#define DMI_SBDATA2_DATA_OFFSET 0
-#define DMI_SBDATA2_DATA_LENGTH 32
-#define DMI_SBDATA2_DATA (0xffffffffU << DMI_SBDATA2_DATA_OFFSET)
-#define DMI_SBDATA3 0x3f
-/*
-* Accesses bits 127:96 of {\tt sbdata} (if the system bus is that
-* wide).
- */
-#define DMI_SBDATA3_DATA_OFFSET 0
-#define DMI_SBDATA3_DATA_LENGTH 32
-#define DMI_SBDATA3_DATA (0xffffffffU << DMI_SBDATA3_DATA_OFFSET)
+#define DM_SBDATA3_DATA_OFFSET 0
+#define DM_SBDATA3_DATA_LENGTH 32
+#define DM_SBDATA3_DATA (0xffffffffU << DM_SBDATA3_DATA_OFFSET)
+#define DM_CUSTOM 0x1f
+#define DM_CUSTOM0 0x70
+#define DM_CUSTOM15 0x7f
#define SHORTNAME 0x123
/*
-* Description of what this field is used for.
+ * Description of what this field is used for.
*/
#define SHORTNAME_FIELD_OFFSET 0
#define SHORTNAME_FIELD_LENGTH 8
#define SHORTNAME_FIELD (0xffU << SHORTNAME_FIELD_OFFSET)
-#define AC_ACCESS_REGISTER None
/*
-* This is 0 to indicate Access Register Command.
+ * This is 0 to indicate Access Register Command.
*/
#define AC_ACCESS_REGISTER_CMDTYPE_OFFSET 24
#define AC_ACCESS_REGISTER_CMDTYPE_LENGTH 8
#define AC_ACCESS_REGISTER_CMDTYPE (0xffU << AC_ACCESS_REGISTER_CMDTYPE_OFFSET)
/*
-* 2: Access the lowest 32 bits of the register.
-*
-* 3: Access the lowest 64 bits of the register.
-*
-* 4: Access the lowest 128 bits of the register.
-*
-* If \Fsize specifies a size larger than the register's actual size,
-* then the access must fail. If a register is accessible, then reads of \Fsize
-* less than or equal to the register's actual size must be supported.
-*
-* This field controls the Argument Width as referenced in
-* Table~\ref{tab:datareg}.
- */
-#define AC_ACCESS_REGISTER_SIZE_OFFSET 20
-#define AC_ACCESS_REGISTER_SIZE_LENGTH 3
-#define AC_ACCESS_REGISTER_SIZE (0x7U << AC_ACCESS_REGISTER_SIZE_OFFSET)
-/*
-* When 1, execute the program in the Program Buffer exactly once
-* after performing the transfer, if any.
+ * 2: Access the lowest 32 bits of the register.
+ *
+ * 3: Access the lowest 64 bits of the register.
+ *
+ * 4: Access the lowest 128 bits of the register.
+ *
+ * If \FacAccessregisterAarsize specifies a size larger than the register's actual size,
+ * then the access must fail. If a register is accessible, then reads of \FacAccessregisterAarsize
+ * less than or equal to the register's actual size must be supported.
+ *
+ * This field controls the Argument Width as referenced in
+ * Table~\ref{tab:datareg}.
+ */
+#define AC_ACCESS_REGISTER_AARSIZE_OFFSET 20
+#define AC_ACCESS_REGISTER_AARSIZE_LENGTH 3
+#define AC_ACCESS_REGISTER_AARSIZE (0x7U << AC_ACCESS_REGISTER_AARSIZE_OFFSET)
+/*
+ * 0: No effect. This variant must be supported.
+ *
+ * 1: After a successful register access, \FacAccessregisterRegno is
+ * incremented (wrapping around to 0). Supporting this variant is
+ * optional. It is undefined whether the increment happens when
+ * \FacAccessregisterTransfer is 0.
+ */
+#define AC_ACCESS_REGISTER_AARPOSTINCREMENT_OFFSET 19
+#define AC_ACCESS_REGISTER_AARPOSTINCREMENT_LENGTH 1
+#define AC_ACCESS_REGISTER_AARPOSTINCREMENT (0x1U << AC_ACCESS_REGISTER_AARPOSTINCREMENT_OFFSET)
+/*
+ * 0: No effect. This variant must be supported, and is the only
+ * supported one if \FdmAbstractcsProgbufsize is 0.
+ *
+ * 1: Execute the program in the Program Buffer exactly once after
+ * performing the transfer, if any. Supporting this variant is
+ * optional.
*/
#define AC_ACCESS_REGISTER_POSTEXEC_OFFSET 18
#define AC_ACCESS_REGISTER_POSTEXEC_LENGTH 1
#define AC_ACCESS_REGISTER_POSTEXEC (0x1U << AC_ACCESS_REGISTER_POSTEXEC_OFFSET)
/*
-* 0: Don't do the operation specified by \Fwrite.
-*
-* 1: Do the operation specified by \Fwrite.
-*
-* This bit can be used to just execute the Program Buffer without
-* having to worry about placing valid values into \Fsize or \Fregno.
+ * 0: Don't do the operation specified by \FacAccessregisterWrite.
+ *
+ * 1: Do the operation specified by \FacAccessregisterWrite.
+ *
+ * This bit can be used to just execute the Program Buffer without
+ * having to worry about placing valid values into \FacAccessregisterAarsize or \FacAccessregisterRegno.
*/
#define AC_ACCESS_REGISTER_TRANSFER_OFFSET 17
#define AC_ACCESS_REGISTER_TRANSFER_LENGTH 1
#define AC_ACCESS_REGISTER_TRANSFER (0x1U << AC_ACCESS_REGISTER_TRANSFER_OFFSET)
/*
-* When \Ftransfer is set:
-* 0: Copy data from the specified register into {\tt arg0} portion
-* of {\tt data}.
-*
-* 1: Copy data from {\tt arg0} portion of {\tt data} into the
-* specified register.
+ * When \FacAccessregisterTransfer is set:
+ * 0: Copy data from the specified register into {\tt arg0} portion
+ * of {\tt data}.
+ *
+ * 1: Copy data from {\tt arg0} portion of {\tt data} into the
+ * specified register.
*/
#define AC_ACCESS_REGISTER_WRITE_OFFSET 16
#define AC_ACCESS_REGISTER_WRITE_LENGTH 1
#define AC_ACCESS_REGISTER_WRITE (0x1U << AC_ACCESS_REGISTER_WRITE_OFFSET)
/*
-* Number of the register to access, as described in
-* Table~\ref{tab:regno}.
-* \Rdpc may be used as an alias for PC if this command is
-* supported on a non-halted hart.
+ * Number of the register to access, as described in
+ * Table~\ref{tab:regno}.
+ * \RcsrDpc may be used as an alias for PC if this command is
+ * supported on a non-halted hart.
*/
#define AC_ACCESS_REGISTER_REGNO_OFFSET 0
#define AC_ACCESS_REGISTER_REGNO_LENGTH 16
#define AC_ACCESS_REGISTER_REGNO (0xffffU << AC_ACCESS_REGISTER_REGNO_OFFSET)
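
The access-register macros above all describe pieces of a single 32-bit abstract command word: cmdtype selects the command, aarsize the transfer width, and transfer/write/regno what gets moved where. A minimal sketch of composing such a word from these defines (the helper name is illustrative and not part of this patch):

#include <stdbool.h>
#include <stdint.h>
#include "debug_defines.h"

/* Illustration only: build a 32-bit Access Register command word from the
 * field macros above.  cmdtype 0 selects Access Register, aarsize 2 asks
 * for a 32-bit transfer. */
static uint32_t access_register_command(uint16_t regno, bool write)
{
	uint32_t command = 0;
	command |= (uint32_t)0 << AC_ACCESS_REGISTER_CMDTYPE_OFFSET;
	command |= (uint32_t)2 << AC_ACCESS_REGISTER_AARSIZE_OFFSET;
	command |= AC_ACCESS_REGISTER_TRANSFER;
	if (write)
		command |= AC_ACCESS_REGISTER_WRITE;
	/* Per Table~\ref{tab:regno}, GPRs x1..x31 are regno 0x1001..0x101f. */
	command |= (uint32_t)regno << AC_ACCESS_REGISTER_REGNO_OFFSET;
	return command;
}

With transfer set and write clear, executing this command copies the selected register into the arg0 data words, matching the field comments above.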
-#define AC_QUICK_ACCESS None
/*
-* This is 1 to indicate Quick Access command.
+ * This is 1 to indicate Quick Access command.
*/
#define AC_QUICK_ACCESS_CMDTYPE_OFFSET 24
#define AC_QUICK_ACCESS_CMDTYPE_LENGTH 8
#define AC_QUICK_ACCESS_CMDTYPE (0xffU << AC_QUICK_ACCESS_CMDTYPE_OFFSET)
+/*
+ * This is 2 to indicate Access Memory Command.
+ */
+#define AC_ACCESS_MEMORY_CMDTYPE_OFFSET 24
+#define AC_ACCESS_MEMORY_CMDTYPE_LENGTH 8
+#define AC_ACCESS_MEMORY_CMDTYPE (0xffU << AC_ACCESS_MEMORY_CMDTYPE_OFFSET)
+/*
+ * An implementation does not have to implement both virtual and
+ * physical accesses, but it must fail accesses that it doesn't
+ * support.
+ *
+ * 0: Addresses are physical (to the hart they are performed on).
+ *
+ * 1: Addresses are virtual, and translated the way they would be from
+ * M-mode, with \FcsrMcontrolMprv set.
+ */
+#define AC_ACCESS_MEMORY_AAMVIRTUAL_OFFSET 23
+#define AC_ACCESS_MEMORY_AAMVIRTUAL_LENGTH 1
+#define AC_ACCESS_MEMORY_AAMVIRTUAL (0x1U << AC_ACCESS_MEMORY_AAMVIRTUAL_OFFSET)
+/*
+ * 0: Access the lowest 8 bits of the memory location.
+ *
+ * 1: Access the lowest 16 bits of the memory location.
+ *
+ * 2: Access the lowest 32 bits of the memory location.
+ *
+ * 3: Access the lowest 64 bits of the memory location.
+ *
+ * 4: Access the lowest 128 bits of the memory location.
+ */
+#define AC_ACCESS_MEMORY_AAMSIZE_OFFSET 20
+#define AC_ACCESS_MEMORY_AAMSIZE_LENGTH 3
+#define AC_ACCESS_MEMORY_AAMSIZE (0x7U << AC_ACCESS_MEMORY_AAMSIZE_OFFSET)
+/*
+ * After a memory access has completed, if this bit is 1, increment
+ * {\tt arg1} (which contains the address used) by the number of bytes
+ * encoded in \FacAccessmemoryAamsize.
+ *
+ * Supporting this variant is optional, but highly recommended for
+ * performance reasons.
+ */
+#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT_OFFSET 19
+#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT_LENGTH 1
+#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT (0x1U << AC_ACCESS_MEMORY_AAMPOSTINCREMENT_OFFSET)
+/*
+ * 0: Copy data from the memory location specified in {\tt arg1} into
+ * the low bits of {\tt arg0}. Any remaining bits of {\tt arg0} now
+ * have an undefined value.
+ *
+ * 1: Copy data from the low bits of {\tt arg0} into the memory
+ * location specified in {\tt arg1}.
+ */
+#define AC_ACCESS_MEMORY_WRITE_OFFSET 16
+#define AC_ACCESS_MEMORY_WRITE_LENGTH 1
+#define AC_ACCESS_MEMORY_WRITE (0x1U << AC_ACCESS_MEMORY_WRITE_OFFSET)
+/*
+ * These bits are reserved for target-specific uses.
+ */
+#define AC_ACCESS_MEMORY_TARGET_SPECIFIC_OFFSET 14
+#define AC_ACCESS_MEMORY_TARGET_SPECIFIC_LENGTH 2
+#define AC_ACCESS_MEMORY_TARGET_SPECIFIC (0x3U << AC_ACCESS_MEMORY_TARGET_SPECIFIC_OFFSET)
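
The same offset and mask convention lets a decoder pull the fields back out of an Access Memory command word. A small illustrative sketch using the macros added above:

#include <stdint.h>
#include "debug_defines.h"

/* Illustration only: mask out one field and shift it down to bit 0. */
static uint32_t field_get(uint32_t word, uint32_t mask, unsigned offset)
{
	return (word & mask) >> offset;
}

/* aamsize encodes the access width as log2 of the byte count
 * (0: 8-bit ... 4: 128-bit), so the width in bytes is 1 << aamsize. */
static unsigned access_memory_width_bytes(uint32_t command)
{
	uint32_t aamsize = field_get(command, AC_ACCESS_MEMORY_AAMSIZE,
			AC_ACCESS_MEMORY_AAMSIZE_OFFSET);
	return 1u << aamsize;
}

This byte count is also what aampostincrement adds to arg1 after each completed access.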
#define VIRT_PRIV virtual
/*
-* Contains the privilege level the hart was operating in when Debug
-* Mode was entered. The encoding is described in Table
-* \ref{tab:privlevel}, and matches the privilege level encoding from
-* the RISC-V Privileged ISA Specification. A user can write this
-* value to change the hart's privilege level when exiting Debug Mode.
+ * Contains the privilege level the hart was operating in when Debug
+ * Mode was entered. The encoding is described in Table
+ * \ref{tab:privlevel}, and matches the privilege level encoding from
+ * the Privileged Spec. A user can write this
+ * value to change the hart's privilege level when exiting Debug Mode.
*/
#define VIRT_PRIV_PRV_OFFSET 0
#define VIRT_PRIV_PRV_LENGTH 2
#define VIRT_PRIV_PRV (0x3U << VIRT_PRIV_PRV_OFFSET)
+#define DMI_SERCS 0x34
+/*
+ * Number of supported serial ports.
+ */
+#define DMI_SERCS_SERIALCOUNT_OFFSET 28
+#define DMI_SERCS_SERIALCOUNT_LENGTH 4
+#define DMI_SERCS_SERIALCOUNT (0xfU << DMI_SERCS_SERIALCOUNT_OFFSET)
+/*
+ * Select which serial port is accessed by \RdmiSerrx and \RdmiSertx.
+ */
+#define DMI_SERCS_SERIAL_OFFSET 24
+#define DMI_SERCS_SERIAL_LENGTH 3
+#define DMI_SERCS_SERIAL (0x7U << DMI_SERCS_SERIAL_OFFSET)
+#define DMI_SERCS_ERROR7_OFFSET 23
+#define DMI_SERCS_ERROR7_LENGTH 1
+#define DMI_SERCS_ERROR7 (0x1U << DMI_SERCS_ERROR7_OFFSET)
+#define DMI_SERCS_VALID7_OFFSET 22
+#define DMI_SERCS_VALID7_LENGTH 1
+#define DMI_SERCS_VALID7 (0x1U << DMI_SERCS_VALID7_OFFSET)
+#define DMI_SERCS_FULL7_OFFSET 21
+#define DMI_SERCS_FULL7_LENGTH 1
+#define DMI_SERCS_FULL7 (0x1U << DMI_SERCS_FULL7_OFFSET)
+#define DMI_SERCS_ERROR6_OFFSET 20
+#define DMI_SERCS_ERROR6_LENGTH 1
+#define DMI_SERCS_ERROR6 (0x1U << DMI_SERCS_ERROR6_OFFSET)
+#define DMI_SERCS_VALID6_OFFSET 19
+#define DMI_SERCS_VALID6_LENGTH 1
+#define DMI_SERCS_VALID6 (0x1U << DMI_SERCS_VALID6_OFFSET)
+#define DMI_SERCS_FULL6_OFFSET 18
+#define DMI_SERCS_FULL6_LENGTH 1
+#define DMI_SERCS_FULL6 (0x1U << DMI_SERCS_FULL6_OFFSET)
+#define DMI_SERCS_ERROR5_OFFSET 17
+#define DMI_SERCS_ERROR5_LENGTH 1
+#define DMI_SERCS_ERROR5 (0x1U << DMI_SERCS_ERROR5_OFFSET)
+#define DMI_SERCS_VALID5_OFFSET 16
+#define DMI_SERCS_VALID5_LENGTH 1
+#define DMI_SERCS_VALID5 (0x1U << DMI_SERCS_VALID5_OFFSET)
+#define DMI_SERCS_FULL5_OFFSET 15
+#define DMI_SERCS_FULL5_LENGTH 1
+#define DMI_SERCS_FULL5 (0x1U << DMI_SERCS_FULL5_OFFSET)
+#define DMI_SERCS_ERROR4_OFFSET 14
+#define DMI_SERCS_ERROR4_LENGTH 1
+#define DMI_SERCS_ERROR4 (0x1U << DMI_SERCS_ERROR4_OFFSET)
+#define DMI_SERCS_VALID4_OFFSET 13
+#define DMI_SERCS_VALID4_LENGTH 1
+#define DMI_SERCS_VALID4 (0x1U << DMI_SERCS_VALID4_OFFSET)
+#define DMI_SERCS_FULL4_OFFSET 12
+#define DMI_SERCS_FULL4_LENGTH 1
+#define DMI_SERCS_FULL4 (0x1U << DMI_SERCS_FULL4_OFFSET)
+#define DMI_SERCS_ERROR3_OFFSET 11
+#define DMI_SERCS_ERROR3_LENGTH 1
+#define DMI_SERCS_ERROR3 (0x1U << DMI_SERCS_ERROR3_OFFSET)
+#define DMI_SERCS_VALID3_OFFSET 10
+#define DMI_SERCS_VALID3_LENGTH 1
+#define DMI_SERCS_VALID3 (0x1U << DMI_SERCS_VALID3_OFFSET)
+#define DMI_SERCS_FULL3_OFFSET 9
+#define DMI_SERCS_FULL3_LENGTH 1
+#define DMI_SERCS_FULL3 (0x1U << DMI_SERCS_FULL3_OFFSET)
+#define DMI_SERCS_ERROR2_OFFSET 8
+#define DMI_SERCS_ERROR2_LENGTH 1
+#define DMI_SERCS_ERROR2 (0x1U << DMI_SERCS_ERROR2_OFFSET)
+#define DMI_SERCS_VALID2_OFFSET 7
+#define DMI_SERCS_VALID2_LENGTH 1
+#define DMI_SERCS_VALID2 (0x1U << DMI_SERCS_VALID2_OFFSET)
+#define DMI_SERCS_FULL2_OFFSET 6
+#define DMI_SERCS_FULL2_LENGTH 1
+#define DMI_SERCS_FULL2 (0x1U << DMI_SERCS_FULL2_OFFSET)
+#define DMI_SERCS_ERROR1_OFFSET 5
+#define DMI_SERCS_ERROR1_LENGTH 1
+#define DMI_SERCS_ERROR1 (0x1U << DMI_SERCS_ERROR1_OFFSET)
+#define DMI_SERCS_VALID1_OFFSET 4
+#define DMI_SERCS_VALID1_LENGTH 1
+#define DMI_SERCS_VALID1 (0x1U << DMI_SERCS_VALID1_OFFSET)
+#define DMI_SERCS_FULL1_OFFSET 3
+#define DMI_SERCS_FULL1_LENGTH 1
+#define DMI_SERCS_FULL1 (0x1U << DMI_SERCS_FULL1_OFFSET)
+/*
+ * 1 when the debugger-to-core queue for serial port 0 has
+ * over or underflowed. This bit will remain set until it is reset by
+ * writing 1 to this bit.
+ */
+#define DMI_SERCS_ERROR0_OFFSET 2
+#define DMI_SERCS_ERROR0_LENGTH 1
+#define DMI_SERCS_ERROR0 (0x1U << DMI_SERCS_ERROR0_OFFSET)
+/*
+ * 1 when the core-to-debugger queue for serial port 0 is not empty.
+ */
+#define DMI_SERCS_VALID0_OFFSET 1
+#define DMI_SERCS_VALID0_LENGTH 1
+#define DMI_SERCS_VALID0 (0x1U << DMI_SERCS_VALID0_OFFSET)
+/*
+ * 1 when the debugger-to-core queue for serial port 0 is full.
+ */
+#define DMI_SERCS_FULL0_OFFSET 0
+#define DMI_SERCS_FULL0_LENGTH 1
+#define DMI_SERCS_FULL0 (0x1U << DMI_SERCS_FULL0_OFFSET)
+#define DMI_SERTX 0x35
+#define DMI_SERTX_DATA_OFFSET 0
+#define DMI_SERTX_DATA_LENGTH 32
+#define DMI_SERTX_DATA (0xffffffffU << DMI_SERTX_DATA_OFFSET)
+#define DMI_SERRX 0x36
+#define DMI_SERRX_DATA_OFFSET 0
+#define DMI_SERRX_DATA_LENGTH 32
+#define DMI_SERRX_DATA (0xffffffffU << DMI_SERRX_DATA_OFFSET)
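
The sercs/sertx/serrx registers above describe an optional serial interface carried over DMI. A sketch of a receive poll for port 0, assuming a hypothetical dmi_read() accessor in place of the driver's real DMI path:

#include <stdbool.h>
#include <stdint.h>
#include "debug_defines.h"

/* Hypothetical accessor: perform one DMI read at the given address. */
extern uint32_t dmi_read(uint32_t address);

/* Fetch one word from serial port 0's core-to-debugger queue, if any. */
static bool serial0_poll(uint32_t *data)
{
	uint32_t sercs = dmi_read(DMI_SERCS);
	if (sercs & DMI_SERCS_ERROR0)
		return false;	/* queue over/underflowed; cleared by writing 1 back */
	if (!(sercs & DMI_SERCS_VALID0))
		return false;	/* core-to-debugger queue is empty */
	*data = dmi_read(DMI_SERRX);	/* 32-bit data field of serrx */
	return true;
}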
diff --git a/src/target/riscv/encoding.h b/src/target/riscv/encoding.h
index e214c0c..4a035e2 100644
--- a/src/target/riscv/encoding.h
+++ b/src/target/riscv/encoding.h
@@ -1,3 +1,8 @@
+/*
+ * This file is auto-generated by running 'make ../riscv-openocd/src/target/riscv/encoding.h' in
+ * https://github.com/riscv/riscv-opcodes (876ee63)
+ */
+
/* See LICENSE for license details. */
#ifndef RISCV_CSR_ENCODING_H
@@ -12,7 +17,7 @@
#define MSTATUS_HPIE 0x00000040
#define MSTATUS_MPIE 0x00000080
#define MSTATUS_SPP 0x00000100
-#define MSTATUS_HPP 0x00000600
+#define MSTATUS_VS 0x00000600
#define MSTATUS_MPP 0x00001800
#define MSTATUS_FS 0x00006000
#define MSTATUS_XS 0x00018000
@@ -25,6 +30,8 @@
#define MSTATUS32_SD 0x80000000
#define MSTATUS_UXL 0x0000000300000000
#define MSTATUS_SXL 0x0000000C00000000
+#define MSTATUS_GVA 0x0000004000000000
+#define MSTATUS_MPV 0x0000008000000000
#define MSTATUS64_SD 0x8000000000000000
#define SSTATUS_UIE 0x00000001
@@ -32,6 +39,7 @@
#define SSTATUS_UPIE 0x00000010
#define SSTATUS_SPIE 0x00000020
#define SSTATUS_SPP 0x00000100
+#define SSTATUS_VS 0x00000600
#define SSTATUS_FS 0x00006000
#define SSTATUS_XS 0x00018000
#define SSTATUS_SUM 0x00040000
@@ -40,6 +48,24 @@
#define SSTATUS_UXL 0x0000000300000000
#define SSTATUS64_SD 0x8000000000000000
+#define SSTATUS_VS_MASK (SSTATUS_SIE | SSTATUS_SPIE | \
+ SSTATUS_SPP | SSTATUS_SUM | \
+ SSTATUS_MXR | SSTATUS_UXL)
+
+#define HSTATUS_VSXL 0x300000000
+#define HSTATUS_VTSR 0x00400000
+#define HSTATUS_VTW 0x00200000
+#define HSTATUS_VTVM 0x00100000
+#define HSTATUS_VGEIN 0x0003f000
+#define HSTATUS_HU 0x00000200
+#define HSTATUS_SPVP 0x00000100
+#define HSTATUS_SPV 0x00000080
+#define HSTATUS_GVA 0x00000040
+#define HSTATUS_VSBE 0x00000020
+
+#define USTATUS_UIE 0x00000001
+#define USTATUS_UPIE 0x00000010
+
#define DCSR_XDEBUGVER (3U<<30)
#define DCSR_NDRESET (1<<29)
#define DCSR_FULLRESET (1<<28)
@@ -61,6 +87,7 @@
#define DCSR_CAUSE_DEBUGINT 3
#define DCSR_CAUSE_STEP 4
#define DCSR_CAUSE_HALT 5
+#define DCSR_CAUSE_GROUP 6
#define MCONTROL_TYPE(xlen) (0xfULL<<((xlen)-4))
#define MCONTROL_DMODE(xlen) (1ULL<<((xlen)-5))
@@ -95,24 +122,35 @@
#define MCONTROL_MATCH_MASK_LOW 4
#define MCONTROL_MATCH_MASK_HIGH 5
+#define MIP_USIP (1 << IRQ_U_SOFT)
#define MIP_SSIP (1 << IRQ_S_SOFT)
-#define MIP_HSIP (1 << IRQ_H_SOFT)
+#define MIP_VSSIP (1 << IRQ_VS_SOFT)
#define MIP_MSIP (1 << IRQ_M_SOFT)
+#define MIP_UTIP (1 << IRQ_U_TIMER)
#define MIP_STIP (1 << IRQ_S_TIMER)
-#define MIP_HTIP (1 << IRQ_H_TIMER)
+#define MIP_VSTIP (1 << IRQ_VS_TIMER)
#define MIP_MTIP (1 << IRQ_M_TIMER)
+#define MIP_UEIP (1 << IRQ_U_EXT)
#define MIP_SEIP (1 << IRQ_S_EXT)
-#define MIP_HEIP (1 << IRQ_H_EXT)
+#define MIP_VSEIP (1 << IRQ_VS_EXT)
#define MIP_MEIP (1 << IRQ_M_EXT)
+#define MIP_SGEIP (1 << IRQ_S_GEXT)
+
+#define MIP_S_MASK (MIP_SSIP | MIP_STIP | MIP_SEIP)
+#define MIP_VS_MASK (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)
+#define MIP_HS_MASK (MIP_VS_MASK | MIP_SGEIP)
+
+#define MIDELEG_FORCED_MASK MIP_HS_MASK
#define SIP_SSIP MIP_SSIP
#define SIP_STIP MIP_STIP
#define PRV_U 0
#define PRV_S 1
-#define PRV_H 2
#define PRV_M 3
+#define PRV_HS (PRV_S + 1)
+
#define SATP32_MODE 0x80000000
#define SATP32_ASID 0x7FC00000
#define SATP32_PPN 0x003FFFFF
@@ -127,6 +165,19 @@
#define SATP_MODE_SV57 10
#define SATP_MODE_SV64 11
+#define HGATP32_MODE 0x80000000
+#define HGATP32_VMID 0x1FC00000
+#define HGATP32_PPN 0x003FFFFF
+
+#define HGATP64_MODE 0xF000000000000000
+#define HGATP64_VMID 0x03FFF00000000000
+#define HGATP64_PPN 0x00000FFFFFFFFFFF
+
+#define HGATP_MODE_OFF 0
+#define HGATP_MODE_SV32X4 1
+#define HGATP_MODE_SV39X4 8
+#define HGATP_MODE_SV48X4 9
+
#define PMP_R 0x01
#define PMP_W 0x02
#define PMP_X 0x04
@@ -138,15 +189,19 @@
#define PMP_NA4 0x10
#define PMP_NAPOT 0x18
+#define IRQ_U_SOFT 0
#define IRQ_S_SOFT 1
-#define IRQ_H_SOFT 2
+#define IRQ_VS_SOFT 2
#define IRQ_M_SOFT 3
+#define IRQ_U_TIMER 4
#define IRQ_S_TIMER 5
-#define IRQ_H_TIMER 6
+#define IRQ_VS_TIMER 6
#define IRQ_M_TIMER 7
+#define IRQ_U_EXT 8
#define IRQ_S_EXT 9
-#define IRQ_H_EXT 10
+#define IRQ_VS_EXT 10
#define IRQ_M_EXT 11
+#define IRQ_S_GEXT 12
#define IRQ_COP 12
#define IRQ_HOST 13
@@ -191,7 +246,6 @@
#ifdef __GNUC__
-/*
#define read_csr(reg) ({ unsigned long __tmp; \
asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
__tmp; })
@@ -210,7 +264,6 @@
#define clear_csr(reg, bit) ({ unsigned long __tmp; \
asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \
__tmp; })
- */
#define rdtime() read_csr(time)
#define rdcycle() read_csr(cycle)
@@ -223,9 +276,55 @@
#endif
#endif
-/* Automatically generated by parse-opcodes. */
+/* Automatically generated by parse_opcodes. */
#ifndef RISCV_ENCODING_H
#define RISCV_ENCODING_H
+#define MATCH_SLLI_RV32 0x1013
+#define MASK_SLLI_RV32 0xfe00707f
+#define MATCH_SRLI_RV32 0x5013
+#define MASK_SRLI_RV32 0xfe00707f
+#define MATCH_SRAI_RV32 0x40005013
+#define MASK_SRAI_RV32 0xfe00707f
+#define MATCH_FRFLAGS 0x102073
+#define MASK_FRFLAGS 0xfffff07f
+#define MATCH_FSFLAGS 0x101073
+#define MASK_FSFLAGS 0xfff0707f
+#define MATCH_FSFLAGSI 0x105073
+#define MASK_FSFLAGSI 0xfff0707f
+#define MATCH_FRRM 0x202073
+#define MASK_FRRM 0xfffff07f
+#define MATCH_FSRM 0x201073
+#define MASK_FSRM 0xfff0707f
+#define MATCH_FSRMI 0x205073
+#define MASK_FSRMI 0xfff0707f
+#define MATCH_FSCSR 0x301073
+#define MASK_FSCSR 0xfff0707f
+#define MATCH_FRCSR 0x302073
+#define MASK_FRCSR 0xfffff07f
+#define MATCH_RDCYCLE 0xc0002073
+#define MASK_RDCYCLE 0xfffff07f
+#define MATCH_RDTIME 0xc0102073
+#define MASK_RDTIME 0xfffff07f
+#define MATCH_RDINSTRET 0xc0202073
+#define MASK_RDINSTRET 0xfffff07f
+#define MATCH_RDCYCLEH 0xc8002073
+#define MASK_RDCYCLEH 0xfffff07f
+#define MATCH_RDTIMEH 0xc8102073
+#define MASK_RDTIMEH 0xfffff07f
+#define MATCH_RDINSTRETH 0xc8202073
+#define MASK_RDINSTRETH 0xfffff07f
+#define MATCH_SCALL 0x73
+#define MASK_SCALL 0xffffffff
+#define MATCH_SBREAK 0x100073
+#define MASK_SBREAK 0xffffffff
+#define MATCH_FMV_X_S 0xe0000053
+#define MASK_FMV_X_S 0xfff0707f
+#define MATCH_FMV_S_X 0xf0000053
+#define MASK_FMV_S_X 0xfff0707f
+#define MATCH_FENCE_TSO 0x8330000f
+#define MASK_FENCE_TSO 0xfff0707f
+#define MATCH_PAUSE 0x100000f
+#define MASK_PAUSE 0xffffffff
#define MATCH_BEQ 0x63
#define MASK_BEQ 0x707f
#define MATCH_BNE 0x1063
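
Every entry in this generated list is a MATCH/MASK pair: an instruction word is of the named form exactly when its masked bits equal the match constant. An illustrative check using the encodings added above:

#include <stdbool.h>
#include <stdint.h>
#include "encoding.h"

/* An instruction word is FENCE.TSO exactly when the bits selected by the
 * mask equal the match constant; every MATCH and MASK pair in this header
 * is used the same way. */
static bool is_fence_tso(uint32_t insn)
{
	return (insn & MASK_FENCE_TSO) == MATCH_FENCE_TSO;
}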
@@ -284,6 +383,26 @@
#define MASK_OR 0xfe00707f
#define MATCH_AND 0x7033
#define MASK_AND 0xfe00707f
+#define MATCH_LB 0x3
+#define MASK_LB 0x707f
+#define MATCH_LH 0x1003
+#define MASK_LH 0x707f
+#define MATCH_LW 0x2003
+#define MASK_LW 0x707f
+#define MATCH_LBU 0x4003
+#define MASK_LBU 0x707f
+#define MATCH_LHU 0x5003
+#define MASK_LHU 0x707f
+#define MATCH_SB 0x23
+#define MASK_SB 0x707f
+#define MATCH_SH 0x1023
+#define MASK_SH 0x707f
+#define MATCH_SW 0x2023
+#define MASK_SW 0x707f
+#define MATCH_FENCE 0xf
+#define MASK_FENCE 0x707f
+#define MATCH_FENCE_I 0x100f
+#define MASK_FENCE_I 0x707f
#define MATCH_ADDIW 0x1b
#define MASK_ADDIW 0x707f
#define MATCH_SLLIW 0x101b
@@ -302,32 +421,12 @@
#define MASK_SRLW 0xfe00707f
#define MATCH_SRAW 0x4000503b
#define MASK_SRAW 0xfe00707f
-#define MATCH_LB 0x3
-#define MASK_LB 0x707f
-#define MATCH_LH 0x1003
-#define MASK_LH 0x707f
-#define MATCH_LW 0x2003
-#define MASK_LW 0x707f
#define MATCH_LD 0x3003
#define MASK_LD 0x707f
-#define MATCH_LBU 0x4003
-#define MASK_LBU 0x707f
-#define MATCH_LHU 0x5003
-#define MASK_LHU 0x707f
#define MATCH_LWU 0x6003
#define MASK_LWU 0x707f
-#define MATCH_SB 0x23
-#define MASK_SB 0x707f
-#define MATCH_SH 0x1023
-#define MASK_SH 0x707f
-#define MATCH_SW 0x2023
-#define MASK_SW 0x707f
#define MATCH_SD 0x3023
#define MASK_SD 0x707f
-#define MATCH_FENCE 0xf
-#define MASK_FENCE 0x707f
-#define MATCH_FENCE_I 0x100f
-#define MASK_FENCE_I 0x707f
#define MATCH_MUL 0x2000033
#define MASK_MUL 0xfe00707f
#define MATCH_MULH 0x2001033
@@ -398,34 +497,36 @@
#define MASK_LR_D 0xf9f0707f
#define MATCH_SC_D 0x1800302f
#define MASK_SC_D 0xf800707f
-#define MATCH_ECALL 0x73
-#define MASK_ECALL 0xffffffff
-#define MATCH_EBREAK 0x100073
-#define MASK_EBREAK 0xffffffff
-#define MATCH_URET 0x200073
-#define MASK_URET 0xffffffff
-#define MATCH_SRET 0x10200073
-#define MASK_SRET 0xffffffff
-#define MATCH_MRET 0x30200073
-#define MASK_MRET 0xffffffff
-#define MATCH_DRET 0x7b200073
-#define MASK_DRET 0xffffffff
-#define MATCH_SFENCE_VMA 0x12000073
-#define MASK_SFENCE_VMA 0xfe007fff
-#define MATCH_WFI 0x10500073
-#define MASK_WFI 0xffffffff
-#define MATCH_CSRRW 0x1073
-#define MASK_CSRRW 0x707f
-#define MATCH_CSRRS 0x2073
-#define MASK_CSRRS 0x707f
-#define MATCH_CSRRC 0x3073
-#define MASK_CSRRC 0x707f
-#define MATCH_CSRRWI 0x5073
-#define MASK_CSRRWI 0x707f
-#define MATCH_CSRRSI 0x6073
-#define MASK_CSRRSI 0x707f
-#define MATCH_CSRRCI 0x7073
-#define MASK_CSRRCI 0x707f
+#define MATCH_HFENCE_VVMA 0x22000073
+#define MASK_HFENCE_VVMA 0xfe007fff
+#define MATCH_HFENCE_GVMA 0x62000073
+#define MASK_HFENCE_GVMA 0xfe007fff
+#define MATCH_HLV_B 0x60004073
+#define MASK_HLV_B 0xfff0707f
+#define MATCH_HLV_BU 0x60104073
+#define MASK_HLV_BU 0xfff0707f
+#define MATCH_HLV_H 0x64004073
+#define MASK_HLV_H 0xfff0707f
+#define MATCH_HLV_HU 0x64104073
+#define MASK_HLV_HU 0xfff0707f
+#define MATCH_HLVX_HU 0x64304073
+#define MASK_HLVX_HU 0xfff0707f
+#define MATCH_HLV_W 0x68004073
+#define MASK_HLV_W 0xfff0707f
+#define MATCH_HLVX_WU 0x68304073
+#define MASK_HLVX_WU 0xfff0707f
+#define MATCH_HSV_B 0x62004073
+#define MASK_HSV_B 0xfe007fff
+#define MATCH_HSV_H 0x66004073
+#define MASK_HSV_H 0xfe007fff
+#define MATCH_HSV_W 0x6a004073
+#define MASK_HSV_W 0xfe007fff
+#define MATCH_HLV_WU 0x68104073
+#define MASK_HLV_WU 0xfff0707f
+#define MATCH_HLV_D 0x6c004073
+#define MASK_HLV_D 0xfff0707f
+#define MATCH_HSV_D 0x6e004073
+#define MASK_HSV_D 0xfe007fff
#define MATCH_FADD_S 0x53
#define MASK_FADD_S 0xfe00007f
#define MATCH_FSUB_S 0x8000053
@@ -446,6 +547,46 @@
#define MASK_FMAX_S 0xfe00707f
#define MATCH_FSQRT_S 0x58000053
#define MASK_FSQRT_S 0xfff0007f
+#define MATCH_FLE_S 0xa0000053
+#define MASK_FLE_S 0xfe00707f
+#define MATCH_FLT_S 0xa0001053
+#define MASK_FLT_S 0xfe00707f
+#define MATCH_FEQ_S 0xa0002053
+#define MASK_FEQ_S 0xfe00707f
+#define MATCH_FCVT_W_S 0xc0000053
+#define MASK_FCVT_W_S 0xfff0007f
+#define MATCH_FCVT_WU_S 0xc0100053
+#define MASK_FCVT_WU_S 0xfff0007f
+#define MATCH_FMV_X_W 0xe0000053
+#define MASK_FMV_X_W 0xfff0707f
+#define MATCH_FCLASS_S 0xe0001053
+#define MASK_FCLASS_S 0xfff0707f
+#define MATCH_FCVT_S_W 0xd0000053
+#define MASK_FCVT_S_W 0xfff0007f
+#define MATCH_FCVT_S_WU 0xd0100053
+#define MASK_FCVT_S_WU 0xfff0007f
+#define MATCH_FMV_W_X 0xf0000053
+#define MASK_FMV_W_X 0xfff0707f
+#define MATCH_FLW 0x2007
+#define MASK_FLW 0x707f
+#define MATCH_FSW 0x2027
+#define MASK_FSW 0x707f
+#define MATCH_FMADD_S 0x43
+#define MASK_FMADD_S 0x600007f
+#define MATCH_FMSUB_S 0x47
+#define MASK_FMSUB_S 0x600007f
+#define MATCH_FNMSUB_S 0x4b
+#define MASK_FNMSUB_S 0x600007f
+#define MATCH_FNMADD_S 0x4f
+#define MASK_FNMADD_S 0x600007f
+#define MATCH_FCVT_L_S 0xc0200053
+#define MASK_FCVT_L_S 0xfff0007f
+#define MATCH_FCVT_LU_S 0xc0300053
+#define MASK_FCVT_LU_S 0xfff0007f
+#define MATCH_FCVT_S_L 0xd0200053
+#define MASK_FCVT_S_L 0xfff0007f
+#define MATCH_FCVT_S_LU 0xd0300053
+#define MASK_FCVT_S_LU 0xfff0007f
#define MATCH_FADD_D 0x2000053
#define MASK_FADD_D 0xfe00007f
#define MATCH_FSUB_D 0xa000053
@@ -470,6 +611,46 @@
#define MASK_FCVT_D_S 0xfff0007f
#define MATCH_FSQRT_D 0x5a000053
#define MASK_FSQRT_D 0xfff0007f
+#define MATCH_FLE_D 0xa2000053
+#define MASK_FLE_D 0xfe00707f
+#define MATCH_FLT_D 0xa2001053
+#define MASK_FLT_D 0xfe00707f
+#define MATCH_FEQ_D 0xa2002053
+#define MASK_FEQ_D 0xfe00707f
+#define MATCH_FCVT_W_D 0xc2000053
+#define MASK_FCVT_W_D 0xfff0007f
+#define MATCH_FCVT_WU_D 0xc2100053
+#define MASK_FCVT_WU_D 0xfff0007f
+#define MATCH_FCLASS_D 0xe2001053
+#define MASK_FCLASS_D 0xfff0707f
+#define MATCH_FCVT_D_W 0xd2000053
+#define MASK_FCVT_D_W 0xfff0007f
+#define MATCH_FCVT_D_WU 0xd2100053
+#define MASK_FCVT_D_WU 0xfff0007f
+#define MATCH_FLD 0x3007
+#define MASK_FLD 0x707f
+#define MATCH_FSD 0x3027
+#define MASK_FSD 0x707f
+#define MATCH_FMADD_D 0x2000043
+#define MASK_FMADD_D 0x600007f
+#define MATCH_FMSUB_D 0x2000047
+#define MASK_FMSUB_D 0x600007f
+#define MATCH_FNMSUB_D 0x200004b
+#define MASK_FNMSUB_D 0x600007f
+#define MATCH_FNMADD_D 0x200004f
+#define MASK_FNMADD_D 0x600007f
+#define MATCH_FCVT_L_D 0xc2200053
+#define MASK_FCVT_L_D 0xfff0007f
+#define MATCH_FCVT_LU_D 0xc2300053
+#define MASK_FCVT_LU_D 0xfff0007f
+#define MATCH_FMV_X_D 0xe2000053
+#define MASK_FMV_X_D 0xfff0707f
+#define MATCH_FCVT_D_L 0xd2200053
+#define MASK_FCVT_D_L 0xfff0007f
+#define MATCH_FCVT_D_LU 0xd2300053
+#define MASK_FCVT_D_LU 0xfff0007f
+#define MATCH_FMV_D_X 0xf2000053
+#define MASK_FMV_D_X 0xfff0707f
#define MATCH_FADD_Q 0x6000053
#define MASK_FADD_Q 0xfe00007f
#define MATCH_FSUB_Q 0xe000053
@@ -498,118 +679,26 @@
#define MASK_FCVT_Q_D 0xfff0007f
#define MATCH_FSQRT_Q 0x5e000053
#define MASK_FSQRT_Q 0xfff0007f
-#define MATCH_FLE_S 0xa0000053
-#define MASK_FLE_S 0xfe00707f
-#define MATCH_FLT_S 0xa0001053
-#define MASK_FLT_S 0xfe00707f
-#define MATCH_FEQ_S 0xa0002053
-#define MASK_FEQ_S 0xfe00707f
-#define MATCH_FLE_D 0xa2000053
-#define MASK_FLE_D 0xfe00707f
-#define MATCH_FLT_D 0xa2001053
-#define MASK_FLT_D 0xfe00707f
-#define MATCH_FEQ_D 0xa2002053
-#define MASK_FEQ_D 0xfe00707f
#define MATCH_FLE_Q 0xa6000053
#define MASK_FLE_Q 0xfe00707f
#define MATCH_FLT_Q 0xa6001053
#define MASK_FLT_Q 0xfe00707f
#define MATCH_FEQ_Q 0xa6002053
#define MASK_FEQ_Q 0xfe00707f
-#define MATCH_FCVT_W_S 0xc0000053
-#define MASK_FCVT_W_S 0xfff0007f
-#define MATCH_FCVT_WU_S 0xc0100053
-#define MASK_FCVT_WU_S 0xfff0007f
-#define MATCH_FCVT_L_S 0xc0200053
-#define MASK_FCVT_L_S 0xfff0007f
-#define MATCH_FCVT_LU_S 0xc0300053
-#define MASK_FCVT_LU_S 0xfff0007f
-#define MATCH_FMV_X_W 0xe0000053
-#define MASK_FMV_X_W 0xfff0707f
-#define MATCH_FCLASS_S 0xe0001053
-#define MASK_FCLASS_S 0xfff0707f
-#define MATCH_FCVT_W_D 0xc2000053
-#define MASK_FCVT_W_D 0xfff0007f
-#define MATCH_FCVT_WU_D 0xc2100053
-#define MASK_FCVT_WU_D 0xfff0007f
-#define MATCH_FCVT_L_D 0xc2200053
-#define MASK_FCVT_L_D 0xfff0007f
-#define MATCH_FCVT_LU_D 0xc2300053
-#define MASK_FCVT_LU_D 0xfff0007f
-#define MATCH_FMV_X_D 0xe2000053
-#define MASK_FMV_X_D 0xfff0707f
-#define MATCH_FCLASS_D 0xe2001053
-#define MASK_FCLASS_D 0xfff0707f
#define MATCH_FCVT_W_Q 0xc6000053
#define MASK_FCVT_W_Q 0xfff0007f
#define MATCH_FCVT_WU_Q 0xc6100053
#define MASK_FCVT_WU_Q 0xfff0007f
-#define MATCH_FCVT_L_Q 0xc6200053
-#define MASK_FCVT_L_Q 0xfff0007f
-#define MATCH_FCVT_LU_Q 0xc6300053
-#define MASK_FCVT_LU_Q 0xfff0007f
-#define MATCH_FMV_X_Q 0xe6000053
-#define MASK_FMV_X_Q 0xfff0707f
#define MATCH_FCLASS_Q 0xe6001053
#define MASK_FCLASS_Q 0xfff0707f
-#define MATCH_FCVT_S_W 0xd0000053
-#define MASK_FCVT_S_W 0xfff0007f
-#define MATCH_FCVT_S_WU 0xd0100053
-#define MASK_FCVT_S_WU 0xfff0007f
-#define MATCH_FCVT_S_L 0xd0200053
-#define MASK_FCVT_S_L 0xfff0007f
-#define MATCH_FCVT_S_LU 0xd0300053
-#define MASK_FCVT_S_LU 0xfff0007f
-#define MATCH_FMV_W_X 0xf0000053
-#define MASK_FMV_W_X 0xfff0707f
-#define MATCH_FCVT_D_W 0xd2000053
-#define MASK_FCVT_D_W 0xfff0007f
-#define MATCH_FCVT_D_WU 0xd2100053
-#define MASK_FCVT_D_WU 0xfff0007f
-#define MATCH_FCVT_D_L 0xd2200053
-#define MASK_FCVT_D_L 0xfff0007f
-#define MATCH_FCVT_D_LU 0xd2300053
-#define MASK_FCVT_D_LU 0xfff0007f
-#define MATCH_FMV_D_X 0xf2000053
-#define MASK_FMV_D_X 0xfff0707f
#define MATCH_FCVT_Q_W 0xd6000053
#define MASK_FCVT_Q_W 0xfff0007f
#define MATCH_FCVT_Q_WU 0xd6100053
#define MASK_FCVT_Q_WU 0xfff0007f
-#define MATCH_FCVT_Q_L 0xd6200053
-#define MASK_FCVT_Q_L 0xfff0007f
-#define MATCH_FCVT_Q_LU 0xd6300053
-#define MASK_FCVT_Q_LU 0xfff0007f
-#define MATCH_FMV_Q_X 0xf6000053
-#define MASK_FMV_Q_X 0xfff0707f
-#define MATCH_FLW 0x2007
-#define MASK_FLW 0x707f
-#define MATCH_FLD 0x3007
-#define MASK_FLD 0x707f
#define MATCH_FLQ 0x4007
#define MASK_FLQ 0x707f
-#define MATCH_FSW 0x2027
-#define MASK_FSW 0x707f
-#define MATCH_FSD 0x3027
-#define MASK_FSD 0x707f
#define MATCH_FSQ 0x4027
#define MASK_FSQ 0x707f
-#define MATCH_FMADD_S 0x43
-#define MASK_FMADD_S 0x600007f
-#define MATCH_FMSUB_S 0x47
-#define MASK_FMSUB_S 0x600007f
-#define MATCH_FNMSUB_S 0x4b
-#define MASK_FNMSUB_S 0x600007f
-#define MATCH_FNMADD_S 0x4f
-#define MASK_FNMADD_S 0x600007f
-#define MATCH_FMADD_D 0x2000043
-#define MASK_FMADD_D 0x600007f
-#define MATCH_FMSUB_D 0x2000047
-#define MASK_FMSUB_D 0x600007f
-#define MATCH_FNMSUB_D 0x200004b
-#define MASK_FNMSUB_D 0x600007f
-#define MATCH_FNMADD_D 0x200004f
-#define MASK_FNMADD_D 0x600007f
#define MATCH_FMADD_Q 0x6000043
#define MASK_FMADD_Q 0x600007f
#define MATCH_FMSUB_Q 0x6000047
@@ -618,6 +707,42 @@
#define MASK_FNMSUB_Q 0x600007f
#define MATCH_FNMADD_Q 0x600004f
#define MASK_FNMADD_Q 0x600007f
+#define MATCH_FCVT_L_Q 0xc6200053
+#define MASK_FCVT_L_Q 0xfff0007f
+#define MATCH_FCVT_LU_Q 0xc6300053
+#define MASK_FCVT_LU_Q 0xfff0007f
+#define MATCH_FCVT_Q_L 0xd6200053
+#define MASK_FCVT_Q_L 0xfff0007f
+#define MATCH_FCVT_Q_LU 0xd6300053
+#define MASK_FCVT_Q_LU 0xfff0007f
+#define MATCH_ECALL 0x73
+#define MASK_ECALL 0xffffffff
+#define MATCH_EBREAK 0x100073
+#define MASK_EBREAK 0xffffffff
+#define MATCH_URET 0x200073
+#define MASK_URET 0xffffffff
+#define MATCH_SRET 0x10200073
+#define MASK_SRET 0xffffffff
+#define MATCH_MRET 0x30200073
+#define MASK_MRET 0xffffffff
+#define MATCH_DRET 0x7b200073
+#define MASK_DRET 0xffffffff
+#define MATCH_SFENCE_VMA 0x12000073
+#define MASK_SFENCE_VMA 0xfe007fff
+#define MATCH_WFI 0x10500073
+#define MASK_WFI 0xffffffff
+#define MATCH_CSRRW 0x1073
+#define MASK_CSRRW 0x707f
+#define MATCH_CSRRS 0x2073
+#define MASK_CSRRS 0x707f
+#define MATCH_CSRRC 0x3073
+#define MASK_CSRRC 0x707f
+#define MATCH_CSRRWI 0x5073
+#define MASK_CSRRWI 0x707f
+#define MATCH_CSRRSI 0x6073
+#define MASK_CSRRSI 0x707f
+#define MATCH_CSRRCI 0x7073
+#define MASK_CSRRCI 0x707f
#define MATCH_C_NOP 0x1
#define MASK_C_NOP 0xffff
#define MATCH_C_ADDI16SP 0x6101
@@ -628,16 +753,6 @@
#define MASK_C_JALR 0xf07f
#define MATCH_C_EBREAK 0x9002
#define MASK_C_EBREAK 0xffff
-#define MATCH_C_LD 0x6000
-#define MASK_C_LD 0xe003
-#define MATCH_C_SD 0xe000
-#define MASK_C_SD 0xe003
-#define MATCH_C_ADDIW 0x2001
-#define MASK_C_ADDIW 0xe003
-#define MATCH_C_LDSP 0x6002
-#define MASK_C_LDSP 0xe003
-#define MATCH_C_SDSP 0xe002
-#define MASK_C_SDSP 0xe003
#define MATCH_C_ADDI4SPN 0x0
#define MASK_C_ADDI4SPN 0xe003
#define MATCH_C_FLD 0x2000
@@ -674,10 +789,6 @@
#define MASK_C_OR 0xfc63
#define MATCH_C_AND 0x8c61
#define MASK_C_AND 0xfc63
-#define MATCH_C_SUBW 0x9c01
-#define MASK_C_SUBW 0xfc63
-#define MATCH_C_ADDW 0x9c21
-#define MASK_C_ADDW 0xfc63
#define MATCH_C_J 0xa001
#define MASK_C_J 0xe003
#define MATCH_C_BEQZ 0xc001
@@ -702,6 +813,26 @@
#define MASK_C_SWSP 0xe003
#define MATCH_C_FSWSP 0xe002
#define MASK_C_FSWSP 0xe003
+#define MATCH_C_SRLI_RV32 0x8001
+#define MASK_C_SRLI_RV32 0xfc03
+#define MATCH_C_SRAI_RV32 0x8401
+#define MASK_C_SRAI_RV32 0xfc03
+#define MATCH_C_SLLI_RV32 0x2
+#define MASK_C_SLLI_RV32 0xf003
+#define MATCH_C_LD 0x6000
+#define MASK_C_LD 0xe003
+#define MATCH_C_SD 0xe000
+#define MASK_C_SD 0xe003
+#define MATCH_C_SUBW 0x9c01
+#define MASK_C_SUBW 0xfc63
+#define MATCH_C_ADDW 0x9c21
+#define MASK_C_ADDW 0xfc63
+#define MATCH_C_ADDIW 0x2001
+#define MASK_C_ADDIW 0xe003
+#define MATCH_C_LDSP 0x6002
+#define MASK_C_LDSP 0xe003
+#define MATCH_C_SDSP 0xe002
+#define MASK_C_SDSP 0xe003
#define MATCH_CUSTOM0 0xb
#define MASK_CUSTOM0 0x707f
#define MATCH_CUSTOM0_RS1 0x200b
@@ -750,9 +881,909 @@
#define MASK_CUSTOM3_RD_RS1 0x707f
#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
#define MASK_CUSTOM3_RD_RS1_RS2 0x707f
+#define MATCH_VSETVLI 0x7057
+#define MASK_VSETVLI 0x8000707f
+#define MATCH_VSETVL 0x80007057
+#define MASK_VSETVL 0xfe00707f
+#define MATCH_VLE8_V 0x7
+#define MASK_VLE8_V 0x1df0707f
+#define MATCH_VLE16_V 0x5007
+#define MASK_VLE16_V 0x1df0707f
+#define MATCH_VLE32_V 0x6007
+#define MASK_VLE32_V 0x1df0707f
+#define MATCH_VLE64_V 0x7007
+#define MASK_VLE64_V 0x1df0707f
+#define MATCH_VLE128_V 0x10000007
+#define MASK_VLE128_V 0x1df0707f
+#define MATCH_VLE256_V 0x10005007
+#define MASK_VLE256_V 0x1df0707f
+#define MATCH_VLE512_V 0x10006007
+#define MASK_VLE512_V 0x1df0707f
+#define MATCH_VLE1024_V 0x10007007
+#define MASK_VLE1024_V 0x1df0707f
+#define MATCH_VSE8_V 0x27
+#define MASK_VSE8_V 0x1df0707f
+#define MATCH_VSE16_V 0x5027
+#define MASK_VSE16_V 0x1df0707f
+#define MATCH_VSE32_V 0x6027
+#define MASK_VSE32_V 0x1df0707f
+#define MATCH_VSE64_V 0x7027
+#define MASK_VSE64_V 0x1df0707f
+#define MATCH_VSE128_V 0x10000027
+#define MASK_VSE128_V 0x1df0707f
+#define MATCH_VSE256_V 0x10005027
+#define MASK_VSE256_V 0x1df0707f
+#define MATCH_VSE512_V 0x10006027
+#define MASK_VSE512_V 0x1df0707f
+#define MATCH_VSE1024_V 0x10007027
+#define MASK_VSE1024_V 0x1df0707f
+#define MATCH_VLSE8_V 0x8000007
+#define MASK_VLSE8_V 0x1c00707f
+#define MATCH_VLSE16_V 0x8005007
+#define MASK_VLSE16_V 0x1c00707f
+#define MATCH_VLSE32_V 0x8006007
+#define MASK_VLSE32_V 0x1c00707f
+#define MATCH_VLSE64_V 0x8007007
+#define MASK_VLSE64_V 0x1c00707f
+#define MATCH_VLSE128_V 0x18000007
+#define MASK_VLSE128_V 0x1c00707f
+#define MATCH_VLSE256_V 0x18005007
+#define MASK_VLSE256_V 0x1c00707f
+#define MATCH_VLSE512_V 0x18006007
+#define MASK_VLSE512_V 0x1c00707f
+#define MATCH_VLSE1024_V 0x18007007
+#define MASK_VLSE1024_V 0x1c00707f
+#define MATCH_VSSE8_V 0x8000027
+#define MASK_VSSE8_V 0x1c00707f
+#define MATCH_VSSE16_V 0x8005027
+#define MASK_VSSE16_V 0x1c00707f
+#define MATCH_VSSE32_V 0x8006027
+#define MASK_VSSE32_V 0x1c00707f
+#define MATCH_VSSE64_V 0x8007027
+#define MASK_VSSE64_V 0x1c00707f
+#define MATCH_VSSE128_V 0x18000027
+#define MASK_VSSE128_V 0x1c00707f
+#define MATCH_VSSE256_V 0x18005027
+#define MASK_VSSE256_V 0x1c00707f
+#define MATCH_VSSE512_V 0x18006027
+#define MASK_VSSE512_V 0x1c00707f
+#define MATCH_VSSE1024_V 0x18007027
+#define MASK_VSSE1024_V 0x1c00707f
+#define MATCH_VLXEI8_V 0xc000007
+#define MASK_VLXEI8_V 0x1c00707f
+#define MATCH_VLXEI16_V 0xc005007
+#define MASK_VLXEI16_V 0x1c00707f
+#define MATCH_VLXEI32_V 0xc006007
+#define MASK_VLXEI32_V 0x1c00707f
+#define MATCH_VLXEI64_V 0xc007007
+#define MASK_VLXEI64_V 0x1c00707f
+#define MATCH_VLXEI128_V 0x1c000007
+#define MASK_VLXEI128_V 0x1c00707f
+#define MATCH_VLXEI256_V 0x1c005007
+#define MASK_VLXEI256_V 0x1c00707f
+#define MATCH_VLXEI512_V 0x1c006007
+#define MASK_VLXEI512_V 0x1c00707f
+#define MATCH_VLXEI1024_V 0x1c007007
+#define MASK_VLXEI1024_V 0x1c00707f
+#define MATCH_VSXEI8_V 0xc000027
+#define MASK_VSXEI8_V 0x1c00707f
+#define MATCH_VSXEI16_V 0xc005027
+#define MASK_VSXEI16_V 0x1c00707f
+#define MATCH_VSXEI32_V 0xc006027
+#define MASK_VSXEI32_V 0x1c00707f
+#define MATCH_VSXEI64_V 0xc007027
+#define MASK_VSXEI64_V 0x1c00707f
+#define MATCH_VSXEI128_V 0x1c000027
+#define MASK_VSXEI128_V 0x1c00707f
+#define MATCH_VSXEI256_V 0x1c005027
+#define MASK_VSXEI256_V 0x1c00707f
+#define MATCH_VSXEI512_V 0x1c006027
+#define MASK_VSXEI512_V 0x1c00707f
+#define MATCH_VSXEI1024_V 0x1c007027
+#define MASK_VSXEI1024_V 0x1c00707f
+#define MATCH_VSUXEI8_V 0x4000027
+#define MASK_VSUXEI8_V 0x1c00707f
+#define MATCH_VSUXEI16_V 0x4005027
+#define MASK_VSUXEI16_V 0x1c00707f
+#define MATCH_VSUXEI32_V 0x4006027
+#define MASK_VSUXEI32_V 0x1c00707f
+#define MATCH_VSUXEI64_V 0x4007027
+#define MASK_VSUXEI64_V 0x1c00707f
+#define MATCH_VSUXEI128_V 0x14000027
+#define MASK_VSUXEI128_V 0x1c00707f
+#define MATCH_VSUXEI256_V 0x14005027
+#define MASK_VSUXEI256_V 0x1c00707f
+#define MATCH_VSUXEI512_V 0x14006027
+#define MASK_VSUXEI512_V 0x1c00707f
+#define MATCH_VSUXEI1024_V 0x14007027
+#define MASK_VSUXEI1024_V 0x1c00707f
+#define MATCH_VLE8FF_V 0x1000007
+#define MASK_VLE8FF_V 0x1df0707f
+#define MATCH_VLE16FF_V 0x1005007
+#define MASK_VLE16FF_V 0x1df0707f
+#define MATCH_VLE32FF_V 0x1006007
+#define MASK_VLE32FF_V 0x1df0707f
+#define MATCH_VLE64FF_V 0x1007007
+#define MASK_VLE64FF_V 0x1df0707f
+#define MATCH_VLE128FF_V 0x11000007
+#define MASK_VLE128FF_V 0x1df0707f
+#define MATCH_VLE256FF_V 0x11005007
+#define MASK_VLE256FF_V 0x1df0707f
+#define MATCH_VLE512FF_V 0x11006007
+#define MASK_VLE512FF_V 0x1df0707f
+#define MATCH_VLE1024FF_V 0x11007007
+#define MASK_VLE1024FF_V 0x1df0707f
+#define MATCH_VL1RE8_V 0x2800007
+#define MASK_VL1RE8_V 0xfff0707f
+#define MATCH_VL1RE16_V 0x2805007
+#define MASK_VL1RE16_V 0xfff0707f
+#define MATCH_VL1RE32_V 0x2806007
+#define MASK_VL1RE32_V 0xfff0707f
+#define MATCH_VL1RE64_V 0x2807007
+#define MASK_VL1RE64_V 0xfff0707f
+#define MATCH_VL2RE8_V 0x22800007
+#define MASK_VL2RE8_V 0xfff0707f
+#define MATCH_VL2RE16_V 0x22805007
+#define MASK_VL2RE16_V 0xfff0707f
+#define MATCH_VL2RE32_V 0x22806007
+#define MASK_VL2RE32_V 0xfff0707f
+#define MATCH_VL2RE64_V 0x22807007
+#define MASK_VL2RE64_V 0xfff0707f
+#define MATCH_VL4RE8_V 0x62800007
+#define MASK_VL4RE8_V 0xfff0707f
+#define MATCH_VL4RE16_V 0x62805007
+#define MASK_VL4RE16_V 0xfff0707f
+#define MATCH_VL4RE32_V 0x62806007
+#define MASK_VL4RE32_V 0xfff0707f
+#define MATCH_VL4RE64_V 0x62807007
+#define MASK_VL4RE64_V 0xfff0707f
+#define MATCH_VL8RE8_V 0xe2800007
+#define MASK_VL8RE8_V 0xfff0707f
+#define MATCH_VL8RE16_V 0xe2805007
+#define MASK_VL8RE16_V 0xfff0707f
+#define MATCH_VL8RE32_V 0xe2806007
+#define MASK_VL8RE32_V 0xfff0707f
+#define MATCH_VL8RE64_V 0xe2807007
+#define MASK_VL8RE64_V 0xfff0707f
+#define MATCH_VS1R_V 0x2800027
+#define MASK_VS1R_V 0xfff0707f
+#define MATCH_VS2R_V 0x22800027
+#define MASK_VS2R_V 0xfff0707f
+#define MATCH_VS4R_V 0x62800027
+#define MASK_VS4R_V 0xfff0707f
+#define MATCH_VS8R_V 0xe2800027
+#define MASK_VS8R_V 0xfff0707f
+#define MATCH_VFADD_VF 0x5057
+#define MASK_VFADD_VF 0xfc00707f
+#define MATCH_VFSUB_VF 0x8005057
+#define MASK_VFSUB_VF 0xfc00707f
+#define MATCH_VFMIN_VF 0x10005057
+#define MASK_VFMIN_VF 0xfc00707f
+#define MATCH_VFMAX_VF 0x18005057
+#define MASK_VFMAX_VF 0xfc00707f
+#define MATCH_VFSGNJ_VF 0x20005057
+#define MASK_VFSGNJ_VF 0xfc00707f
+#define MATCH_VFSGNJN_VF 0x24005057
+#define MASK_VFSGNJN_VF 0xfc00707f
+#define MATCH_VFSGNJX_VF 0x28005057
+#define MASK_VFSGNJX_VF 0xfc00707f
+#define MATCH_VFSLIDE1UP_VF 0x38005057
+#define MASK_VFSLIDE1UP_VF 0xfc00707f
+#define MATCH_VFSLIDE1DOWN_VF 0x3c005057
+#define MASK_VFSLIDE1DOWN_VF 0xfc00707f
+#define MATCH_VFMV_S_F 0x42005057
+#define MASK_VFMV_S_F 0xfff0707f
+#define MATCH_VFMERGE_VFM 0x5c005057
+#define MASK_VFMERGE_VFM 0xfe00707f
+#define MATCH_VFMV_V_F 0x5e005057
+#define MASK_VFMV_V_F 0xfff0707f
+#define MATCH_VMFEQ_VF 0x60005057
+#define MASK_VMFEQ_VF 0xfc00707f
+#define MATCH_VMFLE_VF 0x64005057
+#define MASK_VMFLE_VF 0xfc00707f
+#define MATCH_VMFLT_VF 0x6c005057
+#define MASK_VMFLT_VF 0xfc00707f
+#define MATCH_VMFNE_VF 0x70005057
+#define MASK_VMFNE_VF 0xfc00707f
+#define MATCH_VMFGT_VF 0x74005057
+#define MASK_VMFGT_VF 0xfc00707f
+#define MATCH_VMFGE_VF 0x7c005057
+#define MASK_VMFGE_VF 0xfc00707f
+#define MATCH_VFDIV_VF 0x80005057
+#define MASK_VFDIV_VF 0xfc00707f
+#define MATCH_VFRDIV_VF 0x84005057
+#define MASK_VFRDIV_VF 0xfc00707f
+#define MATCH_VFMUL_VF 0x90005057
+#define MASK_VFMUL_VF 0xfc00707f
+#define MATCH_VFRSUB_VF 0x9c005057
+#define MASK_VFRSUB_VF 0xfc00707f
+#define MATCH_VFMADD_VF 0xa0005057
+#define MASK_VFMADD_VF 0xfc00707f
+#define MATCH_VFNMADD_VF 0xa4005057
+#define MASK_VFNMADD_VF 0xfc00707f
+#define MATCH_VFMSUB_VF 0xa8005057
+#define MASK_VFMSUB_VF 0xfc00707f
+#define MATCH_VFNMSUB_VF 0xac005057
+#define MASK_VFNMSUB_VF 0xfc00707f
+#define MATCH_VFMACC_VF 0xb0005057
+#define MASK_VFMACC_VF 0xfc00707f
+#define MATCH_VFNMACC_VF 0xb4005057
+#define MASK_VFNMACC_VF 0xfc00707f
+#define MATCH_VFMSAC_VF 0xb8005057
+#define MASK_VFMSAC_VF 0xfc00707f
+#define MATCH_VFNMSAC_VF 0xbc005057
+#define MASK_VFNMSAC_VF 0xfc00707f
+#define MATCH_VFWADD_VF 0xc0005057
+#define MASK_VFWADD_VF 0xfc00707f
+#define MATCH_VFWSUB_VF 0xc8005057
+#define MASK_VFWSUB_VF 0xfc00707f
+#define MATCH_VFWADD_WF 0xd0005057
+#define MASK_VFWADD_WF 0xfc00707f
+#define MATCH_VFWSUB_WF 0xd8005057
+#define MASK_VFWSUB_WF 0xfc00707f
+#define MATCH_VFWMUL_VF 0xe0005057
+#define MASK_VFWMUL_VF 0xfc00707f
+#define MATCH_VFWMACC_VF 0xf0005057
+#define MASK_VFWMACC_VF 0xfc00707f
+#define MATCH_VFWNMACC_VF 0xf4005057
+#define MASK_VFWNMACC_VF 0xfc00707f
+#define MATCH_VFWMSAC_VF 0xf8005057
+#define MASK_VFWMSAC_VF 0xfc00707f
+#define MATCH_VFWNMSAC_VF 0xfc005057
+#define MASK_VFWNMSAC_VF 0xfc00707f
+#define MATCH_VFADD_VV 0x1057
+#define MASK_VFADD_VV 0xfc00707f
+#define MATCH_VFREDSUM_VS 0x4001057
+#define MASK_VFREDSUM_VS 0xfc00707f
+#define MATCH_VFSUB_VV 0x8001057
+#define MASK_VFSUB_VV 0xfc00707f
+#define MATCH_VFREDOSUM_VS 0xc001057
+#define MASK_VFREDOSUM_VS 0xfc00707f
+#define MATCH_VFMIN_VV 0x10001057
+#define MASK_VFMIN_VV 0xfc00707f
+#define MATCH_VFREDMIN_VS 0x14001057
+#define MASK_VFREDMIN_VS 0xfc00707f
+#define MATCH_VFMAX_VV 0x18001057
+#define MASK_VFMAX_VV 0xfc00707f
+#define MATCH_VFREDMAX_VS 0x1c001057
+#define MASK_VFREDMAX_VS 0xfc00707f
+#define MATCH_VFSGNJ_VV 0x20001057
+#define MASK_VFSGNJ_VV 0xfc00707f
+#define MATCH_VFSGNJN_VV 0x24001057
+#define MASK_VFSGNJN_VV 0xfc00707f
+#define MATCH_VFSGNJX_VV 0x28001057
+#define MASK_VFSGNJX_VV 0xfc00707f
+#define MATCH_VFMV_F_S 0x42001057
+#define MASK_VFMV_F_S 0xfe0ff07f
+#define MATCH_VMFEQ_VV 0x60001057
+#define MASK_VMFEQ_VV 0xfc00707f
+#define MATCH_VMFLE_VV 0x64001057
+#define MASK_VMFLE_VV 0xfc00707f
+#define MATCH_VMFLT_VV 0x6c001057
+#define MASK_VMFLT_VV 0xfc00707f
+#define MATCH_VMFNE_VV 0x70001057
+#define MASK_VMFNE_VV 0xfc00707f
+#define MATCH_VFDIV_VV 0x80001057
+#define MASK_VFDIV_VV 0xfc00707f
+#define MATCH_VFMUL_VV 0x90001057
+#define MASK_VFMUL_VV 0xfc00707f
+#define MATCH_VFMADD_VV 0xa0001057
+#define MASK_VFMADD_VV 0xfc00707f
+#define MATCH_VFNMADD_VV 0xa4001057
+#define MASK_VFNMADD_VV 0xfc00707f
+#define MATCH_VFMSUB_VV 0xa8001057
+#define MASK_VFMSUB_VV 0xfc00707f
+#define MATCH_VFNMSUB_VV 0xac001057
+#define MASK_VFNMSUB_VV 0xfc00707f
+#define MATCH_VFMACC_VV 0xb0001057
+#define MASK_VFMACC_VV 0xfc00707f
+#define MATCH_VFNMACC_VV 0xb4001057
+#define MASK_VFNMACC_VV 0xfc00707f
+#define MATCH_VFMSAC_VV 0xb8001057
+#define MASK_VFMSAC_VV 0xfc00707f
+#define MATCH_VFNMSAC_VV 0xbc001057
+#define MASK_VFNMSAC_VV 0xfc00707f
+#define MATCH_VFCVT_XU_F_V 0x48001057
+#define MASK_VFCVT_XU_F_V 0xfc0ff07f
+#define MATCH_VFCVT_X_F_V 0x48009057
+#define MASK_VFCVT_X_F_V 0xfc0ff07f
+#define MATCH_VFCVT_F_XU_V 0x48011057
+#define MASK_VFCVT_F_XU_V 0xfc0ff07f
+#define MATCH_VFCVT_F_X_V 0x48019057
+#define MASK_VFCVT_F_X_V 0xfc0ff07f
+#define MATCH_VFCVT_RTZ_XU_F_V 0x48031057
+#define MASK_VFCVT_RTZ_XU_F_V 0xfc0ff07f
+#define MATCH_VFCVT_RTZ_X_F_V 0x48039057
+#define MASK_VFCVT_RTZ_X_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_XU_F_V 0x48041057
+#define MASK_VFWCVT_XU_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_X_F_V 0x48049057
+#define MASK_VFWCVT_X_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_F_XU_V 0x48051057
+#define MASK_VFWCVT_F_XU_V 0xfc0ff07f
+#define MATCH_VFWCVT_F_X_V 0x48059057
+#define MASK_VFWCVT_F_X_V 0xfc0ff07f
+#define MATCH_VFWCVT_F_F_V 0x48061057
+#define MASK_VFWCVT_F_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_RTZ_XU_F_V 0x48071057
+#define MASK_VFWCVT_RTZ_XU_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_RTZ_X_F_V 0x48079057
+#define MASK_VFWCVT_RTZ_X_F_V 0xfc0ff07f
+#define MATCH_VFNCVT_XU_F_W 0x48081057
+#define MASK_VFNCVT_XU_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_X_F_W 0x48089057
+#define MASK_VFNCVT_X_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_F_XU_W 0x48091057
+#define MASK_VFNCVT_F_XU_W 0xfc0ff07f
+#define MATCH_VFNCVT_F_X_W 0x48099057
+#define MASK_VFNCVT_F_X_W 0xfc0ff07f
+#define MATCH_VFNCVT_F_F_W 0x480a1057
+#define MASK_VFNCVT_F_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_ROD_F_F_W 0x480a9057
+#define MASK_VFNCVT_ROD_F_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_RTZ_XU_F_W 0x480b1057
+#define MASK_VFNCVT_RTZ_XU_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_RTZ_X_F_W 0x480b9057
+#define MASK_VFNCVT_RTZ_X_F_W 0xfc0ff07f
+#define MATCH_VFSQRT_V 0x4c001057
+#define MASK_VFSQRT_V 0xfc0ff07f
+#define MATCH_VFCLASS_V 0x4c081057
+#define MASK_VFCLASS_V 0xfc0ff07f
+#define MATCH_VFWADD_VV 0xc0001057
+#define MASK_VFWADD_VV 0xfc00707f
+#define MATCH_VFWREDSUM_VS 0xc4001057
+#define MASK_VFWREDSUM_VS 0xfc00707f
+#define MATCH_VFWSUB_VV 0xc8001057
+#define MASK_VFWSUB_VV 0xfc00707f
+#define MATCH_VFWREDOSUM_VS 0xcc001057
+#define MASK_VFWREDOSUM_VS 0xfc00707f
+#define MATCH_VFWADD_WV 0xd0001057
+#define MASK_VFWADD_WV 0xfc00707f
+#define MATCH_VFWSUB_WV 0xd8001057
+#define MASK_VFWSUB_WV 0xfc00707f
+#define MATCH_VFWMUL_VV 0xe0001057
+#define MASK_VFWMUL_VV 0xfc00707f
+#define MATCH_VFDOT_VV 0xe4001057
+#define MASK_VFDOT_VV 0xfc00707f
+#define MATCH_VFWMACC_VV 0xf0001057
+#define MASK_VFWMACC_VV 0xfc00707f
+#define MATCH_VFWNMACC_VV 0xf4001057
+#define MASK_VFWNMACC_VV 0xfc00707f
+#define MATCH_VFWMSAC_VV 0xf8001057
+#define MASK_VFWMSAC_VV 0xfc00707f
+#define MATCH_VFWNMSAC_VV 0xfc001057
+#define MASK_VFWNMSAC_VV 0xfc00707f
+#define MATCH_VADD_VX 0x4057
+#define MASK_VADD_VX 0xfc00707f
+#define MATCH_VSUB_VX 0x8004057
+#define MASK_VSUB_VX 0xfc00707f
+#define MATCH_VRSUB_VX 0xc004057
+#define MASK_VRSUB_VX 0xfc00707f
+#define MATCH_VMINU_VX 0x10004057
+#define MASK_VMINU_VX 0xfc00707f
+#define MATCH_VMIN_VX 0x14004057
+#define MASK_VMIN_VX 0xfc00707f
+#define MATCH_VMAXU_VX 0x18004057
+#define MASK_VMAXU_VX 0xfc00707f
+#define MATCH_VMAX_VX 0x1c004057
+#define MASK_VMAX_VX 0xfc00707f
+#define MATCH_VAND_VX 0x24004057
+#define MASK_VAND_VX 0xfc00707f
+#define MATCH_VOR_VX 0x28004057
+#define MASK_VOR_VX 0xfc00707f
+#define MATCH_VXOR_VX 0x2c004057
+#define MASK_VXOR_VX 0xfc00707f
+#define MATCH_VRGATHER_VX 0x30004057
+#define MASK_VRGATHER_VX 0xfc00707f
+#define MATCH_VSLIDEUP_VX 0x38004057
+#define MASK_VSLIDEUP_VX 0xfc00707f
+#define MATCH_VSLIDEDOWN_VX 0x3c004057
+#define MASK_VSLIDEDOWN_VX 0xfc00707f
+#define MATCH_VADC_VXM 0x40004057
+#define MASK_VADC_VXM 0xfe00707f
+#define MATCH_VMADC_VXM 0x44004057
+#define MASK_VMADC_VXM 0xfc00707f
+#define MATCH_VSBC_VXM 0x48004057
+#define MASK_VSBC_VXM 0xfe00707f
+#define MATCH_VMSBC_VXM 0x4c004057
+#define MASK_VMSBC_VXM 0xfc00707f
+#define MATCH_VMERGE_VXM 0x5c004057
+#define MASK_VMERGE_VXM 0xfe00707f
+#define MATCH_VMV_V_X 0x5e004057
+#define MASK_VMV_V_X 0xfff0707f
+#define MATCH_VMSEQ_VX 0x60004057
+#define MASK_VMSEQ_VX 0xfc00707f
+#define MATCH_VMSNE_VX 0x64004057
+#define MASK_VMSNE_VX 0xfc00707f
+#define MATCH_VMSLTU_VX 0x68004057
+#define MASK_VMSLTU_VX 0xfc00707f
+#define MATCH_VMSLT_VX 0x6c004057
+#define MASK_VMSLT_VX 0xfc00707f
+#define MATCH_VMSLEU_VX 0x70004057
+#define MASK_VMSLEU_VX 0xfc00707f
+#define MATCH_VMSLE_VX 0x74004057
+#define MASK_VMSLE_VX 0xfc00707f
+#define MATCH_VMSGTU_VX 0x78004057
+#define MASK_VMSGTU_VX 0xfc00707f
+#define MATCH_VMSGT_VX 0x7c004057
+#define MASK_VMSGT_VX 0xfc00707f
+#define MATCH_VSADDU_VX 0x80004057
+#define MASK_VSADDU_VX 0xfc00707f
+#define MATCH_VSADD_VX 0x84004057
+#define MASK_VSADD_VX 0xfc00707f
+#define MATCH_VSSUBU_VX 0x88004057
+#define MASK_VSSUBU_VX 0xfc00707f
+#define MATCH_VSSUB_VX 0x8c004057
+#define MASK_VSSUB_VX 0xfc00707f
+#define MATCH_VSLL_VX 0x94004057
+#define MASK_VSLL_VX 0xfc00707f
+#define MATCH_VSMUL_VX 0x9c004057
+#define MASK_VSMUL_VX 0xfc00707f
+#define MATCH_VSRL_VX 0xa0004057
+#define MASK_VSRL_VX 0xfc00707f
+#define MATCH_VSRA_VX 0xa4004057
+#define MASK_VSRA_VX 0xfc00707f
+#define MATCH_VSSRL_VX 0xa8004057
+#define MASK_VSSRL_VX 0xfc00707f
+#define MATCH_VSSRA_VX 0xac004057
+#define MASK_VSSRA_VX 0xfc00707f
+#define MATCH_VNSRL_WX 0xb0004057
+#define MASK_VNSRL_WX 0xfc00707f
+#define MATCH_VNSRA_WX 0xb4004057
+#define MASK_VNSRA_WX 0xfc00707f
+#define MATCH_VNCLIPU_WX 0xb8004057
+#define MASK_VNCLIPU_WX 0xfc00707f
+#define MATCH_VNCLIP_WX 0xbc004057
+#define MASK_VNCLIP_WX 0xfc00707f
+#define MATCH_VQMACCU_VX 0xf0004057
+#define MASK_VQMACCU_VX 0xfc00707f
+#define MATCH_VQMACC_VX 0xf4004057
+#define MASK_VQMACC_VX 0xfc00707f
+#define MATCH_VQMACCUS_VX 0xf8004057
+#define MASK_VQMACCUS_VX 0xfc00707f
+#define MATCH_VQMACCSU_VX 0xfc004057
+#define MASK_VQMACCSU_VX 0xfc00707f
+#define MATCH_VADD_VV 0x57
+#define MASK_VADD_VV 0xfc00707f
+#define MATCH_VSUB_VV 0x8000057
+#define MASK_VSUB_VV 0xfc00707f
+#define MATCH_VMINU_VV 0x10000057
+#define MASK_VMINU_VV 0xfc00707f
+#define MATCH_VMIN_VV 0x14000057
+#define MASK_VMIN_VV 0xfc00707f
+#define MATCH_VMAXU_VV 0x18000057
+#define MASK_VMAXU_VV 0xfc00707f
+#define MATCH_VMAX_VV 0x1c000057
+#define MASK_VMAX_VV 0xfc00707f
+#define MATCH_VAND_VV 0x24000057
+#define MASK_VAND_VV 0xfc00707f
+#define MATCH_VOR_VV 0x28000057
+#define MASK_VOR_VV 0xfc00707f
+#define MATCH_VXOR_VV 0x2c000057
+#define MASK_VXOR_VV 0xfc00707f
+#define MATCH_VRGATHER_VV 0x30000057
+#define MASK_VRGATHER_VV 0xfc00707f
+#define MATCH_VRGATHEREI16_VV 0x38000057
+#define MASK_VRGATHEREI16_VV 0xfc00707f
+#define MATCH_VADC_VVM 0x40000057
+#define MASK_VADC_VVM 0xfe00707f
+#define MATCH_VMADC_VVM 0x44000057
+#define MASK_VMADC_VVM 0xfc00707f
+#define MATCH_VSBC_VVM 0x48000057
+#define MASK_VSBC_VVM 0xfe00707f
+#define MATCH_VMSBC_VVM 0x4c000057
+#define MASK_VMSBC_VVM 0xfc00707f
+#define MATCH_VMERGE_VVM 0x5c000057
+#define MASK_VMERGE_VVM 0xfe00707f
+#define MATCH_VMV_V_V 0x5e000057
+#define MASK_VMV_V_V 0xfff0707f
+#define MATCH_VMSEQ_VV 0x60000057
+#define MASK_VMSEQ_VV 0xfc00707f
+#define MATCH_VMSNE_VV 0x64000057
+#define MASK_VMSNE_VV 0xfc00707f
+#define MATCH_VMSLTU_VV 0x68000057
+#define MASK_VMSLTU_VV 0xfc00707f
+#define MATCH_VMSLT_VV 0x6c000057
+#define MASK_VMSLT_VV 0xfc00707f
+#define MATCH_VMSLEU_VV 0x70000057
+#define MASK_VMSLEU_VV 0xfc00707f
+#define MATCH_VMSLE_VV 0x74000057
+#define MASK_VMSLE_VV 0xfc00707f
+#define MATCH_VSADDU_VV 0x80000057
+#define MASK_VSADDU_VV 0xfc00707f
+#define MATCH_VSADD_VV 0x84000057
+#define MASK_VSADD_VV 0xfc00707f
+#define MATCH_VSSUBU_VV 0x88000057
+#define MASK_VSSUBU_VV 0xfc00707f
+#define MATCH_VSSUB_VV 0x8c000057
+#define MASK_VSSUB_VV 0xfc00707f
+#define MATCH_VSLL_VV 0x94000057
+#define MASK_VSLL_VV 0xfc00707f
+#define MATCH_VSMUL_VV 0x9c000057
+#define MASK_VSMUL_VV 0xfc00707f
+#define MATCH_VSRL_VV 0xa0000057
+#define MASK_VSRL_VV 0xfc00707f
+#define MATCH_VSRA_VV 0xa4000057
+#define MASK_VSRA_VV 0xfc00707f
+#define MATCH_VSSRL_VV 0xa8000057
+#define MASK_VSSRL_VV 0xfc00707f
+#define MATCH_VSSRA_VV 0xac000057
+#define MASK_VSSRA_VV 0xfc00707f
+#define MATCH_VNSRL_WV 0xb0000057
+#define MASK_VNSRL_WV 0xfc00707f
+#define MATCH_VNSRA_WV 0xb4000057
+#define MASK_VNSRA_WV 0xfc00707f
+#define MATCH_VNCLIPU_WV 0xb8000057
+#define MASK_VNCLIPU_WV 0xfc00707f
+#define MATCH_VNCLIP_WV 0xbc000057
+#define MASK_VNCLIP_WV 0xfc00707f
+#define MATCH_VWREDSUMU_VS 0xc0000057
+#define MASK_VWREDSUMU_VS 0xfc00707f
+#define MATCH_VWREDSUM_VS 0xc4000057
+#define MASK_VWREDSUM_VS 0xfc00707f
+#define MATCH_VDOTU_VV 0xe0000057
+#define MASK_VDOTU_VV 0xfc00707f
+#define MATCH_VDOT_VV 0xe4000057
+#define MASK_VDOT_VV 0xfc00707f
+#define MATCH_VQMACCU_VV 0xf0000057
+#define MASK_VQMACCU_VV 0xfc00707f
+#define MATCH_VQMACC_VV 0xf4000057
+#define MASK_VQMACC_VV 0xfc00707f
+#define MATCH_VQMACCSU_VV 0xfc000057
+#define MASK_VQMACCSU_VV 0xfc00707f
+#define MATCH_VADD_VI 0x3057
+#define MASK_VADD_VI 0xfc00707f
+#define MATCH_VRSUB_VI 0xc003057
+#define MASK_VRSUB_VI 0xfc00707f
+#define MATCH_VAND_VI 0x24003057
+#define MASK_VAND_VI 0xfc00707f
+#define MATCH_VOR_VI 0x28003057
+#define MASK_VOR_VI 0xfc00707f
+#define MATCH_VXOR_VI 0x2c003057
+#define MASK_VXOR_VI 0xfc00707f
+#define MATCH_VRGATHER_VI 0x30003057
+#define MASK_VRGATHER_VI 0xfc00707f
+#define MATCH_VSLIDEUP_VI 0x38003057
+#define MASK_VSLIDEUP_VI 0xfc00707f
+#define MATCH_VSLIDEDOWN_VI 0x3c003057
+#define MASK_VSLIDEDOWN_VI 0xfc00707f
+#define MATCH_VADC_VIM 0x40003057
+#define MASK_VADC_VIM 0xfe00707f
+#define MATCH_VMADC_VIM 0x44003057
+#define MASK_VMADC_VIM 0xfc00707f
+#define MATCH_VMERGE_VIM 0x5c003057
+#define MASK_VMERGE_VIM 0xfe00707f
+#define MATCH_VMV_V_I 0x5e003057
+#define MASK_VMV_V_I 0xfff0707f
+#define MATCH_VMSEQ_VI 0x60003057
+#define MASK_VMSEQ_VI 0xfc00707f
+#define MATCH_VMSNE_VI 0x64003057
+#define MASK_VMSNE_VI 0xfc00707f
+#define MATCH_VMSLEU_VI 0x70003057
+#define MASK_VMSLEU_VI 0xfc00707f
+#define MATCH_VMSLE_VI 0x74003057
+#define MASK_VMSLE_VI 0xfc00707f
+#define MATCH_VMSGTU_VI 0x78003057
+#define MASK_VMSGTU_VI 0xfc00707f
+#define MATCH_VMSGT_VI 0x7c003057
+#define MASK_VMSGT_VI 0xfc00707f
+#define MATCH_VSADDU_VI 0x80003057
+#define MASK_VSADDU_VI 0xfc00707f
+#define MATCH_VSADD_VI 0x84003057
+#define MASK_VSADD_VI 0xfc00707f
+#define MATCH_VSLL_VI 0x94003057
+#define MASK_VSLL_VI 0xfc00707f
+#define MATCH_VMV1R_V 0x9e003057
+#define MASK_VMV1R_V 0xfe0ff07f
+#define MATCH_VMV2R_V 0x9e00b057
+#define MASK_VMV2R_V 0xfe0ff07f
+#define MATCH_VMV4R_V 0x9e01b057
+#define MASK_VMV4R_V 0xfe0ff07f
+#define MATCH_VMV8R_V 0x9e03b057
+#define MASK_VMV8R_V 0xfe0ff07f
+#define MATCH_VSRL_VI 0xa0003057
+#define MASK_VSRL_VI 0xfc00707f
+#define MATCH_VSRA_VI 0xa4003057
+#define MASK_VSRA_VI 0xfc00707f
+#define MATCH_VSSRL_VI 0xa8003057
+#define MASK_VSSRL_VI 0xfc00707f
+#define MATCH_VSSRA_VI 0xac003057
+#define MASK_VSSRA_VI 0xfc00707f
+#define MATCH_VNSRL_WI 0xb0003057
+#define MASK_VNSRL_WI 0xfc00707f
+#define MATCH_VNSRA_WI 0xb4003057
+#define MASK_VNSRA_WI 0xfc00707f
+#define MATCH_VNCLIPU_WI 0xb8003057
+#define MASK_VNCLIPU_WI 0xfc00707f
+#define MATCH_VNCLIP_WI 0xbc003057
+#define MASK_VNCLIP_WI 0xfc00707f
+#define MATCH_VREDSUM_VS 0x2057
+#define MASK_VREDSUM_VS 0xfc00707f
+#define MATCH_VREDAND_VS 0x4002057
+#define MASK_VREDAND_VS 0xfc00707f
+#define MATCH_VREDOR_VS 0x8002057
+#define MASK_VREDOR_VS 0xfc00707f
+#define MATCH_VREDXOR_VS 0xc002057
+#define MASK_VREDXOR_VS 0xfc00707f
+#define MATCH_VREDMINU_VS 0x10002057
+#define MASK_VREDMINU_VS 0xfc00707f
+#define MATCH_VREDMIN_VS 0x14002057
+#define MASK_VREDMIN_VS 0xfc00707f
+#define MATCH_VREDMAXU_VS 0x18002057
+#define MASK_VREDMAXU_VS 0xfc00707f
+#define MATCH_VREDMAX_VS 0x1c002057
+#define MASK_VREDMAX_VS 0xfc00707f
+#define MATCH_VAADDU_VV 0x20002057
+#define MASK_VAADDU_VV 0xfc00707f
+#define MATCH_VAADD_VV 0x24002057
+#define MASK_VAADD_VV 0xfc00707f
+#define MATCH_VASUBU_VV 0x28002057
+#define MASK_VASUBU_VV 0xfc00707f
+#define MATCH_VASUB_VV 0x2c002057
+#define MASK_VASUB_VV 0xfc00707f
+#define MATCH_VMV_X_S 0x42002057
+#define MASK_VMV_X_S 0xfe0ff07f
+#define MATCH_VZEXT_VF8 0x48012057
+#define MASK_VZEXT_VF8 0xfc0ff07f
+#define MATCH_VSEXT_VF8 0x4801a057
+#define MASK_VSEXT_VF8 0xfc0ff07f
+#define MATCH_VZEXT_VF4 0x48022057
+#define MASK_VZEXT_VF4 0xfc0ff07f
+#define MATCH_VSEXT_VF4 0x4802a057
+#define MASK_VSEXT_VF4 0xfc0ff07f
+#define MATCH_VZEXT_VF2 0x48032057
+#define MASK_VZEXT_VF2 0xfc0ff07f
+#define MATCH_VSEXT_VF2 0x4803a057
+#define MASK_VSEXT_VF2 0xfc0ff07f
+#define MATCH_VCOMPRESS_VM 0x5e002057
+#define MASK_VCOMPRESS_VM 0xfe00707f
+#define MATCH_VMANDNOT_MM 0x60002057
+#define MASK_VMANDNOT_MM 0xfc00707f
+#define MATCH_VMAND_MM 0x64002057
+#define MASK_VMAND_MM 0xfc00707f
+#define MATCH_VMOR_MM 0x68002057
+#define MASK_VMOR_MM 0xfc00707f
+#define MATCH_VMXOR_MM 0x6c002057
+#define MASK_VMXOR_MM 0xfc00707f
+#define MATCH_VMORNOT_MM 0x70002057
+#define MASK_VMORNOT_MM 0xfc00707f
+#define MATCH_VMNAND_MM 0x74002057
+#define MASK_VMNAND_MM 0xfc00707f
+#define MATCH_VMNOR_MM 0x78002057
+#define MASK_VMNOR_MM 0xfc00707f
+#define MATCH_VMXNOR_MM 0x7c002057
+#define MASK_VMXNOR_MM 0xfc00707f
+#define MATCH_VMSBF_M 0x5000a057
+#define MASK_VMSBF_M 0xfc0ff07f
+#define MATCH_VMSOF_M 0x50012057
+#define MASK_VMSOF_M 0xfc0ff07f
+#define MATCH_VMSIF_M 0x5001a057
+#define MASK_VMSIF_M 0xfc0ff07f
+#define MATCH_VIOTA_M 0x50082057
+#define MASK_VIOTA_M 0xfc0ff07f
+#define MATCH_VID_V 0x5008a057
+#define MASK_VID_V 0xfdfff07f
+#define MATCH_VPOPC_M 0x40082057
+#define MASK_VPOPC_M 0xfc0ff07f
+#define MATCH_VFIRST_M 0x4008a057
+#define MASK_VFIRST_M 0xfc0ff07f
+#define MATCH_VDIVU_VV 0x80002057
+#define MASK_VDIVU_VV 0xfc00707f
+#define MATCH_VDIV_VV 0x84002057
+#define MASK_VDIV_VV 0xfc00707f
+#define MATCH_VREMU_VV 0x88002057
+#define MASK_VREMU_VV 0xfc00707f
+#define MATCH_VREM_VV 0x8c002057
+#define MASK_VREM_VV 0xfc00707f
+#define MATCH_VMULHU_VV 0x90002057
+#define MASK_VMULHU_VV 0xfc00707f
+#define MATCH_VMUL_VV 0x94002057
+#define MASK_VMUL_VV 0xfc00707f
+#define MATCH_VMULHSU_VV 0x98002057
+#define MASK_VMULHSU_VV 0xfc00707f
+#define MATCH_VMULH_VV 0x9c002057
+#define MASK_VMULH_VV 0xfc00707f
+#define MATCH_VMADD_VV 0xa4002057
+#define MASK_VMADD_VV 0xfc00707f
+#define MATCH_VNMSUB_VV 0xac002057
+#define MASK_VNMSUB_VV 0xfc00707f
+#define MATCH_VMACC_VV 0xb4002057
+#define MASK_VMACC_VV 0xfc00707f
+#define MATCH_VNMSAC_VV 0xbc002057
+#define MASK_VNMSAC_VV 0xfc00707f
+#define MATCH_VWADDU_VV 0xc0002057
+#define MASK_VWADDU_VV 0xfc00707f
+#define MATCH_VWADD_VV 0xc4002057
+#define MASK_VWADD_VV 0xfc00707f
+#define MATCH_VWSUBU_VV 0xc8002057
+#define MASK_VWSUBU_VV 0xfc00707f
+#define MATCH_VWSUB_VV 0xcc002057
+#define MASK_VWSUB_VV 0xfc00707f
+#define MATCH_VWADDU_WV 0xd0002057
+#define MASK_VWADDU_WV 0xfc00707f
+#define MATCH_VWADD_WV 0xd4002057
+#define MASK_VWADD_WV 0xfc00707f
+#define MATCH_VWSUBU_WV 0xd8002057
+#define MASK_VWSUBU_WV 0xfc00707f
+#define MATCH_VWSUB_WV 0xdc002057
+#define MASK_VWSUB_WV 0xfc00707f
+#define MATCH_VWMULU_VV 0xe0002057
+#define MASK_VWMULU_VV 0xfc00707f
+#define MATCH_VWMULSU_VV 0xe8002057
+#define MASK_VWMULSU_VV 0xfc00707f
+#define MATCH_VWMUL_VV 0xec002057
+#define MASK_VWMUL_VV 0xfc00707f
+#define MATCH_VWMACCU_VV 0xf0002057
+#define MASK_VWMACCU_VV 0xfc00707f
+#define MATCH_VWMACC_VV 0xf4002057
+#define MASK_VWMACC_VV 0xfc00707f
+#define MATCH_VWMACCSU_VV 0xfc002057
+#define MASK_VWMACCSU_VV 0xfc00707f
+#define MATCH_VAADDU_VX 0x20006057
+#define MASK_VAADDU_VX 0xfc00707f
+#define MATCH_VAADD_VX 0x24006057
+#define MASK_VAADD_VX 0xfc00707f
+#define MATCH_VASUBU_VX 0x28006057
+#define MASK_VASUBU_VX 0xfc00707f
+#define MATCH_VASUB_VX 0x2c006057
+#define MASK_VASUB_VX 0xfc00707f
+#define MATCH_VMV_S_X 0x42006057
+#define MASK_VMV_S_X 0xfff0707f
+#define MATCH_VSLIDE1UP_VX 0x38006057
+#define MASK_VSLIDE1UP_VX 0xfc00707f
+#define MATCH_VSLIDE1DOWN_VX 0x3c006057
+#define MASK_VSLIDE1DOWN_VX 0xfc00707f
+#define MATCH_VDIVU_VX 0x80006057
+#define MASK_VDIVU_VX 0xfc00707f
+#define MATCH_VDIV_VX 0x84006057
+#define MASK_VDIV_VX 0xfc00707f
+#define MATCH_VREMU_VX 0x88006057
+#define MASK_VREMU_VX 0xfc00707f
+#define MATCH_VREM_VX 0x8c006057
+#define MASK_VREM_VX 0xfc00707f
+#define MATCH_VMULHU_VX 0x90006057
+#define MASK_VMULHU_VX 0xfc00707f
+#define MATCH_VMUL_VX 0x94006057
+#define MASK_VMUL_VX 0xfc00707f
+#define MATCH_VMULHSU_VX 0x98006057
+#define MASK_VMULHSU_VX 0xfc00707f
+#define MATCH_VMULH_VX 0x9c006057
+#define MASK_VMULH_VX 0xfc00707f
+#define MATCH_VMADD_VX 0xa4006057
+#define MASK_VMADD_VX 0xfc00707f
+#define MATCH_VNMSUB_VX 0xac006057
+#define MASK_VNMSUB_VX 0xfc00707f
+#define MATCH_VMACC_VX 0xb4006057
+#define MASK_VMACC_VX 0xfc00707f
+#define MATCH_VNMSAC_VX 0xbc006057
+#define MASK_VNMSAC_VX 0xfc00707f
+#define MATCH_VWADDU_VX 0xc0006057
+#define MASK_VWADDU_VX 0xfc00707f
+#define MATCH_VWADD_VX 0xc4006057
+#define MASK_VWADD_VX 0xfc00707f
+#define MATCH_VWSUBU_VX 0xc8006057
+#define MASK_VWSUBU_VX 0xfc00707f
+#define MATCH_VWSUB_VX 0xcc006057
+#define MASK_VWSUB_VX 0xfc00707f
+#define MATCH_VWADDU_WX 0xd0006057
+#define MASK_VWADDU_WX 0xfc00707f
+#define MATCH_VWADD_WX 0xd4006057
+#define MASK_VWADD_WX 0xfc00707f
+#define MATCH_VWSUBU_WX 0xd8006057
+#define MASK_VWSUBU_WX 0xfc00707f
+#define MATCH_VWSUB_WX 0xdc006057
+#define MASK_VWSUB_WX 0xfc00707f
+#define MATCH_VWMULU_VX 0xe0006057
+#define MASK_VWMULU_VX 0xfc00707f
+#define MATCH_VWMULSU_VX 0xe8006057
+#define MASK_VWMULSU_VX 0xfc00707f
+#define MATCH_VWMUL_VX 0xec006057
+#define MASK_VWMUL_VX 0xfc00707f
+#define MATCH_VWMACCU_VX 0xf0006057
+#define MASK_VWMACCU_VX 0xfc00707f
+#define MATCH_VWMACC_VX 0xf4006057
+#define MASK_VWMACC_VX 0xfc00707f
+#define MATCH_VWMACCUS_VX 0xf8006057
+#define MASK_VWMACCUS_VX 0xfc00707f
+#define MATCH_VWMACCSU_VX 0xfc006057
+#define MASK_VWMACCSU_VX 0xfc00707f
+#define MATCH_VAMOSWAPEI8_V 0x800002f
+#define MASK_VAMOSWAPEI8_V 0xf800707f
+#define MATCH_VAMOADDEI8_V 0x2f
+#define MASK_VAMOADDEI8_V 0xf800707f
+#define MATCH_VAMOXOREI8_V 0x2000002f
+#define MASK_VAMOXOREI8_V 0xf800707f
+#define MATCH_VAMOANDEI8_V 0x6000002f
+#define MASK_VAMOANDEI8_V 0xf800707f
+#define MATCH_VAMOOREI8_V 0x4000002f
+#define MASK_VAMOOREI8_V 0xf800707f
+#define MATCH_VAMOMINEI8_V 0x8000002f
+#define MASK_VAMOMINEI8_V 0xf800707f
+#define MATCH_VAMOMAXEI8_V 0xa000002f
+#define MASK_VAMOMAXEI8_V 0xf800707f
+#define MATCH_VAMOMINUEI8_V 0xc000002f
+#define MASK_VAMOMINUEI8_V 0xf800707f
+#define MATCH_VAMOMAXUEI8_V 0xe000002f
+#define MASK_VAMOMAXUEI8_V 0xf800707f
+#define MATCH_VAMOSWAPEI16_V 0x800502f
+#define MASK_VAMOSWAPEI16_V 0xf800707f
+#define MATCH_VAMOADDEI16_V 0x502f
+#define MASK_VAMOADDEI16_V 0xf800707f
+#define MATCH_VAMOXOREI16_V 0x2000502f
+#define MASK_VAMOXOREI16_V 0xf800707f
+#define MATCH_VAMOANDEI16_V 0x6000502f
+#define MASK_VAMOANDEI16_V 0xf800707f
+#define MATCH_VAMOOREI16_V 0x4000502f
+#define MASK_VAMOOREI16_V 0xf800707f
+#define MATCH_VAMOMINEI16_V 0x8000502f
+#define MASK_VAMOMINEI16_V 0xf800707f
+#define MATCH_VAMOMAXEI16_V 0xa000502f
+#define MASK_VAMOMAXEI16_V 0xf800707f
+#define MATCH_VAMOMINUEI16_V 0xc000502f
+#define MASK_VAMOMINUEI16_V 0xf800707f
+#define MATCH_VAMOMAXUEI16_V 0xe000502f
+#define MASK_VAMOMAXUEI16_V 0xf800707f
+#define MATCH_VAMOSWAPEI32_V 0x800602f
+#define MASK_VAMOSWAPEI32_V 0xf800707f
+#define MATCH_VAMOADDEI32_V 0x602f
+#define MASK_VAMOADDEI32_V 0xf800707f
+#define MATCH_VAMOXOREI32_V 0x2000602f
+#define MASK_VAMOXOREI32_V 0xf800707f
+#define MATCH_VAMOANDEI32_V 0x6000602f
+#define MASK_VAMOANDEI32_V 0xf800707f
+#define MATCH_VAMOOREI32_V 0x4000602f
+#define MASK_VAMOOREI32_V 0xf800707f
+#define MATCH_VAMOMINEI32_V 0x8000602f
+#define MASK_VAMOMINEI32_V 0xf800707f
+#define MATCH_VAMOMAXEI32_V 0xa000602f
+#define MASK_VAMOMAXEI32_V 0xf800707f
+#define MATCH_VAMOMINUEI32_V 0xc000602f
+#define MASK_VAMOMINUEI32_V 0xf800707f
+#define MATCH_VAMOMAXUEI32_V 0xe000602f
+#define MASK_VAMOMAXUEI32_V 0xf800707f
+#define MATCH_VAMOSWAPEI64_V 0x800702f
+#define MASK_VAMOSWAPEI64_V 0xf800707f
+#define MATCH_VAMOADDEI64_V 0x702f
+#define MASK_VAMOADDEI64_V 0xf800707f
+#define MATCH_VAMOXOREI64_V 0x2000702f
+#define MASK_VAMOXOREI64_V 0xf800707f
+#define MATCH_VAMOANDEI64_V 0x6000702f
+#define MASK_VAMOANDEI64_V 0xf800707f
+#define MATCH_VAMOOREI64_V 0x4000702f
+#define MASK_VAMOOREI64_V 0xf800707f
+#define MATCH_VAMOMINEI64_V 0x8000702f
+#define MASK_VAMOMINEI64_V 0xf800707f
+#define MATCH_VAMOMAXEI64_V 0xa000702f
+#define MASK_VAMOMAXEI64_V 0xf800707f
+#define MATCH_VAMOMINUEI64_V 0xc000702f
+#define MASK_VAMOMINUEI64_V 0xf800707f
+#define MATCH_VAMOMAXUEI64_V 0xe000702f
+#define MASK_VAMOMAXUEI64_V 0xf800707f
+#define MATCH_VMVNFR_V 0x9e003057
+#define MASK_VMVNFR_V 0xfe00707f
+#define MATCH_VL1R_V 0x2800007
+#define MASK_VL1R_V 0xfff0707f
+#define MATCH_VL2R_V 0x6805007
+#define MASK_VL2R_V 0xfff0707f
+#define MATCH_VL4R_V 0xe806007
+#define MASK_VL4R_V 0xfff0707f
+#define MATCH_VL8R_V 0x1e807007
+#define MASK_VL8R_V 0xfff0707f
#define CSR_FFLAGS 0x1
#define CSR_FRM 0x2
#define CSR_FCSR 0x3
+#define CSR_USTATUS 0x0
+#define CSR_UIE 0x4
+#define CSR_UTVEC 0x5
+#define CSR_VSTART 0x8
+#define CSR_VXSAT 0x9
+#define CSR_VXRM 0xa
+#define CSR_VCSR 0xf
+#define CSR_USCRATCH 0x40
+#define CSR_UEPC 0x41
+#define CSR_UCAUSE 0x42
+#define CSR_UTVAL 0x43
+#define CSR_UIP 0x44
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
#define CSR_INSTRET 0xc02
@@ -785,7 +1816,12 @@
#define CSR_HPMCOUNTER29 0xc1d
#define CSR_HPMCOUNTER30 0xc1e
#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_VL 0xc20
+#define CSR_VTYPE 0xc21
+#define CSR_VLENB 0xc22
#define CSR_SSTATUS 0x100
+#define CSR_SEDELEG 0x102
+#define CSR_SIDELEG 0x103
#define CSR_SIE 0x104
#define CSR_STVEC 0x105
#define CSR_SCOUNTEREN 0x106
@@ -795,6 +1831,43 @@
#define CSR_STVAL 0x143
#define CSR_SIP 0x144
#define CSR_SATP 0x180
+#define CSR_VSSTATUS 0x200
+#define CSR_VSIE 0x204
+#define CSR_VSTVEC 0x205
+#define CSR_VSSCRATCH 0x240
+#define CSR_VSEPC 0x241
+#define CSR_VSCAUSE 0x242
+#define CSR_VSTVAL 0x243
+#define CSR_VSIP 0x244
+#define CSR_VSATP 0x280
+#define CSR_HSTATUS 0x600
+#define CSR_HEDELEG 0x602
+#define CSR_HIDELEG 0x603
+#define CSR_HIE 0x604
+#define CSR_HTIMEDELTA 0x605
+#define CSR_HCOUNTEREN 0x606
+#define CSR_HGEIE 0x607
+#define CSR_HTVAL 0x643
+#define CSR_HIP 0x644
+#define CSR_HVIP 0x645
+#define CSR_HTINST 0x64a
+#define CSR_HGATP 0x680
+#define CSR_HGEIP 0xe12
+#define CSR_UTVT 0x7
+#define CSR_UNXTI 0x45
+#define CSR_UINTSTATUS 0x46
+#define CSR_USCRATCHCSW 0x48
+#define CSR_USCRATCHCSWL 0x49
+#define CSR_STVT 0x107
+#define CSR_SNXTI 0x145
+#define CSR_SINTSTATUS 0x146
+#define CSR_SSCRATCHCSW 0x148
+#define CSR_SSCRATCHCSWL 0x149
+#define CSR_MTVT 0x307
+#define CSR_MNXTI 0x345
+#define CSR_MINTSTATUS 0x346
+#define CSR_MSCRATCHCSW 0x348
+#define CSR_MSCRATCHCSWL 0x349
#define CSR_MSTATUS 0x300
#define CSR_MISA 0x301
#define CSR_MEDELEG 0x302
@@ -802,11 +1875,14 @@
#define CSR_MIE 0x304
#define CSR_MTVEC 0x305
#define CSR_MCOUNTEREN 0x306
+#define CSR_MCOUNTINHIBIT 0x320
#define CSR_MSCRATCH 0x340
#define CSR_MEPC 0x341
#define CSR_MCAUSE 0x342
#define CSR_MTVAL 0x343
#define CSR_MIP 0x344
+#define CSR_MTINST 0x34a
+#define CSR_MTVAL2 0x34b
#define CSR_PMPCFG0 0x3a0
#define CSR_PMPCFG1 0x3a1
#define CSR_PMPCFG2 0x3a2
@@ -833,7 +1909,8 @@
#define CSR_TDATA3 0x7a3
#define CSR_DCSR 0x7b0
#define CSR_DPC 0x7b1
-#define CSR_DSCRATCH 0x7b2
+#define CSR_DSCRATCH0 0x7b2
+#define CSR_DSCRATCH1 0x7b3
#define CSR_MCYCLE 0xb00
#define CSR_MINSTRET 0xb02
#define CSR_MHPMCOUNTER3 0xb03
@@ -898,6 +1975,7 @@
#define CSR_MARCHID 0xf12
#define CSR_MIMPID 0xf13
#define CSR_MHARTID 0xf14
+#define CSR_HTIMEDELTAH 0x615
#define CSR_CYCLEH 0xc80
#define CSR_TIMEH 0xc81
#define CSR_INSTRETH 0xc82
@@ -930,6 +2008,7 @@
#define CSR_HPMCOUNTER29H 0xc9d
#define CSR_HPMCOUNTER30H 0xc9e
#define CSR_HPMCOUNTER31H 0xc9f
+#define CSR_MSTATUSH 0x310
#define CSR_MCYCLEH 0xb80
#define CSR_MINSTRETH 0xb82
#define CSR_MHPMCOUNTER3H 0xb83
@@ -971,13 +2050,40 @@
#define CAUSE_STORE_ACCESS 0x7
#define CAUSE_USER_ECALL 0x8
#define CAUSE_SUPERVISOR_ECALL 0x9
-#define CAUSE_HYPERVISOR_ECALL 0xa
+#define CAUSE_VIRTUAL_SUPERVISOR_ECALL 0xa
#define CAUSE_MACHINE_ECALL 0xb
#define CAUSE_FETCH_PAGE_FAULT 0xc
#define CAUSE_LOAD_PAGE_FAULT 0xd
#define CAUSE_STORE_PAGE_FAULT 0xf
+#define CAUSE_FETCH_GUEST_PAGE_FAULT 0x14
+#define CAUSE_LOAD_GUEST_PAGE_FAULT 0x15
+#define CAUSE_VIRTUAL_INSTRUCTION 0x16
+#define CAUSE_STORE_GUEST_PAGE_FAULT 0x17
#endif
#ifdef DECLARE_INSN
+DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
+DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
+DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
+DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
+DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
+DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
+DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
+DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
+DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
+DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
+DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
+DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
+DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
+DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
+DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
+DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
+DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
+DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
+DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
+DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
+DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
+DECLARE_INSN(fence_tso, MATCH_FENCE_TSO, MASK_FENCE_TSO)
+DECLARE_INSN(pause, MATCH_PAUSE, MASK_PAUSE)
DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
@@ -1007,6 +2113,16 @@ DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
DECLARE_INSN(or, MATCH_OR, MASK_OR)
DECLARE_INSN(and, MATCH_AND, MASK_AND)
+DECLARE_INSN(lb, MATCH_LB, MASK_LB)
+DECLARE_INSN(lh, MATCH_LH, MASK_LH)
+DECLARE_INSN(lw, MATCH_LW, MASK_LW)
+DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
+DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
+DECLARE_INSN(sb, MATCH_SB, MASK_SB)
+DECLARE_INSN(sh, MATCH_SH, MASK_SH)
+DECLARE_INSN(sw, MATCH_SW, MASK_SW)
+DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
+DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
@@ -1016,19 +2132,9 @@ DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
-DECLARE_INSN(lb, MATCH_LB, MASK_LB)
-DECLARE_INSN(lh, MATCH_LH, MASK_LH)
-DECLARE_INSN(lw, MATCH_LW, MASK_LW)
DECLARE_INSN(ld, MATCH_LD, MASK_LD)
-DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
-DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
-DECLARE_INSN(sb, MATCH_SB, MASK_SB)
-DECLARE_INSN(sh, MATCH_SH, MASK_SH)
-DECLARE_INSN(sw, MATCH_SW, MASK_SW)
DECLARE_INSN(sd, MATCH_SD, MASK_SD)
-DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
-DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
@@ -1064,20 +2170,21 @@ DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
-DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
-DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
-DECLARE_INSN(uret, MATCH_URET, MASK_URET)
-DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
-DECLARE_INSN(mret, MATCH_MRET, MASK_MRET)
-DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
-DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA)
-DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
-DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
-DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
-DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
-DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
-DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
-DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
+DECLARE_INSN(hfence_vvma, MATCH_HFENCE_VVMA, MASK_HFENCE_VVMA)
+DECLARE_INSN(hfence_gvma, MATCH_HFENCE_GVMA, MASK_HFENCE_GVMA)
+DECLARE_INSN(hlv_b, MATCH_HLV_B, MASK_HLV_B)
+DECLARE_INSN(hlv_bu, MATCH_HLV_BU, MASK_HLV_BU)
+DECLARE_INSN(hlv_h, MATCH_HLV_H, MASK_HLV_H)
+DECLARE_INSN(hlv_hu, MATCH_HLV_HU, MASK_HLV_HU)
+DECLARE_INSN(hlvx_hu, MATCH_HLVX_HU, MASK_HLVX_HU)
+DECLARE_INSN(hlv_w, MATCH_HLV_W, MASK_HLV_W)
+DECLARE_INSN(hlvx_wu, MATCH_HLVX_WU, MASK_HLVX_WU)
+DECLARE_INSN(hsv_b, MATCH_HSV_B, MASK_HSV_B)
+DECLARE_INSN(hsv_h, MATCH_HSV_H, MASK_HSV_H)
+DECLARE_INSN(hsv_w, MATCH_HSV_W, MASK_HSV_W)
+DECLARE_INSN(hlv_wu, MATCH_HLV_WU, MASK_HLV_WU)
+DECLARE_INSN(hlv_d, MATCH_HLV_D, MASK_HLV_D)
+DECLARE_INSN(hsv_d, MATCH_HSV_D, MASK_HSV_D)
DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
@@ -1088,6 +2195,26 @@ DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
+DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
+DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
+DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
+DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
+DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
+DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W)
+DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
+DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
+DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
+DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X)
+DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
+DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
+DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
+DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
+DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
+DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
+DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
+DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
+DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
+DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
@@ -1100,6 +2227,26 @@ DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
+DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
+DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
+DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
+DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
+DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
+DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
+DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
+DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
+DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
+DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
+DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
+DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
+DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
+DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
+DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
+DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
+DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
+DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
+DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
+DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q)
DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q)
DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q)
@@ -1114,76 +2261,43 @@ DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S)
DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q)
DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D)
DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q)
-DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
-DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
-DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
-DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
-DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
-DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q)
DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q)
DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q)
-DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
-DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
-DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
-DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
-DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W)
-DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
-DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
-DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
-DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
-DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
-DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
-DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q)
DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q)
-DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q)
-DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q)
-DECLARE_INSN(fmv_x_q, MATCH_FMV_X_Q, MASK_FMV_X_Q)
DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q)
-DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
-DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
-DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
-DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
-DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X)
-DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
-DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
-DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
-DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
-DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W)
DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU)
-DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L)
-DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU)
-DECLARE_INSN(fmv_q_x, MATCH_FMV_Q_X, MASK_FMV_Q_X)
-DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
-DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
DECLARE_INSN(flq, MATCH_FLQ, MASK_FLQ)
-DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
-DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ)
-DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
-DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
-DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
-DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
-DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
-DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
-DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
-DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q)
DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q)
DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q)
DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q)
+DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q)
+DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q)
+DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L)
+DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU)
+DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
+DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
+DECLARE_INSN(uret, MATCH_URET, MASK_URET)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+DECLARE_INSN(mret, MATCH_MRET, MASK_MRET)
+DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
+DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA)
+DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
+DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
+DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
+DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
+DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
+DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
+DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP)
DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP)
DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
-DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
-DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
-DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
-DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
-DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN)
DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
@@ -1202,8 +2316,6 @@ DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR)
DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND)
-DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
-DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
@@ -1216,6 +2328,16 @@ DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP)
DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP)
+DECLARE_INSN(c_srli_rv32, MATCH_C_SRLI_RV32, MASK_C_SRLI_RV32)
+DECLARE_INSN(c_srai_rv32, MATCH_C_SRAI_RV32, MASK_C_SRAI_RV32)
+DECLARE_INSN(c_slli_rv32, MATCH_C_SLLI_RV32, MASK_C_SLLI_RV32)
+DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
+DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
+DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
+DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
+DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
+DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
+DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
@@ -1240,11 +2362,467 @@ DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
+DECLARE_INSN(vsetvli, MATCH_VSETVLI, MASK_VSETVLI)
+DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
+DECLARE_INSN(vle8_v, MATCH_VLE8_V, MASK_VLE8_V)
+DECLARE_INSN(vle16_v, MATCH_VLE16_V, MASK_VLE16_V)
+DECLARE_INSN(vle32_v, MATCH_VLE32_V, MASK_VLE32_V)
+DECLARE_INSN(vle64_v, MATCH_VLE64_V, MASK_VLE64_V)
+DECLARE_INSN(vle128_v, MATCH_VLE128_V, MASK_VLE128_V)
+DECLARE_INSN(vle256_v, MATCH_VLE256_V, MASK_VLE256_V)
+DECLARE_INSN(vle512_v, MATCH_VLE512_V, MASK_VLE512_V)
+DECLARE_INSN(vle1024_v, MATCH_VLE1024_V, MASK_VLE1024_V)
+DECLARE_INSN(vse8_v, MATCH_VSE8_V, MASK_VSE8_V)
+DECLARE_INSN(vse16_v, MATCH_VSE16_V, MASK_VSE16_V)
+DECLARE_INSN(vse32_v, MATCH_VSE32_V, MASK_VSE32_V)
+DECLARE_INSN(vse64_v, MATCH_VSE64_V, MASK_VSE64_V)
+DECLARE_INSN(vse128_v, MATCH_VSE128_V, MASK_VSE128_V)
+DECLARE_INSN(vse256_v, MATCH_VSE256_V, MASK_VSE256_V)
+DECLARE_INSN(vse512_v, MATCH_VSE512_V, MASK_VSE512_V)
+DECLARE_INSN(vse1024_v, MATCH_VSE1024_V, MASK_VSE1024_V)
+DECLARE_INSN(vlse8_v, MATCH_VLSE8_V, MASK_VLSE8_V)
+DECLARE_INSN(vlse16_v, MATCH_VLSE16_V, MASK_VLSE16_V)
+DECLARE_INSN(vlse32_v, MATCH_VLSE32_V, MASK_VLSE32_V)
+DECLARE_INSN(vlse64_v, MATCH_VLSE64_V, MASK_VLSE64_V)
+DECLARE_INSN(vlse128_v, MATCH_VLSE128_V, MASK_VLSE128_V)
+DECLARE_INSN(vlse256_v, MATCH_VLSE256_V, MASK_VLSE256_V)
+DECLARE_INSN(vlse512_v, MATCH_VLSE512_V, MASK_VLSE512_V)
+DECLARE_INSN(vlse1024_v, MATCH_VLSE1024_V, MASK_VLSE1024_V)
+DECLARE_INSN(vsse8_v, MATCH_VSSE8_V, MASK_VSSE8_V)
+DECLARE_INSN(vsse16_v, MATCH_VSSE16_V, MASK_VSSE16_V)
+DECLARE_INSN(vsse32_v, MATCH_VSSE32_V, MASK_VSSE32_V)
+DECLARE_INSN(vsse64_v, MATCH_VSSE64_V, MASK_VSSE64_V)
+DECLARE_INSN(vsse128_v, MATCH_VSSE128_V, MASK_VSSE128_V)
+DECLARE_INSN(vsse256_v, MATCH_VSSE256_V, MASK_VSSE256_V)
+DECLARE_INSN(vsse512_v, MATCH_VSSE512_V, MASK_VSSE512_V)
+DECLARE_INSN(vsse1024_v, MATCH_VSSE1024_V, MASK_VSSE1024_V)
+DECLARE_INSN(vlxei8_v, MATCH_VLXEI8_V, MASK_VLXEI8_V)
+DECLARE_INSN(vlxei16_v, MATCH_VLXEI16_V, MASK_VLXEI16_V)
+DECLARE_INSN(vlxei32_v, MATCH_VLXEI32_V, MASK_VLXEI32_V)
+DECLARE_INSN(vlxei64_v, MATCH_VLXEI64_V, MASK_VLXEI64_V)
+DECLARE_INSN(vlxei128_v, MATCH_VLXEI128_V, MASK_VLXEI128_V)
+DECLARE_INSN(vlxei256_v, MATCH_VLXEI256_V, MASK_VLXEI256_V)
+DECLARE_INSN(vlxei512_v, MATCH_VLXEI512_V, MASK_VLXEI512_V)
+DECLARE_INSN(vlxei1024_v, MATCH_VLXEI1024_V, MASK_VLXEI1024_V)
+DECLARE_INSN(vsxei8_v, MATCH_VSXEI8_V, MASK_VSXEI8_V)
+DECLARE_INSN(vsxei16_v, MATCH_VSXEI16_V, MASK_VSXEI16_V)
+DECLARE_INSN(vsxei32_v, MATCH_VSXEI32_V, MASK_VSXEI32_V)
+DECLARE_INSN(vsxei64_v, MATCH_VSXEI64_V, MASK_VSXEI64_V)
+DECLARE_INSN(vsxei128_v, MATCH_VSXEI128_V, MASK_VSXEI128_V)
+DECLARE_INSN(vsxei256_v, MATCH_VSXEI256_V, MASK_VSXEI256_V)
+DECLARE_INSN(vsxei512_v, MATCH_VSXEI512_V, MASK_VSXEI512_V)
+DECLARE_INSN(vsxei1024_v, MATCH_VSXEI1024_V, MASK_VSXEI1024_V)
+DECLARE_INSN(vsuxei8_v, MATCH_VSUXEI8_V, MASK_VSUXEI8_V)
+DECLARE_INSN(vsuxei16_v, MATCH_VSUXEI16_V, MASK_VSUXEI16_V)
+DECLARE_INSN(vsuxei32_v, MATCH_VSUXEI32_V, MASK_VSUXEI32_V)
+DECLARE_INSN(vsuxei64_v, MATCH_VSUXEI64_V, MASK_VSUXEI64_V)
+DECLARE_INSN(vsuxei128_v, MATCH_VSUXEI128_V, MASK_VSUXEI128_V)
+DECLARE_INSN(vsuxei256_v, MATCH_VSUXEI256_V, MASK_VSUXEI256_V)
+DECLARE_INSN(vsuxei512_v, MATCH_VSUXEI512_V, MASK_VSUXEI512_V)
+DECLARE_INSN(vsuxei1024_v, MATCH_VSUXEI1024_V, MASK_VSUXEI1024_V)
+DECLARE_INSN(vle8ff_v, MATCH_VLE8FF_V, MASK_VLE8FF_V)
+DECLARE_INSN(vle16ff_v, MATCH_VLE16FF_V, MASK_VLE16FF_V)
+DECLARE_INSN(vle32ff_v, MATCH_VLE32FF_V, MASK_VLE32FF_V)
+DECLARE_INSN(vle64ff_v, MATCH_VLE64FF_V, MASK_VLE64FF_V)
+DECLARE_INSN(vle128ff_v, MATCH_VLE128FF_V, MASK_VLE128FF_V)
+DECLARE_INSN(vle256ff_v, MATCH_VLE256FF_V, MASK_VLE256FF_V)
+DECLARE_INSN(vle512ff_v, MATCH_VLE512FF_V, MASK_VLE512FF_V)
+DECLARE_INSN(vle1024ff_v, MATCH_VLE1024FF_V, MASK_VLE1024FF_V)
+DECLARE_INSN(vl1re8_v, MATCH_VL1RE8_V, MASK_VL1RE8_V)
+DECLARE_INSN(vl1re16_v, MATCH_VL1RE16_V, MASK_VL1RE16_V)
+DECLARE_INSN(vl1re32_v, MATCH_VL1RE32_V, MASK_VL1RE32_V)
+DECLARE_INSN(vl1re64_v, MATCH_VL1RE64_V, MASK_VL1RE64_V)
+DECLARE_INSN(vl2re8_v, MATCH_VL2RE8_V, MASK_VL2RE8_V)
+DECLARE_INSN(vl2re16_v, MATCH_VL2RE16_V, MASK_VL2RE16_V)
+DECLARE_INSN(vl2re32_v, MATCH_VL2RE32_V, MASK_VL2RE32_V)
+DECLARE_INSN(vl2re64_v, MATCH_VL2RE64_V, MASK_VL2RE64_V)
+DECLARE_INSN(vl4re8_v, MATCH_VL4RE8_V, MASK_VL4RE8_V)
+DECLARE_INSN(vl4re16_v, MATCH_VL4RE16_V, MASK_VL4RE16_V)
+DECLARE_INSN(vl4re32_v, MATCH_VL4RE32_V, MASK_VL4RE32_V)
+DECLARE_INSN(vl4re64_v, MATCH_VL4RE64_V, MASK_VL4RE64_V)
+DECLARE_INSN(vl8re8_v, MATCH_VL8RE8_V, MASK_VL8RE8_V)
+DECLARE_INSN(vl8re16_v, MATCH_VL8RE16_V, MASK_VL8RE16_V)
+DECLARE_INSN(vl8re32_v, MATCH_VL8RE32_V, MASK_VL8RE32_V)
+DECLARE_INSN(vl8re64_v, MATCH_VL8RE64_V, MASK_VL8RE64_V)
+DECLARE_INSN(vs1r_v, MATCH_VS1R_V, MASK_VS1R_V)
+DECLARE_INSN(vs2r_v, MATCH_VS2R_V, MASK_VS2R_V)
+DECLARE_INSN(vs4r_v, MATCH_VS4R_V, MASK_VS4R_V)
+DECLARE_INSN(vs8r_v, MATCH_VS8R_V, MASK_VS8R_V)
+DECLARE_INSN(vfadd_vf, MATCH_VFADD_VF, MASK_VFADD_VF)
+DECLARE_INSN(vfsub_vf, MATCH_VFSUB_VF, MASK_VFSUB_VF)
+DECLARE_INSN(vfmin_vf, MATCH_VFMIN_VF, MASK_VFMIN_VF)
+DECLARE_INSN(vfmax_vf, MATCH_VFMAX_VF, MASK_VFMAX_VF)
+DECLARE_INSN(vfsgnj_vf, MATCH_VFSGNJ_VF, MASK_VFSGNJ_VF)
+DECLARE_INSN(vfsgnjn_vf, MATCH_VFSGNJN_VF, MASK_VFSGNJN_VF)
+DECLARE_INSN(vfsgnjx_vf, MATCH_VFSGNJX_VF, MASK_VFSGNJX_VF)
+DECLARE_INSN(vfslide1up_vf, MATCH_VFSLIDE1UP_VF, MASK_VFSLIDE1UP_VF)
+DECLARE_INSN(vfslide1down_vf, MATCH_VFSLIDE1DOWN_VF, MASK_VFSLIDE1DOWN_VF)
+DECLARE_INSN(vfmv_s_f, MATCH_VFMV_S_F, MASK_VFMV_S_F)
+DECLARE_INSN(vfmerge_vfm, MATCH_VFMERGE_VFM, MASK_VFMERGE_VFM)
+DECLARE_INSN(vfmv_v_f, MATCH_VFMV_V_F, MASK_VFMV_V_F)
+DECLARE_INSN(vmfeq_vf, MATCH_VMFEQ_VF, MASK_VMFEQ_VF)
+DECLARE_INSN(vmfle_vf, MATCH_VMFLE_VF, MASK_VMFLE_VF)
+DECLARE_INSN(vmflt_vf, MATCH_VMFLT_VF, MASK_VMFLT_VF)
+DECLARE_INSN(vmfne_vf, MATCH_VMFNE_VF, MASK_VMFNE_VF)
+DECLARE_INSN(vmfgt_vf, MATCH_VMFGT_VF, MASK_VMFGT_VF)
+DECLARE_INSN(vmfge_vf, MATCH_VMFGE_VF, MASK_VMFGE_VF)
+DECLARE_INSN(vfdiv_vf, MATCH_VFDIV_VF, MASK_VFDIV_VF)
+DECLARE_INSN(vfrdiv_vf, MATCH_VFRDIV_VF, MASK_VFRDIV_VF)
+DECLARE_INSN(vfmul_vf, MATCH_VFMUL_VF, MASK_VFMUL_VF)
+DECLARE_INSN(vfrsub_vf, MATCH_VFRSUB_VF, MASK_VFRSUB_VF)
+DECLARE_INSN(vfmadd_vf, MATCH_VFMADD_VF, MASK_VFMADD_VF)
+DECLARE_INSN(vfnmadd_vf, MATCH_VFNMADD_VF, MASK_VFNMADD_VF)
+DECLARE_INSN(vfmsub_vf, MATCH_VFMSUB_VF, MASK_VFMSUB_VF)
+DECLARE_INSN(vfnmsub_vf, MATCH_VFNMSUB_VF, MASK_VFNMSUB_VF)
+DECLARE_INSN(vfmacc_vf, MATCH_VFMACC_VF, MASK_VFMACC_VF)
+DECLARE_INSN(vfnmacc_vf, MATCH_VFNMACC_VF, MASK_VFNMACC_VF)
+DECLARE_INSN(vfmsac_vf, MATCH_VFMSAC_VF, MASK_VFMSAC_VF)
+DECLARE_INSN(vfnmsac_vf, MATCH_VFNMSAC_VF, MASK_VFNMSAC_VF)
+DECLARE_INSN(vfwadd_vf, MATCH_VFWADD_VF, MASK_VFWADD_VF)
+DECLARE_INSN(vfwsub_vf, MATCH_VFWSUB_VF, MASK_VFWSUB_VF)
+DECLARE_INSN(vfwadd_wf, MATCH_VFWADD_WF, MASK_VFWADD_WF)
+DECLARE_INSN(vfwsub_wf, MATCH_VFWSUB_WF, MASK_VFWSUB_WF)
+DECLARE_INSN(vfwmul_vf, MATCH_VFWMUL_VF, MASK_VFWMUL_VF)
+DECLARE_INSN(vfwmacc_vf, MATCH_VFWMACC_VF, MASK_VFWMACC_VF)
+DECLARE_INSN(vfwnmacc_vf, MATCH_VFWNMACC_VF, MASK_VFWNMACC_VF)
+DECLARE_INSN(vfwmsac_vf, MATCH_VFWMSAC_VF, MASK_VFWMSAC_VF)
+DECLARE_INSN(vfwnmsac_vf, MATCH_VFWNMSAC_VF, MASK_VFWNMSAC_VF)
+DECLARE_INSN(vfadd_vv, MATCH_VFADD_VV, MASK_VFADD_VV)
+DECLARE_INSN(vfredsum_vs, MATCH_VFREDSUM_VS, MASK_VFREDSUM_VS)
+DECLARE_INSN(vfsub_vv, MATCH_VFSUB_VV, MASK_VFSUB_VV)
+DECLARE_INSN(vfredosum_vs, MATCH_VFREDOSUM_VS, MASK_VFREDOSUM_VS)
+DECLARE_INSN(vfmin_vv, MATCH_VFMIN_VV, MASK_VFMIN_VV)
+DECLARE_INSN(vfredmin_vs, MATCH_VFREDMIN_VS, MASK_VFREDMIN_VS)
+DECLARE_INSN(vfmax_vv, MATCH_VFMAX_VV, MASK_VFMAX_VV)
+DECLARE_INSN(vfredmax_vs, MATCH_VFREDMAX_VS, MASK_VFREDMAX_VS)
+DECLARE_INSN(vfsgnj_vv, MATCH_VFSGNJ_VV, MASK_VFSGNJ_VV)
+DECLARE_INSN(vfsgnjn_vv, MATCH_VFSGNJN_VV, MASK_VFSGNJN_VV)
+DECLARE_INSN(vfsgnjx_vv, MATCH_VFSGNJX_VV, MASK_VFSGNJX_VV)
+DECLARE_INSN(vfmv_f_s, MATCH_VFMV_F_S, MASK_VFMV_F_S)
+DECLARE_INSN(vmfeq_vv, MATCH_VMFEQ_VV, MASK_VMFEQ_VV)
+DECLARE_INSN(vmfle_vv, MATCH_VMFLE_VV, MASK_VMFLE_VV)
+DECLARE_INSN(vmflt_vv, MATCH_VMFLT_VV, MASK_VMFLT_VV)
+DECLARE_INSN(vmfne_vv, MATCH_VMFNE_VV, MASK_VMFNE_VV)
+DECLARE_INSN(vfdiv_vv, MATCH_VFDIV_VV, MASK_VFDIV_VV)
+DECLARE_INSN(vfmul_vv, MATCH_VFMUL_VV, MASK_VFMUL_VV)
+DECLARE_INSN(vfmadd_vv, MATCH_VFMADD_VV, MASK_VFMADD_VV)
+DECLARE_INSN(vfnmadd_vv, MATCH_VFNMADD_VV, MASK_VFNMADD_VV)
+DECLARE_INSN(vfmsub_vv, MATCH_VFMSUB_VV, MASK_VFMSUB_VV)
+DECLARE_INSN(vfnmsub_vv, MATCH_VFNMSUB_VV, MASK_VFNMSUB_VV)
+DECLARE_INSN(vfmacc_vv, MATCH_VFMACC_VV, MASK_VFMACC_VV)
+DECLARE_INSN(vfnmacc_vv, MATCH_VFNMACC_VV, MASK_VFNMACC_VV)
+DECLARE_INSN(vfmsac_vv, MATCH_VFMSAC_VV, MASK_VFMSAC_VV)
+DECLARE_INSN(vfnmsac_vv, MATCH_VFNMSAC_VV, MASK_VFNMSAC_VV)
+DECLARE_INSN(vfcvt_xu_f_v, MATCH_VFCVT_XU_F_V, MASK_VFCVT_XU_F_V)
+DECLARE_INSN(vfcvt_x_f_v, MATCH_VFCVT_X_F_V, MASK_VFCVT_X_F_V)
+DECLARE_INSN(vfcvt_f_xu_v, MATCH_VFCVT_F_XU_V, MASK_VFCVT_F_XU_V)
+DECLARE_INSN(vfcvt_f_x_v, MATCH_VFCVT_F_X_V, MASK_VFCVT_F_X_V)
+DECLARE_INSN(vfcvt_rtz_xu_f_v, MATCH_VFCVT_RTZ_XU_F_V, MASK_VFCVT_RTZ_XU_F_V)
+DECLARE_INSN(vfcvt_rtz_x_f_v, MATCH_VFCVT_RTZ_X_F_V, MASK_VFCVT_RTZ_X_F_V)
+DECLARE_INSN(vfwcvt_xu_f_v, MATCH_VFWCVT_XU_F_V, MASK_VFWCVT_XU_F_V)
+DECLARE_INSN(vfwcvt_x_f_v, MATCH_VFWCVT_X_F_V, MASK_VFWCVT_X_F_V)
+DECLARE_INSN(vfwcvt_f_xu_v, MATCH_VFWCVT_F_XU_V, MASK_VFWCVT_F_XU_V)
+DECLARE_INSN(vfwcvt_f_x_v, MATCH_VFWCVT_F_X_V, MASK_VFWCVT_F_X_V)
+DECLARE_INSN(vfwcvt_f_f_v, MATCH_VFWCVT_F_F_V, MASK_VFWCVT_F_F_V)
+DECLARE_INSN(vfwcvt_rtz_xu_f_v, MATCH_VFWCVT_RTZ_XU_F_V, MASK_VFWCVT_RTZ_XU_F_V)
+DECLARE_INSN(vfwcvt_rtz_x_f_v, MATCH_VFWCVT_RTZ_X_F_V, MASK_VFWCVT_RTZ_X_F_V)
+DECLARE_INSN(vfncvt_xu_f_w, MATCH_VFNCVT_XU_F_W, MASK_VFNCVT_XU_F_W)
+DECLARE_INSN(vfncvt_x_f_w, MATCH_VFNCVT_X_F_W, MASK_VFNCVT_X_F_W)
+DECLARE_INSN(vfncvt_f_xu_w, MATCH_VFNCVT_F_XU_W, MASK_VFNCVT_F_XU_W)
+DECLARE_INSN(vfncvt_f_x_w, MATCH_VFNCVT_F_X_W, MASK_VFNCVT_F_X_W)
+DECLARE_INSN(vfncvt_f_f_w, MATCH_VFNCVT_F_F_W, MASK_VFNCVT_F_F_W)
+DECLARE_INSN(vfncvt_rod_f_f_w, MATCH_VFNCVT_ROD_F_F_W, MASK_VFNCVT_ROD_F_F_W)
+DECLARE_INSN(vfncvt_rtz_xu_f_w, MATCH_VFNCVT_RTZ_XU_F_W, MASK_VFNCVT_RTZ_XU_F_W)
+DECLARE_INSN(vfncvt_rtz_x_f_w, MATCH_VFNCVT_RTZ_X_F_W, MASK_VFNCVT_RTZ_X_F_W)
+DECLARE_INSN(vfsqrt_v, MATCH_VFSQRT_V, MASK_VFSQRT_V)
+DECLARE_INSN(vfclass_v, MATCH_VFCLASS_V, MASK_VFCLASS_V)
+DECLARE_INSN(vfwadd_vv, MATCH_VFWADD_VV, MASK_VFWADD_VV)
+DECLARE_INSN(vfwredsum_vs, MATCH_VFWREDSUM_VS, MASK_VFWREDSUM_VS)
+DECLARE_INSN(vfwsub_vv, MATCH_VFWSUB_VV, MASK_VFWSUB_VV)
+DECLARE_INSN(vfwredosum_vs, MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS)
+DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV)
+DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV)
+DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV)
+DECLARE_INSN(vfdot_vv, MATCH_VFDOT_VV, MASK_VFDOT_VV)
+DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV)
+DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV)
+DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV)
+DECLARE_INSN(vfwnmsac_vv, MATCH_VFWNMSAC_VV, MASK_VFWNMSAC_VV)
+DECLARE_INSN(vadd_vx, MATCH_VADD_VX, MASK_VADD_VX)
+DECLARE_INSN(vsub_vx, MATCH_VSUB_VX, MASK_VSUB_VX)
+DECLARE_INSN(vrsub_vx, MATCH_VRSUB_VX, MASK_VRSUB_VX)
+DECLARE_INSN(vminu_vx, MATCH_VMINU_VX, MASK_VMINU_VX)
+DECLARE_INSN(vmin_vx, MATCH_VMIN_VX, MASK_VMIN_VX)
+DECLARE_INSN(vmaxu_vx, MATCH_VMAXU_VX, MASK_VMAXU_VX)
+DECLARE_INSN(vmax_vx, MATCH_VMAX_VX, MASK_VMAX_VX)
+DECLARE_INSN(vand_vx, MATCH_VAND_VX, MASK_VAND_VX)
+DECLARE_INSN(vor_vx, MATCH_VOR_VX, MASK_VOR_VX)
+DECLARE_INSN(vxor_vx, MATCH_VXOR_VX, MASK_VXOR_VX)
+DECLARE_INSN(vrgather_vx, MATCH_VRGATHER_VX, MASK_VRGATHER_VX)
+DECLARE_INSN(vslideup_vx, MATCH_VSLIDEUP_VX, MASK_VSLIDEUP_VX)
+DECLARE_INSN(vslidedown_vx, MATCH_VSLIDEDOWN_VX, MASK_VSLIDEDOWN_VX)
+DECLARE_INSN(vadc_vxm, MATCH_VADC_VXM, MASK_VADC_VXM)
+DECLARE_INSN(vmadc_vxm, MATCH_VMADC_VXM, MASK_VMADC_VXM)
+DECLARE_INSN(vsbc_vxm, MATCH_VSBC_VXM, MASK_VSBC_VXM)
+DECLARE_INSN(vmsbc_vxm, MATCH_VMSBC_VXM, MASK_VMSBC_VXM)
+DECLARE_INSN(vmerge_vxm, MATCH_VMERGE_VXM, MASK_VMERGE_VXM)
+DECLARE_INSN(vmv_v_x, MATCH_VMV_V_X, MASK_VMV_V_X)
+DECLARE_INSN(vmseq_vx, MATCH_VMSEQ_VX, MASK_VMSEQ_VX)
+DECLARE_INSN(vmsne_vx, MATCH_VMSNE_VX, MASK_VMSNE_VX)
+DECLARE_INSN(vmsltu_vx, MATCH_VMSLTU_VX, MASK_VMSLTU_VX)
+DECLARE_INSN(vmslt_vx, MATCH_VMSLT_VX, MASK_VMSLT_VX)
+DECLARE_INSN(vmsleu_vx, MATCH_VMSLEU_VX, MASK_VMSLEU_VX)
+DECLARE_INSN(vmsle_vx, MATCH_VMSLE_VX, MASK_VMSLE_VX)
+DECLARE_INSN(vmsgtu_vx, MATCH_VMSGTU_VX, MASK_VMSGTU_VX)
+DECLARE_INSN(vmsgt_vx, MATCH_VMSGT_VX, MASK_VMSGT_VX)
+DECLARE_INSN(vsaddu_vx, MATCH_VSADDU_VX, MASK_VSADDU_VX)
+DECLARE_INSN(vsadd_vx, MATCH_VSADD_VX, MASK_VSADD_VX)
+DECLARE_INSN(vssubu_vx, MATCH_VSSUBU_VX, MASK_VSSUBU_VX)
+DECLARE_INSN(vssub_vx, MATCH_VSSUB_VX, MASK_VSSUB_VX)
+DECLARE_INSN(vsll_vx, MATCH_VSLL_VX, MASK_VSLL_VX)
+DECLARE_INSN(vsmul_vx, MATCH_VSMUL_VX, MASK_VSMUL_VX)
+DECLARE_INSN(vsrl_vx, MATCH_VSRL_VX, MASK_VSRL_VX)
+DECLARE_INSN(vsra_vx, MATCH_VSRA_VX, MASK_VSRA_VX)
+DECLARE_INSN(vssrl_vx, MATCH_VSSRL_VX, MASK_VSSRL_VX)
+DECLARE_INSN(vssra_vx, MATCH_VSSRA_VX, MASK_VSSRA_VX)
+DECLARE_INSN(vnsrl_wx, MATCH_VNSRL_WX, MASK_VNSRL_WX)
+DECLARE_INSN(vnsra_wx, MATCH_VNSRA_WX, MASK_VNSRA_WX)
+DECLARE_INSN(vnclipu_wx, MATCH_VNCLIPU_WX, MASK_VNCLIPU_WX)
+DECLARE_INSN(vnclip_wx, MATCH_VNCLIP_WX, MASK_VNCLIP_WX)
+DECLARE_INSN(vqmaccu_vx, MATCH_VQMACCU_VX, MASK_VQMACCU_VX)
+DECLARE_INSN(vqmacc_vx, MATCH_VQMACC_VX, MASK_VQMACC_VX)
+DECLARE_INSN(vqmaccus_vx, MATCH_VQMACCUS_VX, MASK_VQMACCUS_VX)
+DECLARE_INSN(vqmaccsu_vx, MATCH_VQMACCSU_VX, MASK_VQMACCSU_VX)
+DECLARE_INSN(vadd_vv, MATCH_VADD_VV, MASK_VADD_VV)
+DECLARE_INSN(vsub_vv, MATCH_VSUB_VV, MASK_VSUB_VV)
+DECLARE_INSN(vminu_vv, MATCH_VMINU_VV, MASK_VMINU_VV)
+DECLARE_INSN(vmin_vv, MATCH_VMIN_VV, MASK_VMIN_VV)
+DECLARE_INSN(vmaxu_vv, MATCH_VMAXU_VV, MASK_VMAXU_VV)
+DECLARE_INSN(vmax_vv, MATCH_VMAX_VV, MASK_VMAX_VV)
+DECLARE_INSN(vand_vv, MATCH_VAND_VV, MASK_VAND_VV)
+DECLARE_INSN(vor_vv, MATCH_VOR_VV, MASK_VOR_VV)
+DECLARE_INSN(vxor_vv, MATCH_VXOR_VV, MASK_VXOR_VV)
+DECLARE_INSN(vrgather_vv, MATCH_VRGATHER_VV, MASK_VRGATHER_VV)
+DECLARE_INSN(vrgatherei16_vv, MATCH_VRGATHEREI16_VV, MASK_VRGATHEREI16_VV)
+DECLARE_INSN(vadc_vvm, MATCH_VADC_VVM, MASK_VADC_VVM)
+DECLARE_INSN(vmadc_vvm, MATCH_VMADC_VVM, MASK_VMADC_VVM)
+DECLARE_INSN(vsbc_vvm, MATCH_VSBC_VVM, MASK_VSBC_VVM)
+DECLARE_INSN(vmsbc_vvm, MATCH_VMSBC_VVM, MASK_VMSBC_VVM)
+DECLARE_INSN(vmerge_vvm, MATCH_VMERGE_VVM, MASK_VMERGE_VVM)
+DECLARE_INSN(vmv_v_v, MATCH_VMV_V_V, MASK_VMV_V_V)
+DECLARE_INSN(vmseq_vv, MATCH_VMSEQ_VV, MASK_VMSEQ_VV)
+DECLARE_INSN(vmsne_vv, MATCH_VMSNE_VV, MASK_VMSNE_VV)
+DECLARE_INSN(vmsltu_vv, MATCH_VMSLTU_VV, MASK_VMSLTU_VV)
+DECLARE_INSN(vmslt_vv, MATCH_VMSLT_VV, MASK_VMSLT_VV)
+DECLARE_INSN(vmsleu_vv, MATCH_VMSLEU_VV, MASK_VMSLEU_VV)
+DECLARE_INSN(vmsle_vv, MATCH_VMSLE_VV, MASK_VMSLE_VV)
+DECLARE_INSN(vsaddu_vv, MATCH_VSADDU_VV, MASK_VSADDU_VV)
+DECLARE_INSN(vsadd_vv, MATCH_VSADD_VV, MASK_VSADD_VV)
+DECLARE_INSN(vssubu_vv, MATCH_VSSUBU_VV, MASK_VSSUBU_VV)
+DECLARE_INSN(vssub_vv, MATCH_VSSUB_VV, MASK_VSSUB_VV)
+DECLARE_INSN(vsll_vv, MATCH_VSLL_VV, MASK_VSLL_VV)
+DECLARE_INSN(vsmul_vv, MATCH_VSMUL_VV, MASK_VSMUL_VV)
+DECLARE_INSN(vsrl_vv, MATCH_VSRL_VV, MASK_VSRL_VV)
+DECLARE_INSN(vsra_vv, MATCH_VSRA_VV, MASK_VSRA_VV)
+DECLARE_INSN(vssrl_vv, MATCH_VSSRL_VV, MASK_VSSRL_VV)
+DECLARE_INSN(vssra_vv, MATCH_VSSRA_VV, MASK_VSSRA_VV)
+DECLARE_INSN(vnsrl_wv, MATCH_VNSRL_WV, MASK_VNSRL_WV)
+DECLARE_INSN(vnsra_wv, MATCH_VNSRA_WV, MASK_VNSRA_WV)
+DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV)
+DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV)
+DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS)
+DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS)
+DECLARE_INSN(vdotu_vv, MATCH_VDOTU_VV, MASK_VDOTU_VV)
+DECLARE_INSN(vdot_vv, MATCH_VDOT_VV, MASK_VDOT_VV)
+DECLARE_INSN(vqmaccu_vv, MATCH_VQMACCU_VV, MASK_VQMACCU_VV)
+DECLARE_INSN(vqmacc_vv, MATCH_VQMACC_VV, MASK_VQMACC_VV)
+DECLARE_INSN(vqmaccsu_vv, MATCH_VQMACCSU_VV, MASK_VQMACCSU_VV)
+DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI)
+DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI)
+DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI)
+DECLARE_INSN(vor_vi, MATCH_VOR_VI, MASK_VOR_VI)
+DECLARE_INSN(vxor_vi, MATCH_VXOR_VI, MASK_VXOR_VI)
+DECLARE_INSN(vrgather_vi, MATCH_VRGATHER_VI, MASK_VRGATHER_VI)
+DECLARE_INSN(vslideup_vi, MATCH_VSLIDEUP_VI, MASK_VSLIDEUP_VI)
+DECLARE_INSN(vslidedown_vi, MATCH_VSLIDEDOWN_VI, MASK_VSLIDEDOWN_VI)
+DECLARE_INSN(vadc_vim, MATCH_VADC_VIM, MASK_VADC_VIM)
+DECLARE_INSN(vmadc_vim, MATCH_VMADC_VIM, MASK_VMADC_VIM)
+DECLARE_INSN(vmerge_vim, MATCH_VMERGE_VIM, MASK_VMERGE_VIM)
+DECLARE_INSN(vmv_v_i, MATCH_VMV_V_I, MASK_VMV_V_I)
+DECLARE_INSN(vmseq_vi, MATCH_VMSEQ_VI, MASK_VMSEQ_VI)
+DECLARE_INSN(vmsne_vi, MATCH_VMSNE_VI, MASK_VMSNE_VI)
+DECLARE_INSN(vmsleu_vi, MATCH_VMSLEU_VI, MASK_VMSLEU_VI)
+DECLARE_INSN(vmsle_vi, MATCH_VMSLE_VI, MASK_VMSLE_VI)
+DECLARE_INSN(vmsgtu_vi, MATCH_VMSGTU_VI, MASK_VMSGTU_VI)
+DECLARE_INSN(vmsgt_vi, MATCH_VMSGT_VI, MASK_VMSGT_VI)
+DECLARE_INSN(vsaddu_vi, MATCH_VSADDU_VI, MASK_VSADDU_VI)
+DECLARE_INSN(vsadd_vi, MATCH_VSADD_VI, MASK_VSADD_VI)
+DECLARE_INSN(vsll_vi, MATCH_VSLL_VI, MASK_VSLL_VI)
+DECLARE_INSN(vmv1r_v, MATCH_VMV1R_V, MASK_VMV1R_V)
+DECLARE_INSN(vmv2r_v, MATCH_VMV2R_V, MASK_VMV2R_V)
+DECLARE_INSN(vmv4r_v, MATCH_VMV4R_V, MASK_VMV4R_V)
+DECLARE_INSN(vmv8r_v, MATCH_VMV8R_V, MASK_VMV8R_V)
+DECLARE_INSN(vsrl_vi, MATCH_VSRL_VI, MASK_VSRL_VI)
+DECLARE_INSN(vsra_vi, MATCH_VSRA_VI, MASK_VSRA_VI)
+DECLARE_INSN(vssrl_vi, MATCH_VSSRL_VI, MASK_VSSRL_VI)
+DECLARE_INSN(vssra_vi, MATCH_VSSRA_VI, MASK_VSSRA_VI)
+DECLARE_INSN(vnsrl_wi, MATCH_VNSRL_WI, MASK_VNSRL_WI)
+DECLARE_INSN(vnsra_wi, MATCH_VNSRA_WI, MASK_VNSRA_WI)
+DECLARE_INSN(vnclipu_wi, MATCH_VNCLIPU_WI, MASK_VNCLIPU_WI)
+DECLARE_INSN(vnclip_wi, MATCH_VNCLIP_WI, MASK_VNCLIP_WI)
+DECLARE_INSN(vredsum_vs, MATCH_VREDSUM_VS, MASK_VREDSUM_VS)
+DECLARE_INSN(vredand_vs, MATCH_VREDAND_VS, MASK_VREDAND_VS)
+DECLARE_INSN(vredor_vs, MATCH_VREDOR_VS, MASK_VREDOR_VS)
+DECLARE_INSN(vredxor_vs, MATCH_VREDXOR_VS, MASK_VREDXOR_VS)
+DECLARE_INSN(vredminu_vs, MATCH_VREDMINU_VS, MASK_VREDMINU_VS)
+DECLARE_INSN(vredmin_vs, MATCH_VREDMIN_VS, MASK_VREDMIN_VS)
+DECLARE_INSN(vredmaxu_vs, MATCH_VREDMAXU_VS, MASK_VREDMAXU_VS)
+DECLARE_INSN(vredmax_vs, MATCH_VREDMAX_VS, MASK_VREDMAX_VS)
+DECLARE_INSN(vaaddu_vv, MATCH_VAADDU_VV, MASK_VAADDU_VV)
+DECLARE_INSN(vaadd_vv, MATCH_VAADD_VV, MASK_VAADD_VV)
+DECLARE_INSN(vasubu_vv, MATCH_VASUBU_VV, MASK_VASUBU_VV)
+DECLARE_INSN(vasub_vv, MATCH_VASUB_VV, MASK_VASUB_VV)
+DECLARE_INSN(vmv_x_s, MATCH_VMV_X_S, MASK_VMV_X_S)
+DECLARE_INSN(vzext_vf8, MATCH_VZEXT_VF8, MASK_VZEXT_VF8)
+DECLARE_INSN(vsext_vf8, MATCH_VSEXT_VF8, MASK_VSEXT_VF8)
+DECLARE_INSN(vzext_vf4, MATCH_VZEXT_VF4, MASK_VZEXT_VF4)
+DECLARE_INSN(vsext_vf4, MATCH_VSEXT_VF4, MASK_VSEXT_VF4)
+DECLARE_INSN(vzext_vf2, MATCH_VZEXT_VF2, MASK_VZEXT_VF2)
+DECLARE_INSN(vsext_vf2, MATCH_VSEXT_VF2, MASK_VSEXT_VF2)
+DECLARE_INSN(vcompress_vm, MATCH_VCOMPRESS_VM, MASK_VCOMPRESS_VM)
+DECLARE_INSN(vmandnot_mm, MATCH_VMANDNOT_MM, MASK_VMANDNOT_MM)
+DECLARE_INSN(vmand_mm, MATCH_VMAND_MM, MASK_VMAND_MM)
+DECLARE_INSN(vmor_mm, MATCH_VMOR_MM, MASK_VMOR_MM)
+DECLARE_INSN(vmxor_mm, MATCH_VMXOR_MM, MASK_VMXOR_MM)
+DECLARE_INSN(vmornot_mm, MATCH_VMORNOT_MM, MASK_VMORNOT_MM)
+DECLARE_INSN(vmnand_mm, MATCH_VMNAND_MM, MASK_VMNAND_MM)
+DECLARE_INSN(vmnor_mm, MATCH_VMNOR_MM, MASK_VMNOR_MM)
+DECLARE_INSN(vmxnor_mm, MATCH_VMXNOR_MM, MASK_VMXNOR_MM)
+DECLARE_INSN(vmsbf_m, MATCH_VMSBF_M, MASK_VMSBF_M)
+DECLARE_INSN(vmsof_m, MATCH_VMSOF_M, MASK_VMSOF_M)
+DECLARE_INSN(vmsif_m, MATCH_VMSIF_M, MASK_VMSIF_M)
+DECLARE_INSN(viota_m, MATCH_VIOTA_M, MASK_VIOTA_M)
+DECLARE_INSN(vid_v, MATCH_VID_V, MASK_VID_V)
+DECLARE_INSN(vpopc_m, MATCH_VPOPC_M, MASK_VPOPC_M)
+DECLARE_INSN(vfirst_m, MATCH_VFIRST_M, MASK_VFIRST_M)
+DECLARE_INSN(vdivu_vv, MATCH_VDIVU_VV, MASK_VDIVU_VV)
+DECLARE_INSN(vdiv_vv, MATCH_VDIV_VV, MASK_VDIV_VV)
+DECLARE_INSN(vremu_vv, MATCH_VREMU_VV, MASK_VREMU_VV)
+DECLARE_INSN(vrem_vv, MATCH_VREM_VV, MASK_VREM_VV)
+DECLARE_INSN(vmulhu_vv, MATCH_VMULHU_VV, MASK_VMULHU_VV)
+DECLARE_INSN(vmul_vv, MATCH_VMUL_VV, MASK_VMUL_VV)
+DECLARE_INSN(vmulhsu_vv, MATCH_VMULHSU_VV, MASK_VMULHSU_VV)
+DECLARE_INSN(vmulh_vv, MATCH_VMULH_VV, MASK_VMULH_VV)
+DECLARE_INSN(vmadd_vv, MATCH_VMADD_VV, MASK_VMADD_VV)
+DECLARE_INSN(vnmsub_vv, MATCH_VNMSUB_VV, MASK_VNMSUB_VV)
+DECLARE_INSN(vmacc_vv, MATCH_VMACC_VV, MASK_VMACC_VV)
+DECLARE_INSN(vnmsac_vv, MATCH_VNMSAC_VV, MASK_VNMSAC_VV)
+DECLARE_INSN(vwaddu_vv, MATCH_VWADDU_VV, MASK_VWADDU_VV)
+DECLARE_INSN(vwadd_vv, MATCH_VWADD_VV, MASK_VWADD_VV)
+DECLARE_INSN(vwsubu_vv, MATCH_VWSUBU_VV, MASK_VWSUBU_VV)
+DECLARE_INSN(vwsub_vv, MATCH_VWSUB_VV, MASK_VWSUB_VV)
+DECLARE_INSN(vwaddu_wv, MATCH_VWADDU_WV, MASK_VWADDU_WV)
+DECLARE_INSN(vwadd_wv, MATCH_VWADD_WV, MASK_VWADD_WV)
+DECLARE_INSN(vwsubu_wv, MATCH_VWSUBU_WV, MASK_VWSUBU_WV)
+DECLARE_INSN(vwsub_wv, MATCH_VWSUB_WV, MASK_VWSUB_WV)
+DECLARE_INSN(vwmulu_vv, MATCH_VWMULU_VV, MASK_VWMULU_VV)
+DECLARE_INSN(vwmulsu_vv, MATCH_VWMULSU_VV, MASK_VWMULSU_VV)
+DECLARE_INSN(vwmul_vv, MATCH_VWMUL_VV, MASK_VWMUL_VV)
+DECLARE_INSN(vwmaccu_vv, MATCH_VWMACCU_VV, MASK_VWMACCU_VV)
+DECLARE_INSN(vwmacc_vv, MATCH_VWMACC_VV, MASK_VWMACC_VV)
+DECLARE_INSN(vwmaccsu_vv, MATCH_VWMACCSU_VV, MASK_VWMACCSU_VV)
+DECLARE_INSN(vaaddu_vx, MATCH_VAADDU_VX, MASK_VAADDU_VX)
+DECLARE_INSN(vaadd_vx, MATCH_VAADD_VX, MASK_VAADD_VX)
+DECLARE_INSN(vasubu_vx, MATCH_VASUBU_VX, MASK_VASUBU_VX)
+DECLARE_INSN(vasub_vx, MATCH_VASUB_VX, MASK_VASUB_VX)
+DECLARE_INSN(vmv_s_x, MATCH_VMV_S_X, MASK_VMV_S_X)
+DECLARE_INSN(vslide1up_vx, MATCH_VSLIDE1UP_VX, MASK_VSLIDE1UP_VX)
+DECLARE_INSN(vslide1down_vx, MATCH_VSLIDE1DOWN_VX, MASK_VSLIDE1DOWN_VX)
+DECLARE_INSN(vdivu_vx, MATCH_VDIVU_VX, MASK_VDIVU_VX)
+DECLARE_INSN(vdiv_vx, MATCH_VDIV_VX, MASK_VDIV_VX)
+DECLARE_INSN(vremu_vx, MATCH_VREMU_VX, MASK_VREMU_VX)
+DECLARE_INSN(vrem_vx, MATCH_VREM_VX, MASK_VREM_VX)
+DECLARE_INSN(vmulhu_vx, MATCH_VMULHU_VX, MASK_VMULHU_VX)
+DECLARE_INSN(vmul_vx, MATCH_VMUL_VX, MASK_VMUL_VX)
+DECLARE_INSN(vmulhsu_vx, MATCH_VMULHSU_VX, MASK_VMULHSU_VX)
+DECLARE_INSN(vmulh_vx, MATCH_VMULH_VX, MASK_VMULH_VX)
+DECLARE_INSN(vmadd_vx, MATCH_VMADD_VX, MASK_VMADD_VX)
+DECLARE_INSN(vnmsub_vx, MATCH_VNMSUB_VX, MASK_VNMSUB_VX)
+DECLARE_INSN(vmacc_vx, MATCH_VMACC_VX, MASK_VMACC_VX)
+DECLARE_INSN(vnmsac_vx, MATCH_VNMSAC_VX, MASK_VNMSAC_VX)
+DECLARE_INSN(vwaddu_vx, MATCH_VWADDU_VX, MASK_VWADDU_VX)
+DECLARE_INSN(vwadd_vx, MATCH_VWADD_VX, MASK_VWADD_VX)
+DECLARE_INSN(vwsubu_vx, MATCH_VWSUBU_VX, MASK_VWSUBU_VX)
+DECLARE_INSN(vwsub_vx, MATCH_VWSUB_VX, MASK_VWSUB_VX)
+DECLARE_INSN(vwaddu_wx, MATCH_VWADDU_WX, MASK_VWADDU_WX)
+DECLARE_INSN(vwadd_wx, MATCH_VWADD_WX, MASK_VWADD_WX)
+DECLARE_INSN(vwsubu_wx, MATCH_VWSUBU_WX, MASK_VWSUBU_WX)
+DECLARE_INSN(vwsub_wx, MATCH_VWSUB_WX, MASK_VWSUB_WX)
+DECLARE_INSN(vwmulu_vx, MATCH_VWMULU_VX, MASK_VWMULU_VX)
+DECLARE_INSN(vwmulsu_vx, MATCH_VWMULSU_VX, MASK_VWMULSU_VX)
+DECLARE_INSN(vwmul_vx, MATCH_VWMUL_VX, MASK_VWMUL_VX)
+DECLARE_INSN(vwmaccu_vx, MATCH_VWMACCU_VX, MASK_VWMACCU_VX)
+DECLARE_INSN(vwmacc_vx, MATCH_VWMACC_VX, MASK_VWMACC_VX)
+DECLARE_INSN(vwmaccus_vx, MATCH_VWMACCUS_VX, MASK_VWMACCUS_VX)
+DECLARE_INSN(vwmaccsu_vx, MATCH_VWMACCSU_VX, MASK_VWMACCSU_VX)
+DECLARE_INSN(vamoswapei8_v, MATCH_VAMOSWAPEI8_V, MASK_VAMOSWAPEI8_V)
+DECLARE_INSN(vamoaddei8_v, MATCH_VAMOADDEI8_V, MASK_VAMOADDEI8_V)
+DECLARE_INSN(vamoxorei8_v, MATCH_VAMOXOREI8_V, MASK_VAMOXOREI8_V)
+DECLARE_INSN(vamoandei8_v, MATCH_VAMOANDEI8_V, MASK_VAMOANDEI8_V)
+DECLARE_INSN(vamoorei8_v, MATCH_VAMOOREI8_V, MASK_VAMOOREI8_V)
+DECLARE_INSN(vamominei8_v, MATCH_VAMOMINEI8_V, MASK_VAMOMINEI8_V)
+DECLARE_INSN(vamomaxei8_v, MATCH_VAMOMAXEI8_V, MASK_VAMOMAXEI8_V)
+DECLARE_INSN(vamominuei8_v, MATCH_VAMOMINUEI8_V, MASK_VAMOMINUEI8_V)
+DECLARE_INSN(vamomaxuei8_v, MATCH_VAMOMAXUEI8_V, MASK_VAMOMAXUEI8_V)
+DECLARE_INSN(vamoswapei16_v, MATCH_VAMOSWAPEI16_V, MASK_VAMOSWAPEI16_V)
+DECLARE_INSN(vamoaddei16_v, MATCH_VAMOADDEI16_V, MASK_VAMOADDEI16_V)
+DECLARE_INSN(vamoxorei16_v, MATCH_VAMOXOREI16_V, MASK_VAMOXOREI16_V)
+DECLARE_INSN(vamoandei16_v, MATCH_VAMOANDEI16_V, MASK_VAMOANDEI16_V)
+DECLARE_INSN(vamoorei16_v, MATCH_VAMOOREI16_V, MASK_VAMOOREI16_V)
+DECLARE_INSN(vamominei16_v, MATCH_VAMOMINEI16_V, MASK_VAMOMINEI16_V)
+DECLARE_INSN(vamomaxei16_v, MATCH_VAMOMAXEI16_V, MASK_VAMOMAXEI16_V)
+DECLARE_INSN(vamominuei16_v, MATCH_VAMOMINUEI16_V, MASK_VAMOMINUEI16_V)
+DECLARE_INSN(vamomaxuei16_v, MATCH_VAMOMAXUEI16_V, MASK_VAMOMAXUEI16_V)
+DECLARE_INSN(vamoswapei32_v, MATCH_VAMOSWAPEI32_V, MASK_VAMOSWAPEI32_V)
+DECLARE_INSN(vamoaddei32_v, MATCH_VAMOADDEI32_V, MASK_VAMOADDEI32_V)
+DECLARE_INSN(vamoxorei32_v, MATCH_VAMOXOREI32_V, MASK_VAMOXOREI32_V)
+DECLARE_INSN(vamoandei32_v, MATCH_VAMOANDEI32_V, MASK_VAMOANDEI32_V)
+DECLARE_INSN(vamoorei32_v, MATCH_VAMOOREI32_V, MASK_VAMOOREI32_V)
+DECLARE_INSN(vamominei32_v, MATCH_VAMOMINEI32_V, MASK_VAMOMINEI32_V)
+DECLARE_INSN(vamomaxei32_v, MATCH_VAMOMAXEI32_V, MASK_VAMOMAXEI32_V)
+DECLARE_INSN(vamominuei32_v, MATCH_VAMOMINUEI32_V, MASK_VAMOMINUEI32_V)
+DECLARE_INSN(vamomaxuei32_v, MATCH_VAMOMAXUEI32_V, MASK_VAMOMAXUEI32_V)
+DECLARE_INSN(vamoswapei64_v, MATCH_VAMOSWAPEI64_V, MASK_VAMOSWAPEI64_V)
+DECLARE_INSN(vamoaddei64_v, MATCH_VAMOADDEI64_V, MASK_VAMOADDEI64_V)
+DECLARE_INSN(vamoxorei64_v, MATCH_VAMOXOREI64_V, MASK_VAMOXOREI64_V)
+DECLARE_INSN(vamoandei64_v, MATCH_VAMOANDEI64_V, MASK_VAMOANDEI64_V)
+DECLARE_INSN(vamoorei64_v, MATCH_VAMOOREI64_V, MASK_VAMOOREI64_V)
+DECLARE_INSN(vamominei64_v, MATCH_VAMOMINEI64_V, MASK_VAMOMINEI64_V)
+DECLARE_INSN(vamomaxei64_v, MATCH_VAMOMAXEI64_V, MASK_VAMOMAXEI64_V)
+DECLARE_INSN(vamominuei64_v, MATCH_VAMOMINUEI64_V, MASK_VAMOMINUEI64_V)
+DECLARE_INSN(vamomaxuei64_v, MATCH_VAMOMAXUEI64_V, MASK_VAMOMAXUEI64_V)
+DECLARE_INSN(vmvnfr_v, MATCH_VMVNFR_V, MASK_VMVNFR_V)
+DECLARE_INSN(vl1r_v, MATCH_VL1R_V, MASK_VL1R_V)
+DECLARE_INSN(vl2r_v, MATCH_VL2R_V, MASK_VL2R_V)
+DECLARE_INSN(vl4r_v, MATCH_VL4R_V, MASK_VL4R_V)
+DECLARE_INSN(vl8r_v, MATCH_VL8R_V, MASK_VL8R_V)
#endif
#ifdef DECLARE_CSR
DECLARE_CSR(fflags, CSR_FFLAGS)
DECLARE_CSR(frm, CSR_FRM)
DECLARE_CSR(fcsr, CSR_FCSR)
+DECLARE_CSR(ustatus, CSR_USTATUS)
+DECLARE_CSR(uie, CSR_UIE)
+DECLARE_CSR(utvec, CSR_UTVEC)
+DECLARE_CSR(vstart, CSR_VSTART)
+DECLARE_CSR(vxsat, CSR_VXSAT)
+DECLARE_CSR(vxrm, CSR_VXRM)
+DECLARE_CSR(vcsr, CSR_VCSR)
+DECLARE_CSR(uscratch, CSR_USCRATCH)
+DECLARE_CSR(uepc, CSR_UEPC)
+DECLARE_CSR(ucause, CSR_UCAUSE)
+DECLARE_CSR(utval, CSR_UTVAL)
+DECLARE_CSR(uip, CSR_UIP)
DECLARE_CSR(cycle, CSR_CYCLE)
DECLARE_CSR(time, CSR_TIME)
DECLARE_CSR(instret, CSR_INSTRET)
@@ -1277,7 +2855,12 @@ DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28)
DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29)
DECLARE_CSR(hpmcounter30, CSR_HPMCOUNTER30)
DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31)
+DECLARE_CSR(vl, CSR_VL)
+DECLARE_CSR(vtype, CSR_VTYPE)
+DECLARE_CSR(vlenb, CSR_VLENB)
DECLARE_CSR(sstatus, CSR_SSTATUS)
+DECLARE_CSR(sedeleg, CSR_SEDELEG)
+DECLARE_CSR(sideleg, CSR_SIDELEG)
DECLARE_CSR(sie, CSR_SIE)
DECLARE_CSR(stvec, CSR_STVEC)
DECLARE_CSR(scounteren, CSR_SCOUNTEREN)
@@ -1287,6 +2870,43 @@ DECLARE_CSR(scause, CSR_SCAUSE)
DECLARE_CSR(stval, CSR_STVAL)
DECLARE_CSR(sip, CSR_SIP)
DECLARE_CSR(satp, CSR_SATP)
+DECLARE_CSR(vsstatus, CSR_VSSTATUS)
+DECLARE_CSR(vsie, CSR_VSIE)
+DECLARE_CSR(vstvec, CSR_VSTVEC)
+DECLARE_CSR(vsscratch, CSR_VSSCRATCH)
+DECLARE_CSR(vsepc, CSR_VSEPC)
+DECLARE_CSR(vscause, CSR_VSCAUSE)
+DECLARE_CSR(vstval, CSR_VSTVAL)
+DECLARE_CSR(vsip, CSR_VSIP)
+DECLARE_CSR(vsatp, CSR_VSATP)
+DECLARE_CSR(hstatus, CSR_HSTATUS)
+DECLARE_CSR(hedeleg, CSR_HEDELEG)
+DECLARE_CSR(hideleg, CSR_HIDELEG)
+DECLARE_CSR(hie, CSR_HIE)
+DECLARE_CSR(htimedelta, CSR_HTIMEDELTA)
+DECLARE_CSR(hcounteren, CSR_HCOUNTEREN)
+DECLARE_CSR(hgeie, CSR_HGEIE)
+DECLARE_CSR(htval, CSR_HTVAL)
+DECLARE_CSR(hip, CSR_HIP)
+DECLARE_CSR(hvip, CSR_HVIP)
+DECLARE_CSR(htinst, CSR_HTINST)
+DECLARE_CSR(hgatp, CSR_HGATP)
+DECLARE_CSR(hgeip, CSR_HGEIP)
+DECLARE_CSR(utvt, CSR_UTVT)
+DECLARE_CSR(unxti, CSR_UNXTI)
+DECLARE_CSR(uintstatus, CSR_UINTSTATUS)
+DECLARE_CSR(uscratchcsw, CSR_USCRATCHCSW)
+DECLARE_CSR(uscratchcswl, CSR_USCRATCHCSWL)
+DECLARE_CSR(stvt, CSR_STVT)
+DECLARE_CSR(snxti, CSR_SNXTI)
+DECLARE_CSR(sintstatus, CSR_SINTSTATUS)
+DECLARE_CSR(sscratchcsw, CSR_SSCRATCHCSW)
+DECLARE_CSR(sscratchcswl, CSR_SSCRATCHCSWL)
+DECLARE_CSR(mtvt, CSR_MTVT)
+DECLARE_CSR(mnxti, CSR_MNXTI)
+DECLARE_CSR(mintstatus, CSR_MINTSTATUS)
+DECLARE_CSR(mscratchcsw, CSR_MSCRATCHCSW)
+DECLARE_CSR(mscratchcswl, CSR_MSCRATCHCSWL)
DECLARE_CSR(mstatus, CSR_MSTATUS)
DECLARE_CSR(misa, CSR_MISA)
DECLARE_CSR(medeleg, CSR_MEDELEG)
@@ -1294,11 +2914,14 @@ DECLARE_CSR(mideleg, CSR_MIDELEG)
DECLARE_CSR(mie, CSR_MIE)
DECLARE_CSR(mtvec, CSR_MTVEC)
DECLARE_CSR(mcounteren, CSR_MCOUNTEREN)
+DECLARE_CSR(mcountinhibit, CSR_MCOUNTINHIBIT)
DECLARE_CSR(mscratch, CSR_MSCRATCH)
DECLARE_CSR(mepc, CSR_MEPC)
DECLARE_CSR(mcause, CSR_MCAUSE)
DECLARE_CSR(mtval, CSR_MTVAL)
DECLARE_CSR(mip, CSR_MIP)
+DECLARE_CSR(mtinst, CSR_MTINST)
+DECLARE_CSR(mtval2, CSR_MTVAL2)
DECLARE_CSR(pmpcfg0, CSR_PMPCFG0)
DECLARE_CSR(pmpcfg1, CSR_PMPCFG1)
DECLARE_CSR(pmpcfg2, CSR_PMPCFG2)
@@ -1325,7 +2948,8 @@ DECLARE_CSR(tdata2, CSR_TDATA2)
DECLARE_CSR(tdata3, CSR_TDATA3)
DECLARE_CSR(dcsr, CSR_DCSR)
DECLARE_CSR(dpc, CSR_DPC)
-DECLARE_CSR(dscratch, CSR_DSCRATCH)
+DECLARE_CSR(dscratch0, CSR_DSCRATCH0)
+DECLARE_CSR(dscratch1, CSR_DSCRATCH1)
DECLARE_CSR(mcycle, CSR_MCYCLE)
DECLARE_CSR(minstret, CSR_MINSTRET)
DECLARE_CSR(mhpmcounter3, CSR_MHPMCOUNTER3)
@@ -1390,6 +3014,7 @@ DECLARE_CSR(mvendorid, CSR_MVENDORID)
DECLARE_CSR(marchid, CSR_MARCHID)
DECLARE_CSR(mimpid, CSR_MIMPID)
DECLARE_CSR(mhartid, CSR_MHARTID)
+DECLARE_CSR(htimedeltah, CSR_HTIMEDELTAH)
DECLARE_CSR(cycleh, CSR_CYCLEH)
DECLARE_CSR(timeh, CSR_TIMEH)
DECLARE_CSR(instreth, CSR_INSTRETH)
@@ -1422,6 +3047,7 @@ DECLARE_CSR(hpmcounter28h, CSR_HPMCOUNTER28H)
DECLARE_CSR(hpmcounter29h, CSR_HPMCOUNTER29H)
DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H)
DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H)
+DECLARE_CSR(mstatush, CSR_MSTATUSH)
DECLARE_CSR(mcycleh, CSR_MCYCLEH)
DECLARE_CSR(minstreth, CSR_MINSTRETH)
DECLARE_CSR(mhpmcounter3h, CSR_MHPMCOUNTER3H)
@@ -1465,9 +3091,13 @@ DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE)
DECLARE_CAUSE("store access", CAUSE_STORE_ACCESS)
DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL)
DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL)
-DECLARE_CAUSE("hypervisor_ecall", CAUSE_HYPERVISOR_ECALL)
+DECLARE_CAUSE("virtual_supervisor_ecall", CAUSE_VIRTUAL_SUPERVISOR_ECALL)
DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL)
DECLARE_CAUSE("fetch page fault", CAUSE_FETCH_PAGE_FAULT)
DECLARE_CAUSE("load page fault", CAUSE_LOAD_PAGE_FAULT)
DECLARE_CAUSE("store page fault", CAUSE_STORE_PAGE_FAULT)
+DECLARE_CAUSE("fetch guest page fault", CAUSE_FETCH_GUEST_PAGE_FAULT)
+DECLARE_CAUSE("load guest page fault", CAUSE_LOAD_GUEST_PAGE_FAULT)
+DECLARE_CAUSE("virtual instruction", CAUSE_VIRTUAL_INSTRUCTION)
+DECLARE_CAUSE("store guest page fault", CAUSE_STORE_GUEST_PAGE_FAULT)
#endif
diff --git a/src/target/riscv/gdb_regs.h b/src/target/riscv/gdb_regs.h
index a587952..32bc1d5 100644
--- a/src/target/riscv/gdb_regs.h
+++ b/src/target/riscv/gdb_regs.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#ifndef TARGET__RISCV__GDB_REGS_H
#define TARGET__RISCV__GDB_REGS_H
@@ -21,6 +23,7 @@ enum gdb_regno {
GDB_REGNO_A3,
GDB_REGNO_A4,
GDB_REGNO_A5,
+ GDB_REGNO_XPR15 = GDB_REGNO_A5,
GDB_REGNO_A6,
GDB_REGNO_A7,
GDB_REGNO_S2,
@@ -75,16 +78,37 @@ enum gdb_regno {
GDB_REGNO_FT11,
GDB_REGNO_FPR31 = GDB_REGNO_FT11,
GDB_REGNO_CSR0 = 65,
+ GDB_REGNO_VSTART = CSR_VSTART + GDB_REGNO_CSR0,
+ GDB_REGNO_VXSAT = CSR_VXSAT + GDB_REGNO_CSR0,
+ GDB_REGNO_VXRM = CSR_VXRM + GDB_REGNO_CSR0,
+ GDB_REGNO_VLENB = CSR_VLENB + GDB_REGNO_CSR0,
+ GDB_REGNO_VL = CSR_VL + GDB_REGNO_CSR0,
+ GDB_REGNO_VTYPE = CSR_VTYPE + GDB_REGNO_CSR0,
GDB_REGNO_TSELECT = CSR_TSELECT + GDB_REGNO_CSR0,
GDB_REGNO_TDATA1 = CSR_TDATA1 + GDB_REGNO_CSR0,
GDB_REGNO_TDATA2 = CSR_TDATA2 + GDB_REGNO_CSR0,
GDB_REGNO_MISA = CSR_MISA + GDB_REGNO_CSR0,
GDB_REGNO_DPC = CSR_DPC + GDB_REGNO_CSR0,
GDB_REGNO_DCSR = CSR_DCSR + GDB_REGNO_CSR0,
- GDB_REGNO_DSCRATCH = CSR_DSCRATCH + GDB_REGNO_CSR0,
+ GDB_REGNO_DSCRATCH0 = CSR_DSCRATCH0 + GDB_REGNO_CSR0,
GDB_REGNO_MSTATUS = CSR_MSTATUS + GDB_REGNO_CSR0,
+ GDB_REGNO_MEPC = CSR_MEPC + GDB_REGNO_CSR0,
+ GDB_REGNO_MCAUSE = CSR_MCAUSE + GDB_REGNO_CSR0,
+ GDB_REGNO_SATP = CSR_SATP + GDB_REGNO_CSR0,
GDB_REGNO_CSR4095 = GDB_REGNO_CSR0 + 4095,
GDB_REGNO_PRIV = 4161,
+ /* It's still undecided what register numbers GDB will actually use for
+ * these. See
+ * https://groups.google.com/a/groups.riscv.org/d/msg/sw-dev/7lQYiTUN9Ms/gTxGhzaYBQAJ
+ */
+ GDB_REGNO_V0, GDB_REGNO_V1, GDB_REGNO_V2, GDB_REGNO_V3,
+ GDB_REGNO_V4, GDB_REGNO_V5, GDB_REGNO_V6, GDB_REGNO_V7,
+ GDB_REGNO_V8, GDB_REGNO_V9, GDB_REGNO_V10, GDB_REGNO_V11,
+ GDB_REGNO_V12, GDB_REGNO_V13, GDB_REGNO_V14, GDB_REGNO_V15,
+ GDB_REGNO_V16, GDB_REGNO_V17, GDB_REGNO_V18, GDB_REGNO_V19,
+ GDB_REGNO_V20, GDB_REGNO_V21, GDB_REGNO_V22, GDB_REGNO_V23,
+ GDB_REGNO_V24, GDB_REGNO_V25, GDB_REGNO_V26, GDB_REGNO_V27,
+ GDB_REGNO_V28, GDB_REGNO_V29, GDB_REGNO_V30, GDB_REGNO_V31,
GDB_REGNO_COUNT
};
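
For orientation, the CSR block of this enum is laid out so that converting between a GDB register number and a CSR address is a constant offset. A minimal sketch (the helper names here are hypothetical and not part of the patch):

static inline enum gdb_regno csr_to_gdb_regno(unsigned int csr)
{
	return (enum gdb_regno)(GDB_REGNO_CSR0 + csr);	/* e.g. CSR_MISA -> GDB_REGNO_MISA */
}

static inline unsigned int gdb_regno_to_csr(enum gdb_regno regno)
{
	return regno - GDB_REGNO_CSR0;	/* valid for GDB_REGNO_CSR0..GDB_REGNO_CSR4095 */
}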
diff --git a/src/target/riscv/opcodes.h b/src/target/riscv/opcodes.h
index de85aad..998290c 100644
--- a/src/target/riscv/opcodes.h
+++ b/src/target/riscv/opcodes.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#include "encoding.h"
#define ZERO 0
@@ -143,6 +145,18 @@ static uint32_t csrrw(unsigned int rd, unsigned int rs, unsigned int csr)
return (csr << 20) | (rs << 15) | (rd << 7) | MATCH_CSRRW;
}
+static uint32_t csrrci(unsigned int rd, unsigned int zimm, unsigned int csr) __attribute__ ((unused));
+static uint32_t csrrci(unsigned int rd, unsigned int zimm, unsigned int csr)
+{
+ return (csr << 20) | (zimm << 15) | (rd << 7) | MATCH_CSRRCI;
+}
+
+static uint32_t csrrsi(unsigned int rd, unsigned int zimm, unsigned int csr) __attribute__ ((unused));
+static uint32_t csrrsi(unsigned int rd, unsigned int zimm, unsigned int csr)
+{
+ return (csr << 20) | (zimm << 15) | (rd << 7) | MATCH_CSRRSI;
+}
+
static uint32_t fsw(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused));
static uint32_t fsw(unsigned int src, unsigned int base, uint16_t offset)
{
@@ -311,3 +325,33 @@ static uint32_t auipc(unsigned int dest)
{
return MATCH_AUIPC | (dest << 7);
}
+
+static uint32_t vsetvli(unsigned int dest, unsigned int src, uint16_t imm) __attribute__((unused));
+static uint32_t vsetvli(unsigned int dest, unsigned int src, uint16_t imm)
+{
+ return (bits(imm, 10, 0) << 20) |
+ (src << 15) |
+ (dest << 7) |
+ MATCH_VSETVLI;
+}
+
+static uint32_t vmv_x_s(unsigned int rd, unsigned int vs2) __attribute__((unused));
+static uint32_t vmv_x_s(unsigned int rd, unsigned int vs2)
+{
+ return (vs2 << 20) | (rd << 7) | MATCH_VMV_X_S;
+}
+
+static uint32_t vmv_s_x(unsigned int vd, unsigned int rs1) __attribute__((unused));
+static uint32_t vmv_s_x(unsigned int vd, unsigned int rs1)
+{
+ return (rs1 << 15) | (vd << 7) | MATCH_VMV_S_X;
+}
+
+static uint32_t vslide1down_vx(unsigned int vd, unsigned int vs2,
+ unsigned int rs1, unsigned int vm) __attribute__((unused));
+static uint32_t vslide1down_vx(unsigned int vd, unsigned int vs2,
+ unsigned int rs1, unsigned int vm)
+{
+ return (vm << 25) | (vs2 << 20) | (rs1 << 15) | (vd << 7) |
+ MATCH_VSLIDE1DOWN_VX;
+}
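
To illustrate how these encoder helpers compose an instruction word, here is a sketch in the same style as the file (the example function is hypothetical and not part of the patch):

static uint32_t example_csrrsi_s0_dcsr(void) __attribute__ ((unused));
static uint32_t example_csrrsi_s0_dcsr(void)
{
	/* "csrrsi s0, dcsr, 0x4": rd = x8 (s0), uimm = 0x4 (DCSR.step), csr = CSR_DCSR.
	 * Expands to (CSR_DCSR << 20) | (0x4 << 15) | (8 << 7) | MATCH_CSRRSI. */
	return csrrsi(8, 0x4, CSR_DCSR);
}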
diff --git a/src/target/riscv/program.c b/src/target/riscv/program.c
index 5e899b2..8e2ce5d 100644
--- a/src/target/riscv/program.c
+++ b/src/target/riscv/program.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
@@ -30,7 +32,7 @@ int riscv_program_init(struct riscv_program *p, struct target *target)
int riscv_program_write(struct riscv_program *program)
{
for (unsigned i = 0; i < program->instruction_count; ++i) {
- LOG_DEBUG("%p: debug_buffer[%02x] = DASM(0x%08x)", program, i, program->debug_buffer[i]);
+ LOG_DEBUG("debug_buffer[%02x] = DASM(0x%08x)", i, program->debug_buffer[i]);
if (riscv_write_debug_buffer(program->target, i,
program->debug_buffer[i]) != ERROR_OK)
return ERROR_FAIL;
@@ -56,7 +58,8 @@ int riscv_program_exec(struct riscv_program *p, struct target *t)
if (riscv_program_ebreak(p) != ERROR_OK) {
LOG_ERROR("Unable to write ebreak");
for (size_t i = 0; i < riscv_debug_buffer_size(p->target); ++i)
- LOG_ERROR("ram[%02x]: DASM(0x%08lx) [0x%08lx]", (int)i, (long)p->debug_buffer[i], (long)p->debug_buffer[i]);
+ LOG_ERROR("ram[%02x]: DASM(0x%08" PRIx32 ") [0x%08" PRIx32 "]",
+ (int)i, p->debug_buffer[i], p->debug_buffer[i]);
return ERROR_FAIL;
}
@@ -79,6 +82,11 @@ int riscv_program_exec(struct riscv_program *p, struct target *t)
return ERROR_OK;
}
+int riscv_program_sdr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
+{
+ return riscv_program_insert(p, sd(d, b, offset));
+}
+
int riscv_program_swr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
{
return riscv_program_insert(p, sw(d, b, offset));
@@ -94,6 +102,11 @@ int riscv_program_sbr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno
return riscv_program_insert(p, sb(d, b, offset));
}
+int riscv_program_ldr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
+{
+ return riscv_program_insert(p, ld(d, b, offset));
+}
+
int riscv_program_lwr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
{
return riscv_program_insert(p, lw(d, b, offset));
@@ -109,6 +122,18 @@ int riscv_program_lbr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno
return riscv_program_insert(p, lb(d, b, offset));
}
+int riscv_program_csrrsi(struct riscv_program *p, enum gdb_regno d, unsigned int z, enum gdb_regno csr)
+{
+ assert(csr >= GDB_REGNO_CSR0 && csr <= GDB_REGNO_CSR4095);
+ return riscv_program_insert(p, csrrsi(d, z, csr - GDB_REGNO_CSR0));
+}
+
+int riscv_program_csrrci(struct riscv_program *p, enum gdb_regno d, unsigned int z, enum gdb_regno csr)
+{
+ assert(csr >= GDB_REGNO_CSR0 && csr <= GDB_REGNO_CSR4095);
+ return riscv_program_insert(p, csrrci(d, z, csr - GDB_REGNO_CSR0));
+}
+
int riscv_program_csrr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno csr)
{
assert(csr >= GDB_REGNO_CSR0 && csr <= GDB_REGNO_CSR4095);
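
A usage sketch for the new immediate-form CSR helpers (illustrative only, not part of the patch; it assumes a halted hart and a `target` pointer in scope):

struct riscv_program program;
riscv_program_init(&program, target);
/* Set DCSR.step (bit 2) without needing a scratch GPR: rd = zero discards
 * the old CSR value and the 5-bit immediate carries the mask. */
if (riscv_program_csrrsi(&program, GDB_REGNO_ZERO, 0x4, GDB_REGNO_DCSR) != ERROR_OK)
	return ERROR_FAIL;
if (riscv_program_exec(&program, target) != ERROR_OK)
	return ERROR_FAIL;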
diff --git a/src/target/riscv/program.h b/src/target/riscv/program.h
index 310460c..2fa925a 100644
--- a/src/target/riscv/program.h
+++ b/src/target/riscv/program.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#ifndef TARGET__RISCV__PROGRAM_H
#define TARGET__RISCV__PROGRAM_H
@@ -55,14 +57,18 @@ int riscv_program_save_to_dscratch(struct riscv_program *p, enum gdb_regno to_sa
/* Helpers to assemble various instructions. Return 0 on success. These might
* assemble into a multi-instruction sequence that overwrites some other
* register, but those will be properly saved and restored. */
+int riscv_program_ldr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno a, int o);
int riscv_program_lwr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno a, int o);
int riscv_program_lhr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno a, int o);
int riscv_program_lbr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno a, int o);
+int riscv_program_sdr(struct riscv_program *p, enum gdb_regno s, enum gdb_regno a, int o);
int riscv_program_swr(struct riscv_program *p, enum gdb_regno s, enum gdb_regno a, int o);
int riscv_program_shr(struct riscv_program *p, enum gdb_regno s, enum gdb_regno a, int o);
int riscv_program_sbr(struct riscv_program *p, enum gdb_regno s, enum gdb_regno a, int o);
+int riscv_program_csrrsi(struct riscv_program *p, enum gdb_regno d, unsigned int z, enum gdb_regno csr);
+int riscv_program_csrrci(struct riscv_program *p, enum gdb_regno d, unsigned int z, enum gdb_regno csr);
int riscv_program_csrr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno csr);
int riscv_program_csrw(struct riscv_program *p, enum gdb_regno s, enum gdb_regno csr);
diff --git a/src/target/riscv/riscv-011.c b/src/target/riscv/riscv-011.c
index cb7b744..9b5f749 100644
--- a/src/target/riscv/riscv-011.c
+++ b/src/target/riscv/riscv-011.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
/*
* Support for RISC-V, debug version 0.11. This was never an officially adopted
* spec, but SiFive made some silicon that uses it.
@@ -204,7 +206,6 @@ typedef struct {
* before the interrupt is cleared. */
unsigned int interrupt_high_delay;
- bool need_strict_step;
bool never_halted;
} riscv011_info_t;
@@ -519,6 +520,8 @@ typedef struct {
static scans_t *scans_new(struct target *target, unsigned int scan_count)
{
scans_t *scans = malloc(sizeof(scans_t));
+ if (!scans)
+ goto error0;
scans->scan_count = scan_count;
/* This code also gets called before xlen is detected. */
if (riscv_xlen(target))
@@ -527,10 +530,25 @@ static scans_t *scans_new(struct target *target, unsigned int scan_count)
scans->scan_size = 2 + 128 / 8;
scans->next_scan = 0;
scans->in = calloc(scans->scan_size, scans->scan_count);
+ if (!scans->in)
+ goto error1;
scans->out = calloc(scans->scan_size, scans->scan_count);
+ if (!scans->out)
+ goto error2;
scans->field = calloc(scans->scan_count, sizeof(struct scan_field));
+ if (!scans->field)
+ goto error3;
scans->target = target;
return scans;
+
+error3:
+ free(scans->out);
+error2:
+ free(scans->in);
+error1:
+ free(scans);
+error0:
+ return NULL;
}
static scans_t *scans_delete(scans_t *scans)
@@ -844,6 +862,8 @@ static int cache_write(struct target *target, unsigned int address, bool run)
LOG_DEBUG("enter");
riscv011_info_t *info = get_info(target);
scans_t *scans = scans_new(target, info->dramsize + 2);
+ if (!scans)
+ return ERROR_FAIL;
unsigned int last = info->dramsize;
for (unsigned int i = 0; i < info->dramsize; i++) {
@@ -1012,7 +1032,7 @@ static int wait_for_state(struct target *target, enum target_state state)
}
}
-static int read_csr(struct target *target, uint64_t *value, uint32_t csr)
+static int read_remote_csr(struct target *target, uint64_t *value, uint32_t csr)
{
riscv011_info_t *info = get_info(target);
cache_set32(target, 0, csrr(S0, csr));
@@ -1034,7 +1054,7 @@ static int read_csr(struct target *target, uint64_t *value, uint32_t csr)
return ERROR_OK;
}
-static int write_csr(struct target *target, uint32_t csr, uint64_t value)
+static int write_remote_csr(struct target *target, uint32_t csr, uint64_t value)
{
LOG_DEBUG("csr 0x%x <- 0x%" PRIx64, csr, value);
cache_set_load(target, 0, S0, SLOT0);
@@ -1062,7 +1082,7 @@ static int maybe_read_tselect(struct target *target)
riscv011_info_t *info = get_info(target);
if (info->tselect_dirty) {
- int result = read_csr(target, &info->tselect, CSR_TSELECT);
+ int result = read_remote_csr(target, &info->tselect, CSR_TSELECT);
if (result != ERROR_OK)
return result;
info->tselect_dirty = false;
@@ -1076,7 +1096,7 @@ static int maybe_write_tselect(struct target *target)
riscv011_info_t *info = get_info(target);
if (!info->tselect_dirty) {
- int result = write_csr(target, CSR_TSELECT, info->tselect);
+ int result = write_remote_csr(target, CSR_TSELECT, info->tselect);
if (result != ERROR_OK)
return result;
info->tselect_dirty = true;
@@ -1115,7 +1135,10 @@ static int execute_resume(struct target *target, bool step)
}
}
- info->dcsr |= DCSR_EBREAKM | DCSR_EBREAKH | DCSR_EBREAKS | DCSR_EBREAKU;
+ info->dcsr = set_field(info->dcsr, DCSR_EBREAKM, riscv_ebreakm);
+ info->dcsr = set_field(info->dcsr, DCSR_EBREAKS, riscv_ebreaks);
+ info->dcsr = set_field(info->dcsr, DCSR_EBREAKU, riscv_ebreaku);
+ info->dcsr = set_field(info->dcsr, DCSR_EBREAKH, 1);
info->dcsr &= ~DCSR_HALT;
if (step)
@@ -1255,7 +1278,7 @@ static int register_write(struct target *target, unsigned int number,
if (number == S0) {
cache_set_load(target, 0, S0, SLOT0);
- cache_set32(target, 1, csrw(S0, CSR_DSCRATCH));
+ cache_set32(target, 1, csrw(S0, CSR_DSCRATCH0));
cache_set_jump(target, 2);
} else if (number == S1) {
cache_set_load(target, 0, S0, SLOT0);
@@ -1384,25 +1407,6 @@ static int halt(struct target *target)
return ERROR_OK;
}
-static int init_target(struct command_context *cmd_ctx,
- struct target *target)
-{
- LOG_DEBUG("init");
- riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
- generic_info->get_register = get_register;
- generic_info->set_register = set_register;
-
- generic_info->version_specific = calloc(1, sizeof(riscv011_info_t));
- if (!generic_info->version_specific)
- return ERROR_FAIL;
-
- /* Assume 32-bit until we discover the real value in examine(). */
- generic_info->xlen[0] = 32;
- riscv_init_registers(target);
-
- return ERROR_OK;
-}
-
static void deinit_target(struct target *target)
{
LOG_DEBUG("riscv_deinit_target()");
@@ -1413,8 +1417,6 @@ static void deinit_target(struct target *target)
static int strict_step(struct target *target, bool announce)
{
- riscv011_info_t *info = get_info(target);
-
LOG_DEBUG("enter");
struct watchpoint *watchpoint = target->watchpoints;
@@ -1433,16 +1435,12 @@ static int strict_step(struct target *target, bool announce)
watchpoint = watchpoint->next;
}
- info->need_strict_step = false;
-
return ERROR_OK;
}
static int step(struct target *target, int current, target_addr_t address,
int handle_breakpoints)
{
- riscv011_info_t *info = get_info(target);
-
jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
if (!current) {
@@ -1455,7 +1453,7 @@ static int step(struct target *target, int current, target_addr_t address,
return result;
}
- if (info->need_strict_step || handle_breakpoints) {
+ if (handle_breakpoints) {
int result = strict_step(target, true);
if (result != ERROR_OK)
return result;
@@ -1486,7 +1484,6 @@ static int examine(struct target *target)
}
RISCV_INFO(r);
- r->hart_count = 1;
riscv011_info_t *info = get_info(target);
info->addrbits = get_field(dtmcontrol, DTMCONTROL_ADDRBITS);
@@ -1570,11 +1567,11 @@ static int examine(struct target *target)
}
LOG_DEBUG("Discovered XLEN is %d", riscv_xlen(target));
- if (read_csr(target, &r->misa[0], CSR_MISA) != ERROR_OK) {
+ if (read_remote_csr(target, &r->misa[0], CSR_MISA) != ERROR_OK) {
const unsigned old_csr_misa = 0xf10;
LOG_WARNING("Failed to read misa at 0x%x; trying 0x%x.", CSR_MISA,
old_csr_misa);
- if (read_csr(target, &r->misa[0], old_csr_misa) != ERROR_OK) {
+ if (read_remote_csr(target, &r->misa[0], old_csr_misa) != ERROR_OK) {
/* Maybe this is an old core that still has $misa at the old
* address. */
LOG_ERROR("Failed to read misa at 0x%x.", old_csr_misa);
@@ -1606,6 +1603,8 @@ static riscv_error_t handle_halt_routine(struct target *target)
riscv011_info_t *info = get_info(target);
scans_t *scans = scans_new(target, 256);
+ if (!scans)
+ return RE_FAIL;
/* Read all GPRs as fast as we can, because gdb is going to ask for them
* anyway. Reading them one at a time is much slower. */
@@ -1634,7 +1633,7 @@ static riscv_error_t handle_halt_routine(struct target *target)
scans_add_read(scans, SLOT0, false);
/* Read S0 from dscratch */
- unsigned int csr[] = {CSR_DSCRATCH, CSR_DPC, CSR_DCSR};
+ unsigned int csr[] = {CSR_DSCRATCH0, CSR_DPC, CSR_DCSR};
for (unsigned int i = 0; i < DIM(csr); i++) {
scans_add_write32(scans, 0, csrr(S0, csr[i]), true);
scans_add_read(scans, SLOT0, false);
@@ -1848,9 +1847,6 @@ static int handle_halt(struct target *target, bool announce)
break;
case DCSR_CAUSE_HWBP:
target->debug_reason = DBG_REASON_WATCHPOINT;
- /* If we halted because of a data trigger, gdb doesn't know to do
- * the disable-breakpoints-step-enable-breakpoints dance. */
- info->need_strict_step = true;
break;
case DCSR_CAUSE_DEBUGINT:
target->debug_reason = DBG_REASON_DBGRQ;
@@ -1935,26 +1931,10 @@ static int riscv011_poll(struct target *target)
static int riscv011_resume(struct target *target, int current,
target_addr_t address, int handle_breakpoints, int debug_execution)
{
- riscv011_info_t *info = get_info(target);
-
+ RISCV_INFO(r);
jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
- if (!current) {
- if (riscv_xlen(target) > 32) {
- LOG_WARNING("Asked to resume at 32-bit PC on %d-bit target.",
- riscv_xlen(target));
- }
- int result = register_write(target, GDB_REGNO_PC, address);
- if (result != ERROR_OK)
- return result;
- }
-
- if (info->need_strict_step || handle_breakpoints) {
- int result = strict_step(target, false);
- if (result != ERROR_OK)
- return result;
- }
-
+ r->prepped = false;
return resume(target, debug_execution, false);
}
@@ -1973,8 +1953,11 @@ static int assert_reset(struct target *target)
/* Not sure what we should do when there are multiple cores.
* Here just reset the single hart we're talking to. */
- info->dcsr |= DCSR_EBREAKM | DCSR_EBREAKH | DCSR_EBREAKS |
- DCSR_EBREAKU | DCSR_HALT;
+ info->dcsr = set_field(info->dcsr, DCSR_EBREAKM, riscv_ebreakm);
+ info->dcsr = set_field(info->dcsr, DCSR_EBREAKS, riscv_ebreaks);
+ info->dcsr = set_field(info->dcsr, DCSR_EBREAKU, riscv_ebreaku);
+ info->dcsr = set_field(info->dcsr, DCSR_EBREAKH, 1);
+ info->dcsr |= DCSR_HALT;
if (target->reset_halt)
info->dcsr |= DCSR_NDRESET;
else
@@ -2001,8 +1984,13 @@ static int deassert_reset(struct target *target)
}
static int read_memory(struct target *target, target_addr_t address,
- uint32_t size, uint32_t count, uint8_t *buffer)
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
+ if (increment != size) {
+ LOG_ERROR("read_memory with custom increment not implemented");
+ return ERROR_NOT_IMPLEMENTED;
+ }
+
jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
cache_set32(target, 0, lw(S0, ZERO, DEBUG_RAM_START + 16));
@@ -2029,6 +2017,8 @@ static int read_memory(struct target *target, target_addr_t address,
riscv011_info_t *info = get_info(target);
const unsigned max_batch_size = 256;
scans_t *scans = scans_new(target, max_batch_size);
+ if (!scans)
+ return ERROR_FAIL;
uint32_t result_value = 0x777;
uint32_t i = 0;
@@ -2185,6 +2175,8 @@ static int write_memory(struct target *target, target_addr_t address,
const unsigned max_batch_size = 256;
scans_t *scans = scans_new(target, max_batch_size);
+ if (!scans)
+ return ERROR_FAIL;
uint32_t result_value = 0x777;
uint32_t i = 0;
@@ -2304,6 +2296,26 @@ static int arch_state(struct target *target)
return ERROR_OK;
}
+static int init_target(struct command_context *cmd_ctx,
+ struct target *target)
+{
+ LOG_DEBUG("init");
+ riscv_info_t *generic_info = (riscv_info_t *)target->arch_info;
+ generic_info->get_register = get_register;
+ generic_info->set_register = set_register;
+ generic_info->read_memory = read_memory;
+
+ generic_info->version_specific = calloc(1, sizeof(riscv011_info_t));
+ if (!generic_info->version_specific)
+ return ERROR_FAIL;
+
+ /* Assume 32-bit until we discover the real value in examine(). */
+ generic_info->xlen[0] = 32;
+ riscv_init_registers(target);
+
+ return ERROR_OK;
+}
+
struct target_type riscv011_target = {
.name = "riscv",
@@ -2321,7 +2333,6 @@ struct target_type riscv011_target = {
.assert_reset = assert_reset,
.deassert_reset = deassert_reset,
- .read_memory = read_memory,
.write_memory = write_memory,
.arch_state = arch_state,
diff --git a/src/target/riscv/riscv-013.c b/src/target/riscv/riscv-013.c
index 2f8da5b..8558ba8 100644
--- a/src/target/riscv/riscv-013.c
+++ b/src/target/riscv/riscv-013.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
/*
* Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
* latest draft.
@@ -27,11 +29,12 @@
#include "asm.h"
#include "batch.h"
-#define DMI_DATA1 (DMI_DATA0 + 1)
-#define DMI_PROGBUF1 (DMI_PROGBUF0 + 1)
+#define DM_DATA1 (DM_DATA0 + 1)
+#define DM_PROGBUF1 (DM_PROGBUF0 + 1)
static int riscv013_on_step_or_resume(struct target *target, bool step);
-static int riscv013_step_or_resume_current_hart(struct target *target, bool step);
+static int riscv013_step_or_resume_current_hart(struct target *target,
+ bool step, bool use_hasel);
static void riscv013_clear_abstract_error(struct target *target);
/* Implementations of the functions in riscv_info_t. */
@@ -39,12 +42,13 @@ static int riscv013_get_register(struct target *target,
riscv_reg_t *value, int hid, int rid);
static int riscv013_set_register(struct target *target, int hartid, int regid, uint64_t value);
static int riscv013_select_current_hart(struct target *target);
-static int riscv013_halt_current_hart(struct target *target);
-static int riscv013_resume_current_hart(struct target *target);
+static int riscv013_halt_prep(struct target *target);
+static int riscv013_halt_go(struct target *target);
+static int riscv013_resume_go(struct target *target);
static int riscv013_step_current_hart(struct target *target);
static int riscv013_on_halt(struct target *target);
static int riscv013_on_step(struct target *target);
-static int riscv013_on_resume(struct target *target);
+static int riscv013_resume_prep(struct target *target);
static bool riscv013_is_halted(struct target *target);
static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
static int riscv013_write_debug_buffer(struct target *target, unsigned index,
@@ -61,7 +65,7 @@ static int register_read_direct(struct target *target, uint64_t *value, uint32_t
static int register_write_direct(struct target *target, unsigned number,
uint64_t value);
static int read_memory(struct target *target, target_addr_t address,
- uint32_t size, uint32_t count, uint8_t *buffer);
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
static int write_memory(struct target *target, target_addr_t address,
uint32_t size, uint32_t count, const uint8_t *buffer);
static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
@@ -89,6 +93,7 @@ static int riscv013_test_compliance(struct target *target);
#define CSR_DCSR_CAUSE_DEBUGINT 3
#define CSR_DCSR_CAUSE_STEP 4
#define CSR_DCSR_CAUSE_HALT 5
+#define CSR_DCSR_CAUSE_GROUP 6
#define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
@@ -105,12 +110,6 @@ typedef enum {
DMI_STATUS_BUSY = 3
} dmi_status_t;
-typedef enum {
- RE_OK,
- RE_FAIL,
- RE_AGAIN
-} riscv_error_t;
-
typedef enum slot {
SLOT0,
SLOT1,
@@ -146,12 +145,20 @@ typedef enum {
typedef struct {
struct list_head list;
int abs_chain_position;
+
+ /* The number of harts connected to this DM. */
+ int hart_count;
/* Indicates we already reset this DM, so don't need to do it again. */
bool was_reset;
/* Targets that are connected to this DM. */
struct list_head target_list;
/* The currently selected hartid on this DM. */
int current_hartid;
+ bool hasel_supported;
+
+ /* The program buffer stores executable code. 0 is an illegal instruction,
+ * so we use 0 to mean the cached value is invalid. */
+ uint32_t progbuf_cache[16];
} dm013_info_t;
typedef struct {
@@ -160,6 +167,8 @@ typedef struct {
} target_list_t;
typedef struct {
+	/* The index used to address this hart in its DM. */
+ unsigned index;
/* Number of address bits in the dbus register. */
unsigned abits;
/* Number of abstract command data registers. */
@@ -229,7 +238,7 @@ static riscv013_info_t *get_info(const struct target *target)
* global list of DMs. If it's not in there, then create one and initialize it
* to 0.
*/
-static dm013_info_t *get_dm(struct target *target)
+dm013_info_t *get_dm(struct target *target)
{
RISCV013_INFO(info);
if (info->dm)
@@ -247,9 +256,13 @@ static dm013_info_t *get_dm(struct target *target)
}
if (!dm) {
+ LOG_DEBUG("[%d] Allocating new DM", target->coreid);
dm = calloc(1, sizeof(dm013_info_t));
+ if (!dm)
+ return NULL;
dm->abs_chain_position = abs_chain_position;
dm->current_hartid = -1;
+ dm->hart_count = -1;
INIT_LIST_HEAD(&dm->target_list);
list_add(&dm->list, &dm_list);
}
@@ -261,6 +274,10 @@ static dm013_info_t *get_dm(struct target *target)
return dm;
}
target_entry = calloc(1, sizeof(*target_entry));
+ if (!target_entry) {
+ info->dm = NULL;
+ return NULL;
+ }
target_entry->target = target;
list_add(&target_entry->list, &dm->target_list);
@@ -269,14 +286,14 @@ static dm013_info_t *get_dm(struct target *target)
static uint32_t set_hartsel(uint32_t initial, uint32_t index)
{
- initial &= ~DMI_DMCONTROL_HARTSELLO;
- initial &= ~DMI_DMCONTROL_HARTSELHI;
+ initial &= ~DM_DMCONTROL_HARTSELLO;
+ initial &= ~DM_DMCONTROL_HARTSELHI;
- uint32_t index_lo = index & ((1 << DMI_DMCONTROL_HARTSELLO_LENGTH) - 1);
- initial |= index_lo << DMI_DMCONTROL_HARTSELLO_OFFSET;
- uint32_t index_hi = index >> DMI_DMCONTROL_HARTSELLO_LENGTH;
- assert(index_hi < 1 << DMI_DMCONTROL_HARTSELHI_LENGTH);
- initial |= index_hi << DMI_DMCONTROL_HARTSELHI_OFFSET;
+ uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
+ initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
+ uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
+ assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
+ initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
return initial;
}
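
A quick worked example of the split (an illustrative self-check, not part of the patch; it assumes the 10-bit hartsello field from debug_defines.h):

static void set_hartsel_example(void) __attribute__((unused));
static void set_hartsel_example(void)
{
	/* Hart index 0x12345: hartsello = 0x12345 & 0x3ff = 0x345,
	 * hartselhi = 0x12345 >> 10 = 0x48. */
	uint32_t v = set_hartsel(0, 0x12345);
	assert(get_field(v, DM_DMCONTROL_HARTSELLO) == 0x345);
	assert(get_field(v, DM_DMCONTROL_HARTSELHI) == 0x48);
}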
@@ -288,52 +305,56 @@ static void decode_dmi(char *text, unsigned address, unsigned data)
uint64_t mask;
const char *name;
} description[] = {
- { DMI_DMCONTROL, DMI_DMCONTROL_HALTREQ, "haltreq" },
- { DMI_DMCONTROL, DMI_DMCONTROL_RESUMEREQ, "resumereq" },
- { DMI_DMCONTROL, DMI_DMCONTROL_HARTRESET, "hartreset" },
- { DMI_DMCONTROL, DMI_DMCONTROL_HASEL, "hasel" },
- { DMI_DMCONTROL, DMI_DMCONTROL_HARTSELHI, "hartselhi" },
- { DMI_DMCONTROL, DMI_DMCONTROL_HARTSELLO, "hartsello" },
- { DMI_DMCONTROL, DMI_DMCONTROL_NDMRESET, "ndmreset" },
- { DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE, "dmactive" },
- { DMI_DMCONTROL, DMI_DMCONTROL_ACKHAVERESET, "ackhavereset" },
-
- { DMI_DMSTATUS, DMI_DMSTATUS_IMPEBREAK, "impebreak" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ALLHAVERESET, "allhavereset" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ANYHAVERESET, "anyhavereset" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ALLRESUMEACK, "allresumeack" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ALLUNAVAIL, "allunavail" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ANYUNAVAIL, "anyunavail" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ALLRUNNING, "allrunning" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ANYRUNNING, "anyrunning" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ALLHALTED, "allhalted" },
- { DMI_DMSTATUS, DMI_DMSTATUS_ANYHALTED, "anyhalted" },
- { DMI_DMSTATUS, DMI_DMSTATUS_AUTHENTICATED, "authenticated" },
- { DMI_DMSTATUS, DMI_DMSTATUS_AUTHBUSY, "authbusy" },
- { DMI_DMSTATUS, DMI_DMSTATUS_DEVTREEVALID, "devtreevalid" },
- { DMI_DMSTATUS, DMI_DMSTATUS_VERSION, "version" },
-
- { DMI_ABSTRACTCS, DMI_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
- { DMI_ABSTRACTCS, DMI_ABSTRACTCS_BUSY, "busy" },
- { DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR, "cmderr" },
- { DMI_ABSTRACTCS, DMI_ABSTRACTCS_DATACOUNT, "datacount" },
-
- { DMI_COMMAND, DMI_COMMAND_CMDTYPE, "cmdtype" },
-
- { DMI_SBCS, DMI_SBCS_SBREADONADDR, "sbreadonaddr" },
- { DMI_SBCS, DMI_SBCS_SBACCESS, "sbaccess" },
- { DMI_SBCS, DMI_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
- { DMI_SBCS, DMI_SBCS_SBREADONDATA, "sbreadondata" },
- { DMI_SBCS, DMI_SBCS_SBERROR, "sberror" },
- { DMI_SBCS, DMI_SBCS_SBASIZE, "sbasize" },
- { DMI_SBCS, DMI_SBCS_SBACCESS128, "sbaccess128" },
- { DMI_SBCS, DMI_SBCS_SBACCESS64, "sbaccess64" },
- { DMI_SBCS, DMI_SBCS_SBACCESS32, "sbaccess32" },
- { DMI_SBCS, DMI_SBCS_SBACCESS16, "sbaccess16" },
- { DMI_SBCS, DMI_SBCS_SBACCESS8, "sbaccess8" },
+ { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
+ { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
+ { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
+ { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
+ { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
+ { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
+ { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
+ { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
+ { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
+
+ { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
+ { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
+ { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
+ { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
+ { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
+ { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
+ { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
+ { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
+ { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
+ { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
+ { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
+ { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
+ { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
+ { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
+ { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
+ { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
+ { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
+ { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
+
+ { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
+ { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
+ { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
+ { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
+
+ { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
+
+ { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
+ { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
+ { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
+ { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
+ { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
+ { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
+ { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
+ { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
+ { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
+ { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
+ { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
+ { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
+ { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
+ { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
};
text[0] = 0;
@@ -376,10 +397,9 @@ static void dump_field(int idle, const struct scan_field *field)
log_printf_lf(LOG_LVL_DEBUG,
__FILE__, __LINE__, "scan",
- "%db %di %s %08x @%02x -> %s %08x @%02x",
- field->num_bits, idle,
- op_string[out_op], out_data, out_address,
- status_string[in_op], in_data, in_address);
+ "%db %s %08x @%02x -> %s %08x @%02x; %di",
+ field->num_bits, op_string[out_op], out_data, out_address,
+ status_string[in_op], in_data, in_address, idle);
char out_text[500];
char in_text[500];
@@ -395,6 +415,10 @@ static void dump_field(int idle, const struct scan_field *field)
static void select_dmi(struct target *target)
{
+ if (bscan_tunnel_ir_width != 0) {
+ select_dmi_via_bscan(target);
+ return;
+ }
jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
}
@@ -404,6 +428,9 @@ static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
uint8_t in_value[4];
uint8_t out_value[4] = { 0 };
+ if (bscan_tunnel_ir_width != 0)
+ return dtmcontrol_scan_via_bscan(target, out);
+
buf_set_u32(out_value, 0, 32, out);
jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
@@ -458,6 +485,7 @@ static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
.out_value = out,
.in_value = in
};
+ riscv_bscan_tunneled_scan_context_t bscan_ctxt;
if (r->reset_delays_wait >= 0) {
r->reset_delays_wait--;
@@ -476,8 +504,18 @@ static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);
- /* Assume dbus is already selected. */
- jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
+	/* The scan fields passed into the JTAG command queue have to remain valid
+	   until jtag_execute_queue() runs, so they cannot live on the stack of a
+	   helper function. Rather than allocating them on the heap or making them
+	   static, keep this logic inline so the bscan context stays on this
+	   function's stack. */
+ if (bscan_tunnel_ir_width != 0) {
+ riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
+ } else {
+ /* Assume dbus is already selected. */
+ jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
+ }
int idle_count = info->dmi_busy_delay;
if (exec)
@@ -489,25 +527,42 @@ static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
int retval = jtag_execute_queue();
if (retval != ERROR_OK) {
LOG_ERROR("dmi_scan failed jtag scan");
+ if (data_in)
+ *data_in = ~0;
return DMI_STATUS_FAILED;
}
+ if (bscan_tunnel_ir_width != 0) {
+ /* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
+ buffer_shr(in, num_bytes, 1);
+ }
+
if (data_in)
*data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
if (address_in)
*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
-
dump_field(idle_count, &field);
-
return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
-/* If dmi_busy_encountered is non-NULL, this function will use it to tell the
- * caller whether DMI was ever busy during this call. */
+/**
+ * @param data_in The data we received from the target.
+ * @param dmi_op The operation to perform (read/write/nop).
+ * @param dmi_busy_encountered
+ * If non-NULL, will be updated to reflect whether DMI busy was
+ * encountered while executing this operation or not.
+ * @param address The address argument to that operation.
+ * @param data_out The data to send to the target.
+ * @param exec When true, this scan will execute something, so extra RTI
+ * cycles may be added.
+ * @param ensure_success
+ * Scan a nop after the requested operation, ensuring the
+ * DMI operation succeeded.
+ */
static int dmi_op_timeout(struct target *target, uint32_t *data_in,
bool *dmi_busy_encountered, int dmi_op, uint32_t address,
- uint32_t data_out, int timeout_sec, bool exec)
+ uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
{
select_dmi(target);
@@ -558,34 +613,32 @@ static int dmi_op_timeout(struct target *target, uint32_t *data_in,
return ERROR_FAIL;
}
- /* This second loop ensures the request succeeded, and gets back data.
- * Note that NOP can result in a 'busy' result as well, but that would be
- * noticed on the next DMI access we do. */
- while (1) {
- status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
- false);
- if (status == DMI_STATUS_BUSY) {
- increase_dmi_busy_delay(target);
- } else if (status == DMI_STATUS_SUCCESS) {
- break;
- } else {
- LOG_ERROR("failed %s (NOP) at 0x%x, status=%d", op_name, address,
- status);
- return ERROR_FAIL;
- }
- if (time(NULL) - start > timeout_sec)
- return ERROR_TIMEOUT_REACHED;
- }
-
- if (status != DMI_STATUS_SUCCESS) {
- if (status == DMI_STATUS_FAILED || !data_in) {
- LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
- status);
- } else {
- LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
- op_name, address, *data_in, status);
+ if (ensure_success) {
+ /* This second loop ensures the request succeeded, and gets back data.
+ * Note that NOP can result in a 'busy' result as well, but that would be
+ * noticed on the next DMI access we do. */
+ while (1) {
+ status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
+ false);
+ if (status == DMI_STATUS_BUSY) {
+ increase_dmi_busy_delay(target);
+ if (dmi_busy_encountered)
+ *dmi_busy_encountered = true;
+ } else if (status == DMI_STATUS_SUCCESS) {
+ break;
+ } else {
+ if (data_in) {
+ LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
+ op_name, address, *data_in, status);
+ } else {
+ LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
+ status);
+ }
+ return ERROR_FAIL;
+ }
+ if (time(NULL) - start > timeout_sec)
+ return ERROR_TIMEOUT_REACHED;
}
- return ERROR_FAIL;
}
return ERROR_OK;
@@ -593,10 +646,10 @@ static int dmi_op_timeout(struct target *target, uint32_t *data_in,
static int dmi_op(struct target *target, uint32_t *data_in,
bool *dmi_busy_encountered, int dmi_op, uint32_t address,
- uint32_t data_out, bool exec)
+ uint32_t data_out, bool exec, bool ensure_success)
{
int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
- address, data_out, riscv_command_timeout_sec, exec);
+ address, data_out, riscv_command_timeout_sec, exec, ensure_success);
if (result == ERROR_TIMEOUT_REACHED) {
LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
"either really slow or broken. You could increase the "
@@ -609,32 +662,39 @@ static int dmi_op(struct target *target, uint32_t *data_in,
static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
{
- return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false);
+ return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
}
static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
{
- return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true);
+ return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
}
static int dmi_write(struct target *target, uint32_t address, uint32_t value)
{
- return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false);
+ return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
}
-static int dmi_write_exec(struct target *target, uint32_t address, uint32_t value)
+static int dmi_write_exec(struct target *target, uint32_t address,
+ uint32_t value, bool ensure_success)
{
- return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true);
+ return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
}
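
For context, a usage sketch of the ensure_success flag (not part of the patch; `value` and `command` are assumed to be in scope): a plain write confirms completion with a trailing nop scan, while a write that triggers an abstract command can skip it because the subsequent abstractcs poll catches any busy or error condition.

if (dmi_write(target, DM_DATA0, value) != ERROR_OK)	/* confirmed by a nop scan */
	return ERROR_FAIL;
if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)	/* skip the confirming nop */
	return ERROR_FAIL;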
int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
bool authenticated, unsigned timeout_sec)
{
int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
- DMI_DMSTATUS, 0, timeout_sec, false);
+ DM_DMSTATUS, 0, timeout_sec, false, true);
if (result != ERROR_OK)
return result;
- if (authenticated && !get_field(*dmstatus, DMI_DMSTATUS_AUTHENTICATED)) {
+ int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
+ if (dmstatus_version != 2 && dmstatus_version != 3) {
+ LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (0.14), not "
+ "%d (dmstatus=0x%x). This error might be caused by a JTAG "
+ "signal issue. Try reducing the JTAG clock speed.",
+ get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
+ } else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
LOG_ERROR("Debugger is not authenticated to target Debug Module. "
"(dmstatus=0x%x). Use `riscv authdata_read` and "
"`riscv authdata_write` commands to authenticate.", *dmstatus);
@@ -663,11 +723,11 @@ uint32_t abstract_register_size(unsigned width)
{
switch (width) {
case 32:
- return set_field(0, AC_ACCESS_REGISTER_SIZE, 2);
+ return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
case 64:
- return set_field(0, AC_ACCESS_REGISTER_SIZE, 3);
+ return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
case 128:
- return set_field(0, AC_ACCESS_REGISTER_SIZE, 4);
+ return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
default:
LOG_ERROR("Unsupported register width: %d", width);
return 0;
@@ -679,14 +739,14 @@ static int wait_for_idle(struct target *target, uint32_t *abstractcs)
RISCV013_INFO(info);
time_t start = time(NULL);
while (1) {
- if (dmi_read(target, abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
+ if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
return ERROR_FAIL;
- if (get_field(*abstractcs, DMI_ABSTRACTCS_BUSY) == 0)
+ if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
return ERROR_OK;
if (time(NULL) - start > riscv_command_timeout_sec) {
- info->cmderr = get_field(*abstractcs, DMI_ABSTRACTCS_CMDERR);
+ info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
if (info->cmderr != CMDERR_NONE) {
const char *errors[8] = {
"none",
@@ -715,12 +775,12 @@ static int execute_abstract_command(struct target *target, uint32_t command)
{
RISCV013_INFO(info);
if (debug_level >= LOG_LVL_DEBUG) {
- switch (get_field(command, DMI_COMMAND_CMDTYPE)) {
+ switch (get_field(command, DM_COMMAND_CMDTYPE)) {
case 0:
LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
"transfer=%d, write=%d, regno=0x%x",
command,
- 8 << get_field(command, AC_ACCESS_REGISTER_SIZE),
+ 8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
get_field(command, AC_ACCESS_REGISTER_TRANSFER),
get_field(command, AC_ACCESS_REGISTER_WRITE),
@@ -732,17 +792,17 @@ static int execute_abstract_command(struct target *target, uint32_t command)
}
}
- dmi_write_exec(target, DMI_COMMAND, command);
+ if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
+ return ERROR_FAIL;
uint32_t abstractcs = 0;
- wait_for_idle(target, &abstractcs);
+ int result = wait_for_idle(target, &abstractcs);
- info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
- if (info->cmderr != 0) {
+ info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
+ if (info->cmderr != 0 || result != ERROR_OK) {
LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
/* Clear the error. */
- dmi_write(target, DMI_ABSTRACTCS, set_field(0, DMI_ABSTRACTCS_CMDERR,
- info->cmderr));
+ dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
return ERROR_FAIL;
}
@@ -757,14 +817,14 @@ static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
unsigned offset = index * size_bits / 32;
switch (size_bits) {
default:
- LOG_ERROR("Unsupported size: %d", size_bits);
+ LOG_ERROR("Unsupported size: %d bits", size_bits);
return ~0;
case 64:
- dmi_read(target, &v, DMI_DATA0 + offset + 1);
+ dmi_read(target, &v, DM_DATA0 + offset + 1);
value |= ((uint64_t) v) << 32;
/* falls through */
case 32:
- dmi_read(target, &v, DMI_DATA0 + offset);
+ dmi_read(target, &v, DM_DATA0 + offset);
value |= v;
}
return value;
@@ -776,13 +836,13 @@ static int write_abstract_arg(struct target *target, unsigned index,
unsigned offset = index * size_bits / 32;
switch (size_bits) {
default:
- LOG_ERROR("Unsupported size: %d", size_bits);
+ LOG_ERROR("Unsupported size: %d bits", size_bits);
return ERROR_FAIL;
case 64:
- dmi_write(target, DMI_DATA0 + offset + 1, value >> 32);
+ dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
/* falls through */
case 32:
- dmi_write(target, DMI_DATA0 + offset, value);
+ dmi_write(target, DM_DATA0 + offset, value);
}
return ERROR_OK;
}
@@ -793,15 +853,17 @@ static int write_abstract_arg(struct target *target, unsigned index,
static uint32_t access_register_command(struct target *target, uint32_t number,
unsigned size, uint32_t flags)
{
- uint32_t command = set_field(0, DMI_COMMAND_CMDTYPE, 0);
+ uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
switch (size) {
case 32:
- command = set_field(command, AC_ACCESS_REGISTER_SIZE, 2);
+ command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
break;
case 64:
- command = set_field(command, AC_ACCESS_REGISTER_SIZE, 3);
+ command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
break;
default:
+ LOG_ERROR("%d-bit register %s not supported.", size,
+ gdb_regno_name(number));
assert(0);
}
@@ -821,6 +883,8 @@ static uint32_t access_register_command(struct target *target, uint32_t number,
assert(reg_info);
command = set_field(command, AC_ACCESS_REGISTER_REGNO,
0xc000 + reg_info->custom_number);
+ } else {
+ assert(0);
}
command |= flags;
@@ -839,6 +903,9 @@ static int register_read_abstract(struct target *target, uint64_t *value,
if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
!info->abstract_read_csr_supported)
return ERROR_FAIL;
+ /* The spec doesn't define abstract register numbers for vector registers. */
+ if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
+ return ERROR_FAIL;
uint32_t command = access_register_command(target, number, size,
AC_ACCESS_REGISTER_TRANSFER);
@@ -899,6 +966,45 @@ static int register_write_abstract(struct target *target, uint32_t number,
return ERROR_OK;
}
+/*
+ * Sets the AAMSIZE field of a memory access abstract command based on
+ * the width (bits).
+ */
+static uint32_t abstract_memory_size(unsigned width)
+{
+ switch (width) {
+ case 8:
+ return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
+ case 16:
+ return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
+ case 32:
+ return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
+ case 64:
+ return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
+ case 128:
+ return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
+ default:
+ LOG_ERROR("Unsupported memory width: %d", width);
+ return 0;
+ }
+}
+
+/*
+ * Creates a memory access abstract command.
+ */
+static uint32_t access_memory_command(struct target *target, bool virtual,
+ unsigned width, bool postincrement, bool write)
+{
+ uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
+ command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
+ command |= abstract_memory_size(width);
+ command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
+ postincrement);
+ command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
+
+ return command;
+}
+
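
For example (a sketch, not part of the patch; assumes `target` in scope), a 32-bit physical-address write with post-increment would be built as:

uint32_t command = access_memory_command(target, false, 32, true, true);
/* cmdtype = 2, aamvirtual = 0, aamsize = 2, aampostincrement = 1, write = 1 */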
static int examine_progbuf(struct target *target)
{
riscv013_info_t *info = get_info(target);
@@ -942,7 +1048,7 @@ static int examine_progbuf(struct target *target)
}
uint32_t written;
- if (dmi_read(target, &written, DMI_PROGBUF0) != ERROR_OK)
+ if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
return ERROR_FAIL;
if (written == (uint32_t) info->progbuf_address) {
LOG_INFO("progbuf is writable at 0x%" PRIx64,
@@ -958,8 +1064,58 @@ static int examine_progbuf(struct target *target)
return ERROR_OK;
}
+static int is_fpu_reg(uint32_t gdb_regno)
+{
+ return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
+ (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
+ (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
+ (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
+}
+
+static int is_vector_reg(uint32_t gdb_regno)
+{
+ return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
+ gdb_regno == GDB_REGNO_VSTART ||
+ gdb_regno == GDB_REGNO_VXSAT ||
+ gdb_regno == GDB_REGNO_VXRM ||
+ gdb_regno == GDB_REGNO_VL ||
+ gdb_regno == GDB_REGNO_VTYPE ||
+ gdb_regno == GDB_REGNO_VLENB;
+}
+
+static int prep_for_register_access(struct target *target, uint64_t *mstatus,
+ int regno)
+{
+ if (is_fpu_reg(regno) || is_vector_reg(regno)) {
+ if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
+ return ERROR_FAIL;
+ if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
+ if (register_write_direct(target, GDB_REGNO_MSTATUS,
+ set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
+ return ERROR_FAIL;
+ } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
+ if (register_write_direct(target, GDB_REGNO_MSTATUS,
+ set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ } else {
+ *mstatus = 0;
+ }
+ return ERROR_OK;
+}
+
+static int cleanup_after_register_access(struct target *target,
+ uint64_t mstatus, int regno)
+{
+ if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
+ (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
+ if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
+ return ERROR_FAIL;
+ return ERROR_OK;
+}
+
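
The two helpers are meant to bracket a single register access, so mstatus.FS/VS are only forced on for the duration of that access. Sketch of the intended pairing (not part of the patch; assumes `target` in scope):

uint64_t mstatus;
if (prep_for_register_access(target, &mstatus, GDB_REGNO_V0) != ERROR_OK)
	return ERROR_FAIL;
/* ... access v0 through the program buffer here ... */
if (cleanup_after_register_access(target, mstatus, GDB_REGNO_V0) != ERROR_OK)
	return ERROR_FAIL;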
typedef enum {
- SPACE_DMI_DATA,
+ SPACE_DM_DATA,
SPACE_DMI_PROGBUF,
SPACE_DMI_RAM
} memory_space_t;
@@ -990,6 +1146,7 @@ static int scratch_reserve(struct target *target,
riscv013_info_t *info = get_info(target);
+ /* Option 1: See if data# registers can be used as the scratch memory */
if (info->dataaccess == 1) {
/* Sign extend dataaddr. */
scratch->hart_address = info->dataaddr;
@@ -1000,12 +1157,13 @@ static int scratch_reserve(struct target *target,
if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
info->datasize) {
- scratch->memory_space = SPACE_DMI_DATA;
+ scratch->memory_space = SPACE_DM_DATA;
scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
return ERROR_OK;
}
}
+ /* Option 2: See if progbuf can be used as the scratch memory */
if (examine_progbuf(target) != ERROR_OK)
return ERROR_FAIL;
@@ -1013,13 +1171,15 @@ static int scratch_reserve(struct target *target,
unsigned program_size = (program->instruction_count + 1) * 4;
scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
~(alignment - 1);
- if ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
- info->progbufsize) {
+ if ((info->progbuf_writable == YNM_YES) &&
+ ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
+ info->progbufsize)) {
scratch->memory_space = SPACE_DMI_PROGBUF;
scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
return ERROR_OK;
}
+ /* Option 3: User-configured memory area as scratch RAM */
if (target_alloc_working_area(target, size_bytes + alignment - 1,
&scratch->area) == ERROR_OK) {
scratch->hart_address = (scratch->area->address + alignment - 1) &
@@ -1048,26 +1208,26 @@ static int scratch_read64(struct target *target, scratch_mem_t *scratch,
{
uint32_t v;
switch (scratch->memory_space) {
- case SPACE_DMI_DATA:
- if (dmi_read(target, &v, DMI_DATA0 + scratch->debug_address) != ERROR_OK)
+ case SPACE_DM_DATA:
+ if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
return ERROR_FAIL;
*value = v;
- if (dmi_read(target, &v, DMI_DATA1 + scratch->debug_address) != ERROR_OK)
+ if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
return ERROR_FAIL;
*value |= ((uint64_t) v) << 32;
break;
case SPACE_DMI_PROGBUF:
- if (dmi_read(target, &v, DMI_PROGBUF0 + scratch->debug_address) != ERROR_OK)
+ if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
return ERROR_FAIL;
*value = v;
- if (dmi_read(target, &v, DMI_PROGBUF1 + scratch->debug_address) != ERROR_OK)
+ if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
return ERROR_FAIL;
*value |= ((uint64_t) v) << 32;
break;
case SPACE_DMI_RAM:
{
- uint8_t buffer[8];
- if (read_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
+ uint8_t buffer[8] = {0};
+ if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
return ERROR_FAIL;
*value = buffer[0] |
(((uint64_t) buffer[1]) << 8) |
@@ -1087,13 +1247,13 @@ static int scratch_write64(struct target *target, scratch_mem_t *scratch,
uint64_t value)
{
switch (scratch->memory_space) {
- case SPACE_DMI_DATA:
- dmi_write(target, DMI_DATA0 + scratch->debug_address, value);
- dmi_write(target, DMI_DATA1 + scratch->debug_address, value >> 32);
+ case SPACE_DM_DATA:
+ dmi_write(target, DM_DATA0 + scratch->debug_address, value);
+ dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
break;
case SPACE_DMI_PROGBUF:
- dmi_write(target, DMI_PROGBUF0 + scratch->debug_address, value);
- dmi_write(target, DMI_PROGBUF1 + scratch->debug_address, value >> 32);
+ dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
+ dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
break;
case SPACE_DMI_RAM:
{
@@ -1126,6 +1286,14 @@ static unsigned register_size(struct target *target, unsigned number)
return riscv_xlen(target);
}
+static bool has_sufficient_progbuf(struct target *target, unsigned size)
+{
+ RISCV013_INFO(info);
+ RISCV_INFO(r);
+
+ return info->progbufsize + r->impebreak >= size;
+}
+
/**
* Immediately write the new value to the requested register. This mechanism
* bypasses any caches.
@@ -1133,19 +1301,12 @@ static unsigned register_size(struct target *target, unsigned number)
static int register_write_direct(struct target *target, unsigned number,
uint64_t value)
{
- RISCV013_INFO(info);
- RISCV_INFO(r);
-
- LOG_DEBUG("{%d} reg[0x%x] <- 0x%" PRIx64, riscv_current_hartid(target),
- number, value);
+ LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
+ gdb_regno_name(number), value);
int result = register_write_abstract(target, number, value,
register_size(target, number));
- if (result == ERROR_OK && target->reg_cache) {
- struct reg *reg = &target->reg_cache->reg_list[number];
- buf_set_u64(reg->value, 0, reg->size, value);
- }
- if (result == ERROR_OK || info->progbufsize + r->impebreak < 2 ||
+ if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
!riscv_is_halted(target))
return result;
@@ -1156,6 +1317,10 @@ static int register_write_direct(struct target *target, unsigned number,
if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
return ERROR_FAIL;
+ uint64_t mstatus;
+ if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
+ return ERROR_FAIL;
+
scratch_mem_t scratch;
bool use_scratch = false;
if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
@@ -1180,6 +1345,10 @@ static int register_write_direct(struct target *target, unsigned number,
return ERROR_FAIL;
}
+ } else if (number == GDB_REGNO_VTYPE) {
+ riscv_program_insert(&program, csrr(S0, CSR_VL));
+ riscv_program_insert(&program, vsetvli(ZERO, S0, value));
+
} else {
if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
return ERROR_FAIL;
@@ -1189,6 +1358,15 @@ static int register_write_direct(struct target *target, unsigned number,
riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
else
riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
+ } else if (number == GDB_REGNO_VL) {
+ /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
+		 * vsetvli and vsetvl instructions, and the fault-only-first vector
+ * load instruction variants." */
+ riscv_reg_t vtype;
+ if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
+ return ERROR_FAIL;
} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
riscv_program_csrw(&program, S0, number);
} else {
@@ -1207,6 +1385,9 @@ static int register_write_direct(struct target *target, unsigned number,
if (use_scratch)
scratch_release(target, &scratch);
+ if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
+ return ERROR_FAIL;
+
/* Restore S0. */
if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
return ERROR_FAIL;
@@ -1234,14 +1415,11 @@ static int register_read(struct target *target, uint64_t *value, uint32_t number
/** Actually read registers from the target right now. */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
- RISCV013_INFO(info);
- RISCV_INFO(r);
-
int result = register_read_abstract(target, value, number,
register_size(target, number));
if (result != ERROR_OK &&
- info->progbufsize + r->impebreak >= 2 &&
+ has_sufficient_progbuf(target, 2) &&
number > GDB_REGNO_XPR31) {
struct riscv_program program;
riscv_program_init(&program, target);
@@ -1249,21 +1427,17 @@ static int register_read_direct(struct target *target, uint64_t *value, uint32_t
scratch_mem_t scratch;
bool use_scratch = false;
- uint64_t s0;
+ riscv_reg_t s0;
if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
return ERROR_FAIL;
/* Write program to move data into s0. */
uint64_t mstatus;
- if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
- if (register_read(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
- return ERROR_FAIL;
- if ((mstatus & MSTATUS_FS) == 0)
- if (register_write_direct(target, GDB_REGNO_MSTATUS,
- set_field(mstatus, MSTATUS_FS, 1)) != ERROR_OK)
- return ERROR_FAIL;
+ if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
+ return ERROR_FAIL;
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
if (riscv_supports_extension(target, riscv_current_hartid(target), 'D')
&& riscv_xlen(target) < 64) {
/* There are no instructions to move all the bits from a
@@ -1289,7 +1463,7 @@ static int register_read_direct(struct target *target, uint64_t *value, uint32_t
} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
riscv_program_csrr(&program, S0, number);
} else {
- LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
+ LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
return ERROR_FAIL;
}
@@ -1308,10 +1482,8 @@ static int register_read_direct(struct target *target, uint64_t *value, uint32_t
return ERROR_FAIL;
}
- if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
- (mstatus & MSTATUS_FS) == 0)
- if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
- return ERROR_FAIL;
+ if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
+ return ERROR_FAIL;
/* Restore S0. */
if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
@@ -1319,8 +1491,8 @@ static int register_read_direct(struct target *target, uint64_t *value, uint32_t
}
if (result == ERROR_OK) {
- LOG_DEBUG("{%d} reg[0x%x] = 0x%" PRIx64, riscv_current_hartid(target),
- number, *value);
+ LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
+ gdb_regno_name(number), *value);
}
return result;
@@ -1335,7 +1507,7 @@ int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
return ERROR_FAIL;
if (dmstatus)
*dmstatus = value;
- if (!get_field(value, DMI_DMSTATUS_AUTHBUSY))
+ if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
break;
if (time(NULL) - start > riscv_command_timeout_sec) {
LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
@@ -1360,6 +1532,36 @@ static void deinit_target(struct target *target)
info->version_specific = NULL;
}
+static int set_haltgroup(struct target *target, bool *supported)
+{
+ uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
+ if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
+ return ERROR_FAIL;
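+	/* The group field might not be writable on this DM; read dmcs2 back to
+	 * see whether the hart was actually placed in the requested halt group. */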
+ uint32_t read;
+ if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
+ return ERROR_FAIL;
+ *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
+ return ERROR_OK;
+}
+
+static int discover_vlenb(struct target *target, int hartid)
+{
+ RISCV_INFO(r);
+ riscv_reg_t vlenb;
+
+ if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
+ LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
+ target_name(target));
+ r->vlenb[hartid] = 0;
+ return ERROR_OK;
+ }
+ r->vlenb[hartid] = vlenb;
+
+ LOG_INFO("hart %d: Vector support with vlenb=%d", hartid, r->vlenb[hartid]);
+
+ return ERROR_OK;
+}
+
static int examine(struct target *target)
{
/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
@@ -1382,43 +1584,50 @@ static int examine(struct target *target)
}
riscv013_info_t *info = get_info(target);
+ /* TODO: This won't be true if there are multiple DMs. */
+ info->index = target->coreid;
info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);
/* Reset the Debug Module. */
dm013_info_t *dm = get_dm(target);
+ if (!dm)
+ return ERROR_FAIL;
if (!dm->was_reset) {
- dmi_write(target, DMI_DMCONTROL, 0);
- dmi_write(target, DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE);
+ dmi_write(target, DM_DMCONTROL, 0);
+ dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
dm->was_reset = true;
}
- dmi_write(target, DMI_DMCONTROL, DMI_DMCONTROL_HARTSELLO |
- DMI_DMCONTROL_HARTSELHI | DMI_DMCONTROL_DMACTIVE);
+ dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
+ DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
+ DM_DMCONTROL_HASEL);
uint32_t dmcontrol;
- if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
+ if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
return ERROR_FAIL;
- if (!get_field(dmcontrol, DMI_DMCONTROL_DMACTIVE)) {
+ if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
dmcontrol);
return ERROR_FAIL;
}
+ dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);
+
uint32_t dmstatus;
if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
return ERROR_FAIL;
LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
- if (get_field(dmstatus, DMI_DMSTATUS_VERSION) != 2) {
- LOG_ERROR("OpenOCD only supports Debug Module version 2, not %d "
- "(dmstatus=0x%x)", get_field(dmstatus, DMI_DMSTATUS_VERSION), dmstatus);
+ int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
+ if (dmstatus_version != 2 && dmstatus_version != 3) {
+ /* Error was already printed out in dmstatus_read(). */
return ERROR_FAIL;
}
uint32_t hartsel =
- (get_field(dmcontrol, DMI_DMCONTROL_HARTSELHI) <<
- DMI_DMCONTROL_HARTSELLO_LENGTH) |
- get_field(dmcontrol, DMI_DMCONTROL_HARTSELLO);
+ (get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
+ DM_DMCONTROL_HARTSELLO_LENGTH) |
+ get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
info->hartsellen = 0;
while (hartsel & 1) {
info->hartsellen++;
@@ -1427,14 +1636,14 @@ static int examine(struct target *target)
LOG_DEBUG("hartsellen=%d", info->hartsellen);
uint32_t hartinfo;
- if (dmi_read(target, &hartinfo, DMI_HARTINFO) != ERROR_OK)
+ if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
return ERROR_FAIL;
- info->datasize = get_field(hartinfo, DMI_HARTINFO_DATASIZE);
- info->dataaccess = get_field(hartinfo, DMI_HARTINFO_DATAACCESS);
- info->dataaddr = get_field(hartinfo, DMI_HARTINFO_DATAADDR);
+ info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
+ info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
+ info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);
- if (!get_field(dmstatus, DMI_DMSTATUS_AUTHENTICATED)) {
+ if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
LOG_ERROR("Debugger is not authenticated to target Debug Module. "
"(dmstatus=0x%x). Use `riscv authdata_read` and "
"`riscv authdata_write` commands to authenticate.", dmstatus);
@@ -1445,33 +1654,65 @@ static int examine(struct target *target)
return ERROR_OK;
}
- if (dmi_read(target, &info->sbcs, DMI_SBCS) != ERROR_OK)
+ if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
return ERROR_FAIL;
/* Check that abstract data registers are accessible. */
uint32_t abstractcs;
- if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
+ if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
return ERROR_FAIL;
- info->datacount = get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT);
- info->progbufsize = get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE);
+ info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
+ info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);
LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);
RISCV_INFO(r);
- r->impebreak = get_field(dmstatus, DMI_DMSTATUS_IMPEBREAK);
+ r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);
- if (info->progbufsize + r->impebreak < 2) {
+ if (!has_sufficient_progbuf(target, 2)) {
LOG_WARNING("We won't be able to execute fence instructions on this "
"target. Memory may not always appear consistent. "
"(progbufsize=%d, impebreak=%d)", info->progbufsize,
r->impebreak);
}
+ if (info->progbufsize < 4 && riscv_enable_virtual) {
+ LOG_ERROR("set_enable_virtual is not available on this target. It "
+ "requires a program buffer size of at least 4. (progbufsize=%d) "
+ "Use `riscv set_enable_virtual off` to continue."
+ , info->progbufsize);
+ }
+
/* Before doing anything else we must first enumerate the harts. */
+ if (dm->hart_count < 0) {
+ for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
+ r->current_hartid = i;
+ if (riscv013_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ uint32_t s;
+ if (dmstatus_read(target, &s, true) != ERROR_OK)
+ return ERROR_FAIL;
+ if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
+ break;
+ dm->hart_count = i + 1;
+
+ if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
+ dmi_write(target, DM_DMCONTROL,
+ set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
+ }
+
+ LOG_DEBUG("Detected %d harts.", dm->hart_count);
+ }
+
+ if (dm->hart_count == 0) {
+ LOG_ERROR("No harts found!");
+ return ERROR_FAIL;
+ }
/* Don't call any riscv_* functions until after we've counted the number of
* cores and initialized registers. */
- for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
+ for (int i = 0; i < dm->hart_count; ++i) {
if (!riscv_rtos_enabled(target) && i != target->coreid)
continue;
@@ -1479,20 +1720,9 @@ static int examine(struct target *target)
if (riscv013_select_current_hart(target) != ERROR_OK)
return ERROR_FAIL;
- uint32_t s;
- if (dmstatus_read(target, &s, true) != ERROR_OK)
- return ERROR_FAIL;
- if (get_field(s, DMI_DMSTATUS_ANYNONEXISTENT))
- break;
- r->hart_count = i + 1;
-
- if (get_field(s, DMI_DMSTATUS_ANYHAVERESET))
- dmi_write(target, DMI_DMCONTROL,
- set_hartsel(DMI_DMCONTROL_DMACTIVE | DMI_DMCONTROL_ACKHAVERESET, i));
-
bool halted = riscv_is_halted(target);
if (!halted) {
- if (riscv013_halt_current_hart(target) != ERROR_OK) {
+ if (riscv013_halt_go(target) != ERROR_OK) {
LOG_ERROR("Fatal: Hart %d failed to halt during examine()", i);
return ERROR_FAIL;
}
@@ -1513,6 +1743,11 @@ static int examine(struct target *target)
return ERROR_FAIL;
}
+ if (riscv_supports_extension(target, i, 'V')) {
+ if (discover_vlenb(target, i) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
/* Now init registers based on what we discovered. */
if (riscv_init_registers(target) != ERROR_OK)
return ERROR_FAIL;
@@ -1523,18 +1758,23 @@ static int examine(struct target *target)
r->misa[i]);
if (!halted)
- riscv013_resume_current_hart(target);
+ riscv013_step_or_resume_current_hart(target, false, false);
}
- LOG_DEBUG("Enumerated %d harts", r->hart_count);
+ target_set_examined(target);
- if (r->hart_count == 0) {
- LOG_ERROR("No harts found!");
- return ERROR_FAIL;
+ if (target->smp) {
+ bool haltgroup_supported;
+ if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
+ return ERROR_FAIL;
+ if (haltgroup_supported)
+ LOG_INFO("Core %d made part of halt group %d.", target->coreid,
+ target->smp);
+ else
+ LOG_INFO("Core %d could not be made part of halt group %d.",
+ target->coreid, target->smp);
}
- target_set_examined(target);
-
/* Some regression suites rely on seeing 'Examined RISC-V core' to know
* when they can connect with gdb/telnet.
* We will need to update those suites if we want to change that text. */
@@ -1556,7 +1796,7 @@ int riscv013_authdata_read(struct target *target, uint32_t *value)
if (wait_for_authbusy(target, NULL) != ERROR_OK)
return ERROR_FAIL;
- return dmi_read(target, value, DMI_AUTHDATA);
+ return dmi_read(target, value, DM_AUTHDATA);
}
int riscv013_authdata_write(struct target *target, uint32_t value)
@@ -1565,16 +1805,18 @@ int riscv013_authdata_write(struct target *target, uint32_t value)
if (wait_for_authbusy(target, &before) != ERROR_OK)
return ERROR_FAIL;
- dmi_write(target, DMI_AUTHDATA, value);
+ dmi_write(target, DM_AUTHDATA, value);
if (wait_for_authbusy(target, &after) != ERROR_OK)
return ERROR_FAIL;
- if (!get_field(before, DMI_DMSTATUS_AUTHENTICATED) &&
- get_field(after, DMI_DMSTATUS_AUTHENTICATED)) {
+ if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
+ get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
LOG_INFO("authdata_write resulted in successful authentication");
int result = ERROR_OK;
dm013_info_t *dm = get_dm(target);
+ if (!dm)
+ return ERROR_FAIL;
target_list_t *entry;
list_for_each_entry(entry, &dm->target_list, list) {
if (examine(entry->target) != ERROR_OK)
@@ -1586,6 +1828,183 @@ int riscv013_authdata_write(struct target *target, uint32_t value)
return ERROR_OK;
}
+static int riscv013_hart_count(struct target *target)
+{
+ dm013_info_t *dm = get_dm(target);
+ assert(dm);
+ return dm->hart_count;
+}
+
+static unsigned riscv013_data_bits(struct target *target)
+{
+ RISCV013_INFO(info);
+ /* TODO: Once there is a spec for discovering abstract commands, we can
+ * take those into account as well. For now we assume abstract commands
+ * support XLEN-wide accesses. */
+ if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
+ return riscv_xlen(target);
+
+ if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
+ return 128;
+ if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
+ return 64;
+ if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
+ return 32;
+ if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
+ return 16;
+ if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
+ return 8;
+
+ return riscv_xlen(target);
+}
+
+static int prep_for_vector_access(struct target *target, uint64_t *vtype,
+ uint64_t *vl, unsigned *debug_vl)
+{
+ RISCV_INFO(r);
+ /* TODO: this continuous save/restore is terrible for performance. */
+ /* Write vtype and vl. */
+ unsigned encoded_vsew;
+ switch (riscv_xlen(target)) {
+ case 32:
+ encoded_vsew = 2;
+ break;
+ case 64:
+ encoded_vsew = 3;
+ break;
+ default:
+ LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
+ return ERROR_FAIL;
+ }
+
+ /* Save vtype and vl. */
+ if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
+ return ERROR_FAIL;
+ if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
+ return ERROR_FAIL;
+
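+	/* Set SEW to XLEN and vl to the number of XLEN-bit slices in a vector
+	 * register, so each transfer through S0 moves exactly one slice. */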
+ if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
+ return ERROR_FAIL;
+ *debug_vl = DIV_ROUND_UP(r->vlenb[r->current_hartid] * 8,
+ riscv_xlen(target));
+ if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
+ return ERROR_FAIL;
+
+ return ERROR_OK;
+}
+
+static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
+ uint64_t vl)
+{
+ /* Restore vtype and vl. */
+ if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
+ return ERROR_FAIL;
+ if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
+ return ERROR_FAIL;
+ return ERROR_OK;
+}
+
+static int riscv013_get_register_buf(struct target *target,
+ uint8_t *value, int regno)
+{
+ assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
+
+ riscv_reg_t s0;
+ if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ uint64_t mstatus;
+ if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
+ return ERROR_FAIL;
+
+ uint64_t vtype, vl;
+ unsigned debug_vl;
+ if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
+ return ERROR_FAIL;
+
+ unsigned vnum = regno - GDB_REGNO_V0;
+ unsigned xlen = riscv_xlen(target);
+
+ struct riscv_program program;
+ riscv_program_init(&program, target);
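+	/* Each execution copies element 0 into S0 (vmv.x.s), then slides the
+	 * register down by one and re-inserts S0 at the top (vslide1down.vx).
+	 * After debug_vl executions every slice has been read and the register
+	 * is rotated back to its original value. */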
+ riscv_program_insert(&program, vmv_x_s(S0, vnum));
+ riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
+
+ int result = ERROR_OK;
+ for (unsigned i = 0; i < debug_vl; i++) {
+ /* Executing the program might result in an exception if there is some
+ * issue with the vector implementation/instructions we're using. If that
+ * happens, attempt to restore as usual. We may have clobbered the
+ * vector register we tried to read already.
+ * For other failures, we just return error because things are probably
+ * so messed up that attempting to restore isn't going to help. */
+ result = riscv_program_exec(&program, target);
+ if (result == ERROR_OK) {
+ uint64_t v;
+ if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+ buf_set_u64(value, xlen * i, xlen, v);
+ } else {
+ break;
+ }
+ }
+
+ if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
+ return ERROR_FAIL;
+ if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ return result;
+}
+
+static int riscv013_set_register_buf(struct target *target,
+ int regno, const uint8_t *value)
+{
+ assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
+
+ riscv_reg_t s0;
+ if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ uint64_t mstatus;
+ if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
+ return ERROR_FAIL;
+
+ uint64_t vtype, vl;
+ unsigned debug_vl;
+ if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
+ return ERROR_FAIL;
+
+ unsigned vnum = regno - GDB_REGNO_V0;
+ unsigned xlen = riscv_xlen(target);
+
+ struct riscv_program program;
+ riscv_program_init(&program, target);
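+	/* Each execution slides the register down by one and inserts the slice
+	 * currently in S0 at the top, so after debug_vl executions the register
+	 * holds the complete new value. */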
+ riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
+ int result = ERROR_OK;
+ for (unsigned i = 0; i < debug_vl; i++) {
+ if (register_write_direct(target, GDB_REGNO_S0,
+ buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
+ return ERROR_FAIL;
+ result = riscv_program_exec(&program, target);
+ if (result != ERROR_OK)
+ break;
+ }
+
+ if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
+ return ERROR_FAIL;
+ if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ return result;
+}
+
static int init_target(struct command_context *cmd_ctx,
struct target *target)
{
@@ -1594,13 +2013,16 @@ static int init_target(struct command_context *cmd_ctx,
generic_info->get_register = &riscv013_get_register;
generic_info->set_register = &riscv013_set_register;
+ generic_info->get_register_buf = &riscv013_get_register_buf;
+ generic_info->set_register_buf = &riscv013_set_register_buf;
generic_info->select_current_hart = &riscv013_select_current_hart;
generic_info->is_halted = &riscv013_is_halted;
- generic_info->halt_current_hart = &riscv013_halt_current_hart;
- generic_info->resume_current_hart = &riscv013_resume_current_hart;
+ generic_info->resume_go = &riscv013_resume_go;
generic_info->step_current_hart = &riscv013_step_current_hart;
generic_info->on_halt = &riscv013_on_halt;
- generic_info->on_resume = &riscv013_on_resume;
+ generic_info->resume_prep = &riscv013_resume_prep;
+ generic_info->halt_prep = &riscv013_halt_prep;
+ generic_info->halt_go = &riscv013_halt_go;
generic_info->on_step = &riscv013_on_step;
generic_info->halt_reason = &riscv013_halt_reason;
generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
@@ -1614,8 +2036,11 @@ static int init_target(struct command_context *cmd_ctx,
generic_info->authdata_write = &riscv013_authdata_write;
generic_info->dmi_read = &dmi_read;
generic_info->dmi_write = &dmi_write;
+ generic_info->read_memory = read_memory;
generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
generic_info->test_compliance = &riscv013_test_compliance;
+ generic_info->hart_count = &riscv013_hart_count;
+ generic_info->data_bits = &riscv013_data_bits;
generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
if (!generic_info->version_specific)
return ERROR_FAIL;
@@ -1647,7 +2072,7 @@ static int assert_reset(struct target *target)
select_dmi(target);
- uint32_t control_base = set_field(0, DMI_DMCONTROL_DMACTIVE, 1);
+ uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);
if (target->rtos) {
/* There's only one target, and OpenOCD thinks each hart is a thread.
@@ -1662,25 +2087,34 @@ static int assert_reset(struct target *target)
continue;
control = set_hartsel(control_base, i);
- control = set_field(control, DMI_DMCONTROL_HALTREQ,
+ control = set_field(control, DM_DMCONTROL_HALTREQ,
target->reset_halt ? 1 : 0);
- dmi_write(target, DMI_DMCONTROL, control);
+ dmi_write(target, DM_DMCONTROL, control);
}
/* Assert ndmreset */
- control = set_field(control, DMI_DMCONTROL_NDMRESET, 1);
- dmi_write(target, DMI_DMCONTROL, control);
+ control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
+ dmi_write(target, DM_DMCONTROL, control);
} else {
/* Reset just this hart. */
uint32_t control = set_hartsel(control_base, r->current_hartid);
- control = set_field(control, DMI_DMCONTROL_HALTREQ,
+ control = set_field(control, DM_DMCONTROL_HALTREQ,
target->reset_halt ? 1 : 0);
- control = set_field(control, DMI_DMCONTROL_NDMRESET, 1);
- dmi_write(target, DMI_DMCONTROL, control);
+ control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
+ dmi_write(target, DM_DMCONTROL, control);
}
target->state = TARGET_RESET;
+ dm013_info_t *dm = get_dm(target);
+ if (!dm)
+ return ERROR_FAIL;
+
+ /* The DM might have gotten reset if OpenOCD called us in some reset that
+ * involves SRST being toggled. So clear our cache which may be out of
+ * date. */
+ memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));
+
return ERROR_OK;
}
@@ -1692,9 +2126,9 @@ static int deassert_reset(struct target *target)
/* Clear the reset, but make sure haltreq is still set */
uint32_t control = 0;
- control = set_field(control, DMI_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
- control = set_field(control, DMI_DMCONTROL_DMACTIVE, 1);
- dmi_write(target, DMI_DMCONTROL,
+ control = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
+ control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
+ dmi_write(target, DM_DMCONTROL,
set_hartsel(control, r->current_hartid));
uint32_t dmstatus;
@@ -1706,7 +2140,7 @@ static int deassert_reset(struct target *target)
if (target->rtos) {
if (!riscv_hart_enabled(target, index))
continue;
- dmi_write(target, DMI_DMCONTROL,
+ dmi_write(target, DM_DMCONTROL,
set_hartsel(control, index));
} else {
index = r->current_hartid;
@@ -1716,10 +2150,10 @@ static int deassert_reset(struct target *target)
uint32_t expected_field;
if (target->reset_halt) {
operation = "halt";
- expected_field = DMI_DMSTATUS_ALLHALTED;
+ expected_field = DM_DMSTATUS_ALLHALTED;
} else {
operation = "run";
- expected_field = DMI_DMSTATUS_ALLRUNNING;
+ expected_field = DM_DMSTATUS_ALLRUNNING;
}
LOG_DEBUG("Waiting for hart %d to %s out of reset.", index, operation);
while (1) {
@@ -1744,11 +2178,11 @@ static int deassert_reset(struct target *target)
}
target->state = TARGET_HALTED;
- if (get_field(dmstatus, DMI_DMSTATUS_ALLHAVERESET)) {
+ if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
/* Ack reset. */
- dmi_write(target, DMI_DMCONTROL,
+ dmi_write(target, DM_DMCONTROL,
set_hartsel(control, index) |
- DMI_DMCONTROL_ACKHAVERESET);
+ DM_DMCONTROL_ACKHAVERESET);
}
if (!target->rtos)
@@ -1758,33 +2192,6 @@ static int deassert_reset(struct target *target)
return ERROR_OK;
}
-/**
- * @par size in bytes
- */
-static void write_to_buf(uint8_t *buffer, uint64_t value, unsigned size)
-{
- switch (size) {
- case 8:
- buffer[7] = value >> 56;
- buffer[6] = value >> 48;
- buffer[5] = value >> 40;
- buffer[4] = value >> 32;
- /* falls through */
- case 4:
- buffer[3] = value >> 24;
- buffer[2] = value >> 16;
- /* falls through */
- case 2:
- buffer[1] = value >> 8;
- /* falls through */
- case 1:
- buffer[0] = value;
- break;
- default:
- assert(false);
- }
-}
-
static int execute_fence(struct target *target)
{
int old_hartid = riscv_current_hartid(target);
@@ -1805,6 +2212,10 @@ static int execute_fence(struct target *target)
if (!riscv_hart_enabled(target, i))
continue;
+ if (i == old_hartid)
+ /* Fence already executed for this hart */
+ continue;
+
riscv_set_current_hartid(target, i);
struct riscv_program program;
@@ -1830,7 +2241,21 @@ static void log_memory_access(target_addr_t address, uint64_t value,
char fmt[80];
sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
address, read ? "read" : "write", size_bytes * 2);
- value &= (((uint64_t) 0x1) << (size_bytes * 8)) - 1;
+ switch (size_bytes) {
+ case 1:
+ value &= 0xff;
+ break;
+ case 2:
+ value &= 0xffff;
+ break;
+ case 4:
+ value &= 0xffffffffUL;
+ break;
+ case 8:
+ break;
+ default:
+ assert(false);
+ }
LOG_DEBUG(fmt, value);
}
@@ -1840,28 +2265,16 @@ static int read_memory_bus_word(struct target *target, target_addr_t address,
uint32_t size, uint8_t *buffer)
{
uint32_t value;
- if (size > 12) {
- if (dmi_read(target, &value, DMI_SBDATA3) != ERROR_OK)
- return ERROR_FAIL;
- write_to_buf(buffer + 12, value, 4);
- log_memory_access(address + 12, value, 4, true);
- }
- if (size > 8) {
- if (dmi_read(target, &value, DMI_SBDATA2) != ERROR_OK)
- return ERROR_FAIL;
- write_to_buf(buffer + 8, value, 4);
- log_memory_access(address + 8, value, 4, true);
- }
- if (size > 4) {
- if (dmi_read(target, &value, DMI_SBDATA1) != ERROR_OK)
- return ERROR_FAIL;
- write_to_buf(buffer + 4, value, 4);
- log_memory_access(address + 4, value, 4, true);
+ int result;
+ static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
+ assert(size <= 16);
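+	/* Read sbdata3 down to sbdata0; sbdata0 must be read last because that
+	 * read is the one that may trigger the next bus access when
+	 * sbreadondata/sbautoincrement are in effect. */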
+ for (int i = (size - 1) / 4; i >= 0; i--) {
+ result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
+ if (result != ERROR_OK)
+ return result;
+ buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
+ log_memory_access(address + i * 4, value, MIN(size, 4), true);
}
- if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
- return ERROR_FAIL;
- write_to_buf(buffer, value, MIN(size, 4));
- log_memory_access(address, value, MIN(size, 4), true);
return ERROR_OK;
}
@@ -1869,15 +2282,15 @@ static uint32_t sb_sbaccess(unsigned size_bytes)
{
switch (size_bytes) {
case 1:
- return set_field(0, DMI_SBCS_SBACCESS, 0);
+ return set_field(0, DM_SBCS_SBACCESS, 0);
case 2:
- return set_field(0, DMI_SBCS_SBACCESS, 1);
+ return set_field(0, DM_SBCS_SBACCESS, 1);
case 4:
- return set_field(0, DMI_SBCS_SBACCESS, 2);
+ return set_field(0, DM_SBCS_SBACCESS, 2);
case 8:
- return set_field(0, DMI_SBCS_SBACCESS, 3);
+ return set_field(0, DM_SBCS_SBACCESS, 3);
case 16:
- return set_field(0, DMI_SBCS_SBACCESS, 4);
+ return set_field(0, DM_SBCS_SBACCESS, 4);
}
assert(0);
return 0; /* Make mingw happy. */
@@ -1886,15 +2299,15 @@ static uint32_t sb_sbaccess(unsigned size_bytes)
static target_addr_t sb_read_address(struct target *target)
{
RISCV013_INFO(info);
- unsigned sbasize = get_field(info->sbcs, DMI_SBCS_SBASIZE);
+ unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
target_addr_t address = 0;
uint32_t v;
if (sbasize > 32) {
- dmi_read(target, &v, DMI_SBADDRESS1);
+ dmi_read(target, &v, DM_SBADDRESS1);
address |= v;
address <<= 32;
}
- dmi_read(target, &v, DMI_SBADDRESS0);
+ dmi_read(target, &v, DM_SBADDRESS0);
address |= v;
return address;
}
@@ -1902,24 +2315,24 @@ static target_addr_t sb_read_address(struct target *target)
static int sb_write_address(struct target *target, target_addr_t address)
{
RISCV013_INFO(info);
- unsigned sbasize = get_field(info->sbcs, DMI_SBCS_SBASIZE);
+ unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
/* There currently is no support for >64-bit addresses in OpenOCD. */
if (sbasize > 96)
- dmi_write(target, DMI_SBADDRESS3, 0);
+ dmi_write(target, DM_SBADDRESS3, 0);
if (sbasize > 64)
- dmi_write(target, DMI_SBADDRESS2, 0);
+ dmi_write(target, DM_SBADDRESS2, 0);
if (sbasize > 32)
- dmi_write(target, DMI_SBADDRESS1, address >> 32);
- return dmi_write(target, DMI_SBADDRESS0, address);
+ dmi_write(target, DM_SBADDRESS1, address >> 32);
+ return dmi_write(target, DM_SBADDRESS0, address);
}
static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
{
time_t start = time(NULL);
while (1) {
- if (dmi_read(target, sbcs, DMI_SBCS) != ERROR_OK)
+ if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
return ERROR_FAIL;
- if (!get_field(*sbcs, DMI_SBCS_SBBUSY))
+ if (!get_field(*sbcs, DM_SBCS_SBBUSY))
return ERROR_OK;
if (time(NULL) - start > riscv_command_timeout_sec) {
LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
@@ -1930,9 +2343,45 @@ static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
}
}
+static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
+{
+ if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
+ /* Read DCSR */
+ uint64_t dcsr;
+ if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Read and save MSTATUS */
+ if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
+ return ERROR_FAIL;
+ *mstatus_old = *mstatus;
+
+ /* If we come from m-mode with mprv set, we want to keep mpp */
+ if (get_field(dcsr, DCSR_PRV) < 3) {
+ /* MPP = PRIV */
+ *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
+
+ /* MPRV = 1 */
+ *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
+
+ /* Write MSTATUS */
+ if (*mstatus != *mstatus_old)
+ if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ }
+
+ return ERROR_OK;
+}
+
static int read_memory_bus_v0(struct target *target, target_addr_t address,
- uint32_t size, uint32_t count, uint8_t *buffer)
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
+ if (size != increment) {
+ LOG_ERROR("sba v0 reads only support size==increment");
+ return ERROR_NOT_IMPLEMENTED;
+ }
+
LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
TARGET_PRIxADDR, size, count, address);
uint8_t *t_buffer = buffer;
@@ -1940,29 +2389,29 @@ static int read_memory_bus_v0(struct target *target, target_addr_t address,
riscv_addr_t fin_addr = address + (count * size);
uint32_t access = 0;
- const int DMI_SBCS_SBSINGLEREAD_OFFSET = 20;
- const uint32_t DMI_SBCS_SBSINGLEREAD = (0x1U << DMI_SBCS_SBSINGLEREAD_OFFSET);
+ const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
+ const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);
- const int DMI_SBCS_SBAUTOREAD_OFFSET = 15;
- const uint32_t DMI_SBCS_SBAUTOREAD = (0x1U << DMI_SBCS_SBAUTOREAD_OFFSET);
+ const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
+ const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);
	/* We favor one-off reads if there is an issue */
if (count == 1) {
for (uint32_t i = 0; i < count; i++) {
- if (dmi_read(target, &access, DMI_SBCS) != ERROR_OK)
+ if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
return ERROR_FAIL;
- dmi_write(target, DMI_SBADDRESS0, cur_addr);
+ dmi_write(target, DM_SBADDRESS0, cur_addr);
/* size/2 matching the bit access of the spec 0.13 */
- access = set_field(access, DMI_SBCS_SBACCESS, size/2);
- access = set_field(access, DMI_SBCS_SBSINGLEREAD, 1);
+ access = set_field(access, DM_SBCS_SBACCESS, size/2);
+ access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
- dmi_write(target, DMI_SBCS, access);
+ dmi_write(target, DM_SBCS, access);
/* 3) read */
uint32_t value;
- if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
+ if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
return ERROR_FAIL;
LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
- write_to_buf(t_buffer, value, size);
+ buf_set_u32(t_buffer, 0, 8 * size, value);
t_buffer += size;
cur_addr += size;
}
@@ -1971,36 +2420,36 @@ static int read_memory_bus_v0(struct target *target, target_addr_t address,
/* has to be the same size if we want to read a block */
LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
- if (dmi_read(target, &access, DMI_SBCS) != ERROR_OK)
+ if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
return ERROR_FAIL;
/* set current address */
- dmi_write(target, DMI_SBADDRESS0, cur_addr);
+ dmi_write(target, DM_SBADDRESS0, cur_addr);
/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
* size/2 matching the bit access of the spec 0.13 */
- access = set_field(access, DMI_SBCS_SBACCESS, size/2);
- access = set_field(access, DMI_SBCS_SBAUTOREAD, 1);
- access = set_field(access, DMI_SBCS_SBSINGLEREAD, 1);
- access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 1);
+ access = set_field(access, DM_SBCS_SBACCESS, size/2);
+ access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
+ access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
+ access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
LOG_DEBUG("\r\naccess: 0x%08x", access);
- dmi_write(target, DMI_SBCS, access);
+ dmi_write(target, DM_SBCS, access);
while (cur_addr < fin_addr) {
LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
PRIx64, size, count, cur_addr);
/* read */
uint32_t value;
- if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
+ if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
return ERROR_FAIL;
- write_to_buf(t_buffer, value, size);
+ buf_set_u32(t_buffer, 0, 8 * size, value);
cur_addr += size;
t_buffer += size;
/* if we are reaching last address, we must clear autoread */
if (cur_addr == fin_addr && count != 1) {
- dmi_write(target, DMI_SBCS, 0);
- if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
+ dmi_write(target, DM_SBCS, 0);
+ if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
return ERROR_FAIL;
- write_to_buf(t_buffer, value, size);
+ buf_set_u32(t_buffer, 0, 8 * size, value);
}
}
@@ -2011,21 +2460,30 @@ static int read_memory_bus_v0(struct target *target, target_addr_t address,
* Read the requested memory using the system bus interface.
*/
static int read_memory_bus_v1(struct target *target, target_addr_t address,
- uint32_t size, uint32_t count, uint8_t *buffer)
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
+ if (increment != size && increment != 0) {
+ LOG_ERROR("sba v1 reads only support increment of size or 0");
+ return ERROR_NOT_IMPLEMENTED;
+ }
+
RISCV013_INFO(info);
target_addr_t next_address = address;
target_addr_t end_address = address + count * size;
while (next_address < end_address) {
- uint32_t sbcs = set_field(0, DMI_SBCS_SBREADONADDR, 1);
- sbcs |= sb_sbaccess(size);
- sbcs = set_field(sbcs, DMI_SBCS_SBAUTOINCREMENT, 1);
- sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, count > 1);
- dmi_write(target, DMI_SBCS, sbcs);
+ uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
+ sbcs_write |= sb_sbaccess(size);
+ if (increment == size)
+ sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
+ if (count > 1)
+ sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
+ if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
+ return ERROR_FAIL;
/* This address write will trigger the first read. */
- sb_write_address(target, next_address);
+ if (sb_write_address(target, next_address) != ERROR_OK)
+ return ERROR_FAIL;
if (info->bus_master_read_delay) {
jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
@@ -2035,35 +2493,98 @@ static int read_memory_bus_v1(struct target *target, target_addr_t address,
}
}
+ /* First value has been read, and is waiting for us to issue a DMI read
+ * to get it. */
+
+ static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
+ assert(size <= 16);
+ target_addr_t next_read = address - 1;
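+		/* A DMI scan returns the result of the previous operation, so each
+		 * value captured below belongs to the address tracked in next_read,
+		 * one register behind the read just scheduled. address - 1 marks
+		 * "nothing captured yet". */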
for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
- read_memory_bus_word(target, address + i * size, size,
- buffer + i * size);
+ for (int j = (size - 1) / 4; j >= 0; j--) {
+ uint32_t value;
+ unsigned attempt = 0;
+ while (1) {
+ if (attempt++ > 100) {
+ LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
+ next_read);
+ return ERROR_FAIL;
+ }
+ dmi_status_t status = dmi_scan(target, NULL, &value,
+ DMI_OP_READ, sbdata[j], 0, false);
+ if (status == DMI_STATUS_BUSY)
+ increase_dmi_busy_delay(target);
+ else if (status == DMI_STATUS_SUCCESS)
+ break;
+ else
+ return ERROR_FAIL;
+ }
+ if (next_read != address - 1) {
+ buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
+ log_memory_access(next_read, value, MIN(size, 4), true);
+ }
+ next_read = address + i * size + j * 4;
+ }
}
- sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, 0);
- dmi_write(target, DMI_SBCS, sbcs);
+ uint32_t sbcs_read = 0;
+ if (count > 1) {
+ uint32_t value;
+ unsigned attempt = 0;
+ while (1) {
+ if (attempt++ > 100) {
+ LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
+ next_read);
+ return ERROR_FAIL;
+ }
+ dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
+ if (status == DMI_STATUS_BUSY)
+ increase_dmi_busy_delay(target);
+ else if (status == DMI_STATUS_SUCCESS)
+ break;
+ else
+ return ERROR_FAIL;
+ }
+ buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
+ log_memory_access(next_read, value, MIN(size, 4), true);
- read_memory_bus_word(target, address + (count - 1) * size, size,
- buffer + (count - 1) * size);
+ /* "Writes to sbcs while sbbusy is high result in undefined behavior.
+ * A debugger must not write to sbcs until it reads sbbusy as 0." */
+ if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
+ return ERROR_FAIL;
- if (read_sbcs_nonbusy(target, &sbcs) != ERROR_OK)
- return ERROR_FAIL;
+ sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
+ if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ /* Read the last word, after we disabled sbreadondata if necessary. */
+ if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
+ !get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
+ if (read_memory_bus_word(target, address + (count - 1) * size, size,
+ buffer + (count - 1) * size) != ERROR_OK)
+ return ERROR_FAIL;
- if (get_field(sbcs, DMI_SBCS_SBBUSYERROR)) {
+ if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
/* We read while the target was busy. Slow down and try again. */
- dmi_write(target, DMI_SBCS, DMI_SBCS_SBBUSYERROR);
+ if (dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR) != ERROR_OK)
+ return ERROR_FAIL;
next_address = sb_read_address(target);
info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
continue;
}
- unsigned error = get_field(sbcs, DMI_SBCS_SBERROR);
+ unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
if (error == 0) {
next_address = end_address;
} else {
/* Some error indicating the bus access failed, but not because of
* something we did wrong. */
- dmi_write(target, DMI_SBCS, DMI_SBCS_SBERROR);
+ if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
+ return ERROR_FAIL;
return ERROR_FAIL;
}
}
@@ -2086,21 +2607,152 @@ static int batch_run(const struct target *target, struct riscv_batch *batch)
return riscv_batch_run(batch);
}
+/*
+ * Performs a memory read using memory access abstract commands. The read sizes
+ * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
+ * aamsize fields in the memory access abstract command.
+ */
+static int read_memory_abstract(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
+{
+ if (size != increment) {
+ LOG_ERROR("abstract command reads only support size==increment");
+ return ERROR_NOT_IMPLEMENTED;
+ }
+
+ int result = ERROR_OK;
+
+ LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
+ size, address);
+
+ memset(buffer, 0, count * size);
+
+ /* Convert the size (bytes) to width (bits) */
+ unsigned width = size << 3;
+ if (width > 64) {
+ /* TODO: Add 128b support if it's ever used. Involves modifying
+ read/write_abstract_arg() to work on two 64b values. */
+ LOG_ERROR("Unsupported size: %d bits", size);
+ return ERROR_FAIL;
+ }
+
+ /* Create the command (physical address, postincrement, read) */
+ uint32_t command = access_memory_command(target, false, width, true, false);
+
+ /* Execute the reads */
+ uint8_t *p = buffer;
+ bool updateaddr = true;
+ unsigned width32 = (width + 31) / 32 * 32;
+ for (uint32_t c = 0; c < count; c++) {
+ /* Only update the address initially and let postincrement update it */
+ if (updateaddr) {
+ /* Set arg1 to the address: address + c * size */
+ result = write_abstract_arg(target, 1, address, riscv_xlen(target));
+ if (result != ERROR_OK) {
+ LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
+ return result;
+ }
+ }
+
+ /* Execute the command */
+ result = execute_abstract_command(target, command);
+ if (result != ERROR_OK) {
+ LOG_ERROR("Failed to execute command read_memory_abstract().");
+ return result;
+ }
+
+ /* Copy arg0 to buffer (rounded width up to nearest 32) */
+ riscv_reg_t value = read_abstract_arg(target, 0, width32);
+ buf_set_u64(p, 0, 8 * size, value);
+
+ updateaddr = false;
+ p += size;
+ }
+
+ return result;
+}
+
+/*
+ * Performs a memory write using memory access abstract commands. The write
+ * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
+ * byte aamsize fields in the memory access abstract command.
+ */
+static int write_memory_abstract(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, const uint8_t *buffer)
+{
+ int result = ERROR_OK;
+
+ LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
+ size, address);
+
+ /* Convert the size (bytes) to width (bits) */
+ unsigned width = size << 3;
+ if (width > 64) {
+ /* TODO: Add 128b support if it's ever used. Involves modifying
+ read/write_abstract_arg() to work on two 64b values. */
+ LOG_ERROR("Unsupported size: %d bits", width);
+ return ERROR_FAIL;
+ }
+
+ /* Create the command (physical address, postincrement, write) */
+ uint32_t command = access_memory_command(target, false, width, true, true);
+
+ /* Execute the writes */
+ const uint8_t *p = buffer;
+ bool updateaddr = true;
+ for (uint32_t c = 0; c < count; c++) {
+ /* Move data to arg0 */
+ riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
+ result = write_abstract_arg(target, 0, value, riscv_xlen(target));
+ if (result != ERROR_OK) {
+ LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
+ return result;
+ }
+
+ /* Only update the address initially and let postincrement update it */
+ if (updateaddr) {
+ /* Set arg1 to the address: address + c * size */
+ result = write_abstract_arg(target, 1, address, riscv_xlen(target));
+ if (result != ERROR_OK) {
+ LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
+ return result;
+ }
+ }
+
+ /* Execute the command */
+ result = execute_abstract_command(target, command);
+ if (result != ERROR_OK) {
+ LOG_ERROR("Failed to execute command write_memory_abstract().");
+ return result;
+ }
+
+ updateaddr = false;
+ p += size;
+ }
+
+ return result;
+}
+
/**
* Read the requested memory, taking care to execute every read exactly once,
* even if cmderr=busy is encountered.
*/
static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
- uint32_t size, uint32_t count, uint8_t *buffer)
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
RISCV013_INFO(info);
int result = ERROR_OK;
- /* Write address to S0, and execute buffer. */
+ /* Write address to S0. */
result = register_write_direct(target, GDB_REGNO_S0, address);
if (result != ERROR_OK)
- goto error;
+ return result;
+
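+	/* When increment is 0 the program counts completed reads in S2 instead
+	 * of advancing the address in S0, so start the counter at zero. */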
+ if (increment == 0 &&
+ register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
+ return ERROR_FAIL;
+
uint32_t command = access_register_command(target, GDB_REGNO_S1,
riscv_xlen(target),
AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
@@ -2108,32 +2760,30 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
return ERROR_FAIL;
/* First read has just triggered. Result is in s1. */
-
if (count == 1) {
uint64_t value;
if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
return ERROR_FAIL;
- write_to_buf(buffer, value, size);
+ buf_set_u64(buffer, 0, 8 * size, value);
log_memory_access(address, value, size, true);
return ERROR_OK;
}
- if (dmi_write(target, DMI_ABSTRACTAUTO,
- 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
+ if (dmi_write(target, DM_ABSTRACTAUTO,
+ 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
goto error;
/* Read garbage from dmi_data0, which triggers another execution of the
* program. Now dmi_data0 contains the first good result, and s1 the next
* memory value. */
- if (dmi_read_exec(target, NULL, DMI_DATA0) != ERROR_OK)
+ if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
goto error;
/* read_addr is the next address that the hart will read from, which is the
* value in s0. */
- riscv_addr_t read_addr = address + 2 * size;
- riscv_addr_t fin_addr = address + (count * size);
- while (read_addr < fin_addr) {
- LOG_DEBUG("read_addr=0x%" PRIx64 ", fin_addr=0x%" PRIx64, read_addr,
- fin_addr);
+ unsigned index = 2;
+ while (index < count) {
+ riscv_addr_t read_addr = address + index * increment;
+ LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
/* The pipeline looks like this:
* memory -> s1 -> dm_data0 -> debugger
* Right now:
@@ -2142,15 +2792,16 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
* dm_data0 contains[read_addr-size*2]
*/
- LOG_DEBUG("creating burst to read from 0x%" PRIx64
- " up to 0x%" PRIx64, read_addr, fin_addr);
- assert(read_addr >= address && read_addr < fin_addr);
struct riscv_batch *batch = riscv_batch_alloc(target, 32,
info->dmi_busy_delay + info->ac_busy_delay);
+ if (!batch)
+ return ERROR_FAIL;
- size_t reads = 0;
- for (riscv_addr_t addr = read_addr; addr < fin_addr; addr += size) {
- riscv_batch_add_dmi_read(batch, DMI_DATA0);
+ unsigned reads = 0;
+ for (unsigned j = index; j < count; j++) {
+ if (size > 4)
+ riscv_batch_add_dmi_read(batch, DM_DATA1);
+ riscv_batch_add_dmi_read(batch, DM_DATA0);
reads++;
if (riscv_batch_full(batch))
@@ -2163,19 +2814,19 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
* and update our copy of cmderr. If we see that DMI is busy here,
* dmi_busy_delay will be incremented. */
uint32_t abstractcs;
- if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
+ if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
return ERROR_FAIL;
- while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY))
- if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
+ while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
+ if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
return ERROR_FAIL;
- info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
+ info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
- riscv_addr_t next_read_addr;
+ unsigned next_index;
unsigned ignore_last = 0;
switch (info->cmderr) {
case CMDERR_NONE:
LOG_DEBUG("successful (partial?) memory read");
- next_read_addr = read_addr + reads * size;
+ next_index = index + reads;
break;
case CMDERR_BUSY:
LOG_DEBUG("memory read resulted in busy response");
@@ -2183,35 +2834,49 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
increase_ac_busy_delay(target);
riscv013_clear_abstract_error(target);
- dmi_write(target, DMI_ABSTRACTAUTO, 0);
+ dmi_write(target, DM_ABSTRACTAUTO, 0);
- uint32_t dmi_data0;
+ uint32_t dmi_data0, dmi_data1 = 0;
/* This is definitely a good version of the value that we
* attempted to read when we discovered that the target was
* busy. */
- if (dmi_read(target, &dmi_data0, DMI_DATA0) != ERROR_OK) {
+ if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
+ riscv_batch_free(batch);
+ goto error;
+ }
+ if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
riscv_batch_free(batch);
goto error;
}
/* See how far we got, clobbering dmi_data0. */
- result = register_read_direct(target, &next_read_addr,
- GDB_REGNO_S0);
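+			/* Work out how far the hart got: with increment == 0 that is the
+			 * count in S2, otherwise it is derived from the next read address
+			 * left in S0. */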
+ if (increment == 0) {
+ uint64_t counter;
+ result = register_read_direct(target, &counter, GDB_REGNO_S2);
+ next_index = counter;
+ } else {
+ uint64_t next_read_addr;
+ result = register_read_direct(target, &next_read_addr,
+ GDB_REGNO_S0);
+ next_index = (next_read_addr - address) / increment;
+ }
if (result != ERROR_OK) {
riscv_batch_free(batch);
goto error;
}
- write_to_buf(buffer + next_read_addr - 2 * size - address, dmi_data0, size);
- log_memory_access(next_read_addr - 2 * size, dmi_data0, size, true);
+
+ uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
+ buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
+ log_memory_access(address + (next_index - 2) * size, value64, size, true);
/* Restore the command, and execute it.
- * Now DMI_DATA0 contains the next value just as it would if no
+ * Now DM_DATA0 contains the next value just as it would if no
* error had occurred. */
- dmi_write_exec(target, DMI_COMMAND, command);
- next_read_addr += size;
+ dmi_write_exec(target, DM_COMMAND, command, true);
+ next_index++;
- dmi_write(target, DMI_ABSTRACTAUTO,
- 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
+ dmi_write(target, DM_ABSTRACTAUTO,
+ 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
ignore_last = 1;
@@ -2226,16 +2891,18 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
/* Now read whatever we got out of the batch. */
dmi_status_t status = DMI_STATUS_SUCCESS;
- for (size_t i = 0; i < reads; i++) {
- riscv_addr_t receive_addr = read_addr + (i-2) * size;
- assert(receive_addr < address + size * count);
- if (receive_addr < address)
- continue;
- if (receive_addr > next_read_addr - (3 + ignore_last) * size)
+ unsigned read = 0;
+ assert(index >= 2);
+ for (unsigned j = index - 2; j < index + reads; j++) {
+ assert(j < count);
+ LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
+ index, reads, next_index, ignore_last, j);
+ if (j + 3 + ignore_last > next_index)
break;
- uint64_t dmi_out = riscv_batch_get_dmi_read(batch, i);
- status = get_field(dmi_out, DTM_DMI_OP);
+ status = riscv_batch_get_dmi_read_op(batch, read);
+ uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
+ read++;
if (status != DMI_STATUS_SUCCESS) {
/* If we're here because of busy count, dmi_busy_delay will
* already have been increased and busy state will have been
@@ -2251,28 +2918,41 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
result = ERROR_FAIL;
goto error;
}
- uint32_t value = get_field(dmi_out, DTM_DMI_DATA);
- riscv_addr_t offset = receive_addr - address;
- write_to_buf(buffer + offset, value, size);
- log_memory_access(receive_addr, value, size, true);
-
- receive_addr += size;
+ if (size > 4) {
+ status = riscv_batch_get_dmi_read_op(batch, read);
+ if (status != DMI_STATUS_SUCCESS) {
+ LOG_WARNING("Batch memory read encountered DMI error %d. "
+ "Falling back on slower reads.", status);
+ riscv_batch_free(batch);
+ result = ERROR_FAIL;
+ goto error;
+ }
+ value <<= 32;
+ value |= riscv_batch_get_dmi_read_data(batch, read);
+ read++;
+ }
+ riscv_addr_t offset = j * size;
+ buf_set_u64(buffer + offset, 0, 8 * size, value);
+ log_memory_access(address + j * increment, value, size, true);
}
- read_addr = next_read_addr;
+ index = next_index;
riscv_batch_free(batch);
}
- dmi_write(target, DMI_ABSTRACTAUTO, 0);
+ dmi_write(target, DM_ABSTRACTAUTO, 0);
if (count > 1) {
/* Read the penultimate word. */
- uint32_t value;
- if (dmi_read(target, &value, DMI_DATA0) != ERROR_OK)
+ uint32_t dmi_data0, dmi_data1 = 0;
+ if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
+ return ERROR_FAIL;
+ if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
return ERROR_FAIL;
- write_to_buf(buffer + size * (count-2), value, size);
- log_memory_access(address + size * (count-2), value, size, true);
+ uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
+ buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
+ log_memory_access(address + size * (count - 2), value64, size, true);
}
/* Read the last word. */
@@ -2280,23 +2960,100 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
result = register_read_direct(target, &value, GDB_REGNO_S1);
if (result != ERROR_OK)
goto error;
- write_to_buf(buffer + size * (count-1), value, size);
+ buf_set_u64(buffer + size * (count-1), 0, 8 * size, value);
log_memory_access(address + size * (count-1), value, size, true);
return ERROR_OK;
error:
- dmi_write(target, DMI_ABSTRACTAUTO, 0);
+ dmi_write(target, DM_ABSTRACTAUTO, 0);
return result;
}
+/* Only need to save/restore one GPR to read a single word, and the progbuf
+ * program doesn't need to increment. */
+static int read_memory_progbuf_one(struct target *target, target_addr_t address,
+ uint32_t size, uint8_t *buffer)
+{
+ uint64_t mstatus = 0;
+ uint64_t mstatus_old = 0;
+ if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
+ return ERROR_FAIL;
+
+ uint64_t s0;
+
+ if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Write the program (load, increment) */
+ struct riscv_program program;
+ riscv_program_init(&program, target);
+ if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
+ riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
+ switch (size) {
+ case 1:
+ riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
+ break;
+ case 2:
+ riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
+ break;
+ case 4:
+ riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
+ break;
+ case 8:
+ riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
+ break;
+ default:
+ LOG_ERROR("Unsupported size: %d", size);
+ return ERROR_FAIL;
+ }
+ if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
+ riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
+
+ if (riscv_program_ebreak(&program) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_program_write(&program) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Write address to S0, and execute buffer. */
+ if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
+ return ERROR_FAIL;
+ uint32_t command = access_register_command(target, GDB_REGNO_S0,
+ riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
+ AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
+ if (execute_abstract_command(target, command) != ERROR_OK)
+ return ERROR_FAIL;
+
+ uint64_t value;
+ if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+ buf_set_u64(buffer, 0, 8 * size, value);
+ log_memory_access(address, value, size, true);
+
+ if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Restore MSTATUS */
+ if (mstatus != mstatus_old)
+	if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old) != ERROR_OK)
+ return ERROR_FAIL;
+
+ return ERROR_OK;
+}
+
/**
* Read the requested memory, silently handling memory access errors.
*/
static int read_memory_progbuf(struct target *target, target_addr_t address,
- uint32_t size, uint32_t count, uint8_t *buffer)
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
+ if (riscv_xlen(target) < size * 8) {
+ LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
+ riscv_xlen(target), size * 8);
+ return ERROR_FAIL;
+ }
+
int result = ERROR_OK;
LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
@@ -2306,21 +3063,35 @@ static int read_memory_progbuf(struct target *target, target_addr_t address,
memset(buffer, 0, count*size);
+ if (execute_fence(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (count == 1)
+ return read_memory_progbuf_one(target, address, size, buffer);
+
+ uint64_t mstatus = 0;
+ uint64_t mstatus_old = 0;
+ if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
+ return ERROR_FAIL;
+
/* s0 holds the next address to write to
* s1 holds the next data value to write
+ * s2 is a counter in case increment is 0
*/
- uint64_t s0, s1;
+ uint64_t s0, s1, s2;
if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
return ERROR_FAIL;
if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
return ERROR_FAIL;
-
- if (execute_fence(target) != ERROR_OK)
+	if (increment == 0 && register_read(target, &s2, GDB_REGNO_S2) != ERROR_OK)
return ERROR_FAIL;
/* Write the program (load, increment) */
struct riscv_program program;
riscv_program_init(&program, target);
+ if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
+ riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
+
switch (size) {
case 1:
riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
@@ -2331,39 +3102,48 @@ static int read_memory_progbuf(struct target *target, target_addr_t address,
case 4:
riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
break;
+ case 8:
+ riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
+ break;
default:
LOG_ERROR("Unsupported size: %d", size);
return ERROR_FAIL;
}
- riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
+
+ if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
+ riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
+ if (increment == 0)
+ riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
+ else
+ riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);
if (riscv_program_ebreak(&program) != ERROR_OK)
return ERROR_FAIL;
- riscv_program_write(&program);
+ if (riscv_program_write(&program) != ERROR_OK)
+ return ERROR_FAIL;
- result = read_memory_progbuf_inner(target, address, size, count, buffer);
+ result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);
if (result != ERROR_OK) {
/* The full read did not succeed, so we will try to read each word individually. */
/* This will not be fast, but reading outside actual memory is a special case anyway. */
/* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
target_addr_t address_i = address;
- uint32_t size_i = size;
uint32_t count_i = 1;
uint8_t *buffer_i = buffer;
- for (uint32_t i = 0; i < count; i++, address_i += size_i, buffer_i += size_i) {
+ for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
+ keep_alive();
/* TODO: This is much slower than it needs to be because we end up
* writing the address to read for every word we read. */
- result = read_memory_progbuf_inner(target, address_i, size_i, count_i, buffer_i);
+ result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);
/* The read of a single word failed, so we will just return 0 for that instead */
if (result != ERROR_OK) {
LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
- size_i, address_i);
+ size, address_i);
- uint64_t value_i = 0;
- write_to_buf(buffer_i, value_i, size_i);
+ buf_set_u64(buffer_i, 0, 8 * size, 0);
}
}
result = ERROR_OK;
@@ -2371,32 +3151,47 @@ static int read_memory_progbuf(struct target *target, target_addr_t address,
riscv_set_register(target, GDB_REGNO_S0, s0);
riscv_set_register(target, GDB_REGNO_S1, s1);
+ if (increment == 0)
+ riscv_set_register(target, GDB_REGNO_S2, s2);
+
+ /* Restore MSTATUS */
+ if (mstatus != mstatus_old)
+ if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
+ return ERROR_FAIL;
+
return result;
}
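The fallback above, reduced to a standalone sketch; the reader callbacks, their return convention, and read_with_fallback() itself are assumptions standing in for read_memory_progbuf_inner():

#include <stdint.h>
#include <string.h>

/* Placeholder reader type: returns 0 on success, nonzero on failure. */
typedef int (*mem_read_fn)(uint64_t addr, uint32_t size, uint32_t count, uint8_t *out);

/* Try one bulk read; on failure re-read word by word and zero-fill the words
 * that still cannot be read, so read-ahead tooling always gets a buffer back. */
static int read_with_fallback(mem_read_fn bulk_read, mem_read_fn single_read,
		uint64_t addr, uint32_t size, uint32_t count, uint32_t increment,
		uint8_t *buf)
{
	if (bulk_read(addr, size, count, buf) == 0)
		return 0;
	for (uint32_t i = 0; i < count; i++) {
		uint8_t *p = buf + (uint64_t)i * size;
		if (single_read(addr + (uint64_t)i * increment, size, 1, p) != 0)
			memset(p, 0, size);	/* unreadable word reads back as zero */
	}
	return 0;	/* per-word failures are deliberately swallowed */
}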
static int read_memory(struct target *target, target_addr_t address,
- uint32_t size, uint32_t count, uint8_t *buffer)
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
- RISCV013_INFO(info);
- if (info->progbufsize >= 2 && !riscv_prefer_sba)
- return read_memory_progbuf(target, address, size, count, buffer);
+ if (count == 0)
+ return ERROR_OK;
- if ((get_field(info->sbcs, DMI_SBCS_SBACCESS8) && size == 1) ||
- (get_field(info->sbcs, DMI_SBCS_SBACCESS16) && size == 2) ||
- (get_field(info->sbcs, DMI_SBCS_SBACCESS32) && size == 4) ||
- (get_field(info->sbcs, DMI_SBCS_SBACCESS64) && size == 8) ||
- (get_field(info->sbcs, DMI_SBCS_SBACCESS128) && size == 16)) {
- if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 0)
- return read_memory_bus_v0(target, address, size, count, buffer);
- else if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 1)
- return read_memory_bus_v1(target, address, size, count, buffer);
+ RISCV013_INFO(info);
+ if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
+ return read_memory_progbuf(target, address, size, count, buffer,
+ increment);
+
+ if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
+ (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
+ (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
+ (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
+ (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
+ if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
+ return read_memory_bus_v0(target, address, size, count, buffer,
+ increment);
+ else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
+ return read_memory_bus_v1(target, address, size, count, buffer,
+ increment);
}
- if (info->progbufsize >= 2)
- return read_memory_progbuf(target, address, size, count, buffer);
+ if (has_sufficient_progbuf(target, 3))
+ return read_memory_progbuf(target, address, size, count, buffer,
+ increment);
- LOG_ERROR("Don't know how to read memory on this target.");
- return ERROR_FAIL;
+ return read_memory_abstract(target, address, size, count, buffer,
+ increment);
}
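The selection order in read_memory() (and, further down, write_memory()) is a simple priority chain; a sketch with booleans standing in for the progbuf and sbcs checks, where pick_access_path() and the flag names are illustrative only:

#include <stdbool.h>

enum mem_path { PATH_PROGBUF, PATH_SYSBUS, PATH_ABSTRACT };

/* progbuf_ok stands in for has_sufficient_progbuf(target, 3), sba_width_ok for
 * the DM_SBCS_SBACCESSn test, prefer_sba for riscv_prefer_sba. */
static enum mem_path pick_access_path(bool progbuf_ok, bool sba_width_ok, bool prefer_sba)
{
	if (progbuf_ok && !prefer_sba)
		return PATH_PROGBUF;
	if (sba_width_ok)
		return PATH_SYSBUS;
	if (progbuf_ok)
		return PATH_PROGBUF;
	return PATH_ABSTRACT;	/* last resort, replacing the old LOG_ERROR path */
}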
static int write_memory_bus_v0(struct target *target, target_addr_t address,
@@ -2405,7 +3200,7 @@ static int write_memory_bus_v0(struct target *target, target_addr_t address,
/*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
TARGET_PRIxADDR, size, count, address);
- dmi_write(target, DMI_SBADDRESS0, address);
+ dmi_write(target, DM_SBADDRESS0, address);
int64_t value = 0;
int64_t access = 0;
riscv_addr_t offset = 0;
@@ -2414,42 +3209,24 @@ static int write_memory_bus_v0(struct target *target, target_addr_t address,
/* B.8 Writing Memory, single write check if we write in one go */
if (count == 1) { /* count is in bytes here */
- /* check the size */
- switch (size) {
- case 1:
- value = t_buffer[0];
- break;
- case 2:
- value = t_buffer[0]
- | ((uint32_t) t_buffer[1] << 8);
- break;
- case 4:
- value = t_buffer[0]
- | ((uint32_t) t_buffer[1] << 8)
- | ((uint32_t) t_buffer[2] << 16)
- | ((uint32_t) t_buffer[3] << 24);
- break;
- default:
- LOG_ERROR("unsupported access size: %d", size);
- return ERROR_FAIL;
- }
+ value = buf_get_u64(t_buffer, 0, 8 * size);
access = 0;
- access = set_field(access, DMI_SBCS_SBACCESS, size/2);
- dmi_write(target, DMI_SBCS, access);
+ access = set_field(access, DM_SBCS_SBACCESS, size/2);
+ dmi_write(target, DM_SBCS, access);
LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
- dmi_write(target, DMI_SBDATA0, value);
+ dmi_write(target, DM_SBDATA0, value);
return ERROR_OK;
}
/*B.8 Writing Memory, using autoincrement*/
access = 0;
- access = set_field(access, DMI_SBCS_SBACCESS, size/2);
- access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 1);
+ access = set_field(access, DM_SBCS_SBACCESS, size/2);
+ access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
- dmi_write(target, DMI_SBCS, access);
+ dmi_write(target, DM_SBCS, access);
/*2)set the value according to the size required and write*/
for (riscv_addr_t i = 0; i < count; ++i) {
@@ -2458,31 +3235,14 @@ static int write_memory_bus_v0(struct target *target, target_addr_t address,
t_addr = address + offset;
t_buffer = buffer + offset;
- switch (size) {
- case 1:
- value = t_buffer[0];
- break;
- case 2:
- value = t_buffer[0]
- | ((uint32_t) t_buffer[1] << 8);
- break;
- case 4:
- value = t_buffer[0]
- | ((uint32_t) t_buffer[1] << 8)
- | ((uint32_t) t_buffer[2] << 16)
- | ((uint32_t) t_buffer[3] << 24);
- break;
- default:
- LOG_ERROR("unsupported access size: %d", size);
- return ERROR_FAIL;
- }
+ value = buf_get_u64(t_buffer, 0, 8 * size);
LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
PRIx64, (uint32_t)t_addr, (uint32_t)value);
- dmi_write(target, DMI_SBDATA0, value);
+ dmi_write(target, DM_SBDATA0, value);
}
/* Reset the autoincrement when finished (something weird happens if this is not done at the end). */
- access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 0);
- dmi_write(target, DMI_SBCS, access);
+ access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
+ dmi_write(target, DM_SBCS, access);
return ERROR_OK;
}
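buf_get_u64(t_buffer, 0, 8 * size), which replaces the hand-rolled switches above, assembles the value least significant byte first; a minimal equivalent for whole-byte widths (unpack_le() is an illustrative name, not OpenOCD API):

#include <stdint.h>

static uint64_t unpack_le(const uint8_t *p, unsigned size)
{
	uint64_t value = 0;
	for (unsigned i = 0; i < size; i++)
		value |= (uint64_t)p[i] << (8 * i);
	return value;
}

For size == 4 this is exactly the p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24 expression that was removed.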
@@ -2492,30 +3252,47 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
{
RISCV013_INFO(info);
uint32_t sbcs = sb_sbaccess(size);
- sbcs = set_field(sbcs, DMI_SBCS_SBAUTOINCREMENT, 1);
- dmi_write(target, DMI_SBCS, sbcs);
+ sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
+ dmi_write(target, DM_SBCS, sbcs);
target_addr_t next_address = address;
target_addr_t end_address = address + count * size;
+ int result;
+
sb_write_address(target, next_address);
while (next_address < end_address) {
+ LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
+ next_address);
+
+ struct riscv_batch *batch = riscv_batch_alloc(
+ target,
+ 32,
+ info->dmi_busy_delay + info->bus_master_write_delay);
+ if (!batch)
+ return ERROR_FAIL;
+
for (uint32_t i = (next_address - address) / size; i < count; i++) {
const uint8_t *p = buffer + i * size;
+
+ if (riscv_batch_available_scans(batch) < (size + 3) / 4)
+ break;
+
if (size > 12)
- dmi_write(target, DMI_SBDATA3,
+ riscv_batch_add_dmi_write(batch, DM_SBDATA3,
((uint32_t) p[12]) |
(((uint32_t) p[13]) << 8) |
(((uint32_t) p[14]) << 16) |
(((uint32_t) p[15]) << 24));
+
if (size > 8)
- dmi_write(target, DMI_SBDATA2,
+ riscv_batch_add_dmi_write(batch, DM_SBDATA2,
((uint32_t) p[8]) |
(((uint32_t) p[9]) << 8) |
(((uint32_t) p[10]) << 16) |
(((uint32_t) p[11]) << 24));
if (size > 4)
- dmi_write(target, DMI_SBDATA1,
+ riscv_batch_add_dmi_write(batch, DM_SBDATA1,
((uint32_t) p[4]) |
(((uint32_t) p[5]) << 8) |
(((uint32_t) p[6]) << 16) |
@@ -2527,37 +3304,60 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
}
if (size > 1)
value |= ((uint32_t) p[1]) << 8;
- dmi_write(target, DMI_SBDATA0, value);
+ riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);
log_memory_access(address + i * size, value, size, false);
-
- if (info->bus_master_write_delay) {
- jtag_add_runtest(info->bus_master_write_delay, TAP_IDLE);
- if (jtag_execute_queue() != ERROR_OK) {
- LOG_ERROR("Failed to scan idle sequence");
- return ERROR_FAIL;
- }
- }
+ next_address += size;
}
- if (read_sbcs_nonbusy(target, &sbcs) != ERROR_OK)
+ result = batch_run(target, batch);
+ riscv_batch_free(batch);
+ if (result != ERROR_OK)
+ return result;
+
+ bool dmi_busy_encountered;
+ if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
+ DM_SBCS, 0, false, false) != ERROR_OK)
return ERROR_FAIL;
- if (get_field(sbcs, DMI_SBCS_SBBUSYERROR)) {
+ time_t start = time(NULL);
+ bool dmi_busy = dmi_busy_encountered;
+ while (get_field(sbcs, DM_SBCS_SBBUSY) || dmi_busy) {
+ if (time(NULL) - start > riscv_command_timeout_sec) {
+ LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
+ "Increase the timeout with riscv set_command_timeout_sec.",
+ riscv_command_timeout_sec, sbcs);
+ return ERROR_FAIL;
+ }
+
+ if (dmi_op(target, &sbcs, &dmi_busy, DMI_OP_READ,
+ DM_SBCS, 0, false, true) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
/* We wrote while the target was busy. Slow down and try again. */
- dmi_write(target, DMI_SBCS, DMI_SBCS_SBBUSYERROR);
- next_address = sb_read_address(target);
+ dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR);
info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
+ }
+
+ if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
+ next_address = sb_read_address(target);
+ if (next_address < address) {
+ /* This should never happen, probably buggy hardware. */
+ LOG_DEBUG("unexpected system bus address 0x%" TARGET_PRIxADDR,
+ next_address);
+ return ERROR_FAIL;
+ }
+
continue;
}
- unsigned error = get_field(sbcs, DMI_SBCS_SBERROR);
- if (error == 0) {
- next_address = end_address;
- } else {
+ unsigned error = get_field(sbcs, DM_SBCS_SBERROR);
+ if (error != 0) {
/* Some error indicating the bus access failed, but not because of
* something we did wrong. */
- dmi_write(target, DMI_SBCS, DMI_SBCS_SBERROR);
+ dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
return ERROR_FAIL;
}
}
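The sbbusy wait above follows a pattern used throughout this file: re-read a status word until a busy bit clears, bailing out after a wall-clock timeout. A standalone sketch under the assumption that read_status() and busy_mask are placeholders for dmi_op(..., DM_SBCS, ...) and DM_SBCS_SBBUSY:

#include <stdint.h>
#include <time.h>

/* Returns 0 once the bit clears, -1 on read error or timeout. */
static int wait_until_clear(int (*read_status)(uint32_t *out), uint32_t busy_mask,
		int timeout_sec, uint32_t *status)
{
	time_t start = time(NULL);
	for (;;) {
		if (read_status(status) != 0)
			return -1;
		if ((*status & busy_mask) == 0)
			return 0;
		if (time(NULL) - start > timeout_sec)
			return -1;	/* caller logs the stale status and the timeout hint */
	}
}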
@@ -2570,10 +3370,21 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
{
RISCV013_INFO(info);
+ if (riscv_xlen(target) < size * 8) {
+ LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
+ riscv_xlen(target), size * 8);
+ return ERROR_FAIL;
+ }
+
LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
select_dmi(target);
+ uint64_t mstatus = 0;
+ uint64_t mstatus_old = 0;
+ if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
+ return ERROR_FAIL;
+
/* s0 holds the next address to write to
* s1 holds the next data value to write
*/
@@ -2588,6 +3399,8 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
/* Write the program (store, increment) */
struct riscv_program program;
riscv_program_init(&program, target);
+ if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
+ riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
switch (size) {
case 1:
@@ -2599,12 +3412,17 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
case 4:
riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
break;
+ case 8:
+ riscv_program_sdr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
+ break;
default:
- LOG_ERROR("Unsupported size: %d", size);
+ LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size);
result = ERROR_FAIL;
goto error;
}
+ if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
+ riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
result = riscv_program_ebreak(&program);
@@ -2624,6 +3442,8 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
target,
32,
info->dmi_busy_delay + info->ac_busy_delay);
+ if (!batch) {
+ result = ERROR_FAIL;
+ goto error;
+ }
/* To write another word, we put it in S1 and execute the program. */
unsigned start = (cur_addr - address) / size;
@@ -2631,27 +3451,7 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
unsigned offset = size*i;
const uint8_t *t_buffer = buffer + offset;
- uint32_t value;
- switch (size) {
- case 1:
- value = t_buffer[0];
- break;
- case 2:
- value = t_buffer[0]
- | ((uint32_t) t_buffer[1] << 8);
- break;
- case 4:
- value = t_buffer[0]
- | ((uint32_t) t_buffer[1] << 8)
- | ((uint32_t) t_buffer[2] << 16)
- | ((uint32_t) t_buffer[3] << 24);
- break;
- default:
- LOG_ERROR("unsupported access size: %d", size);
- riscv_batch_free(batch);
- result = ERROR_FAIL;
- goto error;
- }
+ uint64_t value = buf_get_u64(t_buffer, 0, 8 * size);
log_memory_access(address + offset, value, size, false);
cur_addr += size;
@@ -2665,12 +3465,14 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
}
/* Write value. */
- dmi_write(target, DMI_DATA0, value);
+ if (size > 4)
+ dmi_write(target, DM_DATA1, value >> 32);
+ dmi_write(target, DM_DATA0, value);
/* Write and execute command that moves value into S1 and
* executes program buffer. */
uint32_t command = access_register_command(target,
- GDB_REGNO_S1, 32,
+ GDB_REGNO_S1, riscv_xlen(target),
AC_ACCESS_REGISTER_POSTEXEC |
AC_ACCESS_REGISTER_TRANSFER |
AC_ACCESS_REGISTER_WRITE);
@@ -2681,12 +3483,14 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
}
/* Turn on autoexec */
- dmi_write(target, DMI_ABSTRACTAUTO,
- 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
+ dmi_write(target, DM_ABSTRACTAUTO,
+ 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
setup_needed = false;
} else {
- riscv_batch_add_dmi_write(batch, DMI_DATA0, value);
+ if (size > 4)
+ riscv_batch_add_dmi_write(batch, DM_DATA1, value >> 32);
+ riscv_batch_add_dmi_write(batch, DM_DATA0, value);
if (riscv_batch_full(batch))
break;
}
@@ -2703,13 +3507,14 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
uint32_t abstractcs;
bool dmi_busy_encountered;
- if (dmi_op(target, &abstractcs, &dmi_busy_encountered, DMI_OP_READ,
- DMI_ABSTRACTCS, 0, false) != ERROR_OK)
+ result = dmi_op(target, &abstractcs, &dmi_busy_encountered,
+ DMI_OP_READ, DM_ABSTRACTCS, 0, false, true);
+ if (result != ERROR_OK)
goto error;
- while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY))
- if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
+ while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
+ if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
return ERROR_FAIL;
- info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
+ info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
LOG_DEBUG("successful (partial?) memory write");
} else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
@@ -2720,7 +3525,7 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
riscv013_clear_abstract_error(target);
increase_ac_busy_delay(target);
- dmi_write(target, DMI_ABSTRACTAUTO, 0);
+ dmi_write(target, DM_ABSTRACTAUTO, 0);
result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
if (result != ERROR_OK)
goto error;
@@ -2734,13 +3539,18 @@ static int write_memory_progbuf(struct target *target, target_addr_t address,
}
error:
- dmi_write(target, DMI_ABSTRACTAUTO, 0);
+ dmi_write(target, DM_ABSTRACTAUTO, 0);
if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
return ERROR_FAIL;
if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
return ERROR_FAIL;
+ /* Restore MSTATUS */
+ if (mstatus != mstatus_old)
+ if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
+ return ERROR_FAIL;
+
if (execute_fence(target) != ERROR_OK)
return ERROR_FAIL;
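For 8-byte writes the value is split across two data registers, and DM_DATA1 has to go out before DM_DATA0 because, once abstractauto is armed on data0, the DM_DATA0 write is what re-runs the program buffer. A sketch of just that ordering, where write_reg() and push_store_value() are placeholders for the dmi/batch write calls:

#include <stdint.h>

static void push_store_value(void (*write_reg)(uint32_t regno, uint32_t value),
		uint32_t data0, uint32_t data1, uint64_t value, unsigned size)
{
	if (size > 4)
		write_reg(data1, (uint32_t)(value >> 32));	/* high half first */
	write_reg(data0, (uint32_t)value);	/* data0 last: this is what fires autoexec */
}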
@@ -2751,25 +3561,25 @@ static int write_memory(struct target *target, target_addr_t address,
uint32_t size, uint32_t count, const uint8_t *buffer)
{
RISCV013_INFO(info);
- if (info->progbufsize >= 2 && !riscv_prefer_sba)
+
+ if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
return write_memory_progbuf(target, address, size, count, buffer);
- if ((get_field(info->sbcs, DMI_SBCS_SBACCESS8) && size == 1) ||
- (get_field(info->sbcs, DMI_SBCS_SBACCESS16) && size == 2) ||
- (get_field(info->sbcs, DMI_SBCS_SBACCESS32) && size == 4) ||
- (get_field(info->sbcs, DMI_SBCS_SBACCESS64) && size == 8) ||
- (get_field(info->sbcs, DMI_SBCS_SBACCESS128) && size == 16)) {
- if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 0)
+ if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
+ (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
+ (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
+ (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
+ (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
+ if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
return write_memory_bus_v0(target, address, size, count, buffer);
- else if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 1)
+ else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
return write_memory_bus_v1(target, address, size, count, buffer);
}
- if (info->progbufsize >= 2)
+ if (has_sufficient_progbuf(target, 3))
return write_memory_progbuf(target, address, size, count, buffer);
- LOG_ERROR("Don't know how to write memory on this target.");
- return ERROR_FAIL;
+ return write_memory_abstract(target, address, size, count, buffer);
}
static int arch_state(struct target *target)
@@ -2785,14 +3595,12 @@ struct target_type riscv013_target = {
.examine = examine,
.poll = &riscv_openocd_poll,
- .halt = &riscv_openocd_halt,
- .resume = &riscv_openocd_resume,
+ .halt = &riscv_halt,
.step = &riscv_openocd_step,
.assert_reset = assert_reset,
.deassert_reset = deassert_reset,
- .read_memory = read_memory,
.write_memory = write_memory,
.arch_state = arch_state,
@@ -2802,16 +3610,19 @@ struct target_type riscv013_target = {
static int riscv013_get_register(struct target *target,
riscv_reg_t *value, int hid, int rid)
{
- LOG_DEBUG("reading register %s on hart %d", gdb_regno_name(rid), hid);
+ LOG_DEBUG("[%d] reading register %s on hart %d", target->coreid,
+ gdb_regno_name(rid), hid);
riscv_set_current_hartid(target, hid);
int result = ERROR_OK;
if (rid == GDB_REGNO_PC) {
+ /* TODO: move this into riscv.c. */
result = register_read(target, value, GDB_REGNO_DPC);
- LOG_DEBUG("read PC from DPC: 0x%" PRIx64, *value);
+ LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value);
} else if (rid == GDB_REGNO_PRIV) {
uint64_t dcsr;
+ /* TODO: move this into riscv.c. */
result = register_read(target, &dcsr, GDB_REGNO_DCSR);
*value = get_field(dcsr, CSR_DCSR_PRV);
} else {
@@ -2825,19 +3636,19 @@ static int riscv013_get_register(struct target *target,
static int riscv013_set_register(struct target *target, int hid, int rid, uint64_t value)
{
- LOG_DEBUG("writing 0x%" PRIx64 " to register %s on hart %d", value,
- gdb_regno_name(rid), hid);
+ LOG_DEBUG("[%d] writing 0x%" PRIx64 " to register %s on hart %d",
+ target->coreid, value, gdb_regno_name(rid), hid);
riscv_set_current_hartid(target, hid);
if (rid <= GDB_REGNO_XPR31) {
return register_write_direct(target, rid, value);
} else if (rid == GDB_REGNO_PC) {
- LOG_DEBUG("writing PC to DPC: 0x%" PRIx64, value);
+ LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64, target->coreid, value);
register_write_direct(target, GDB_REGNO_DPC, value);
uint64_t actual_value;
register_read_direct(target, &actual_value, GDB_REGNO_DPC);
- LOG_DEBUG(" actual DPC written: 0x%016" PRIx64, actual_value);
+ LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64, target->coreid, actual_value);
if (value != actual_value) {
LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
"value (0x%" PRIx64 ")", value, actual_value);
@@ -2860,32 +3671,97 @@ static int riscv013_select_current_hart(struct target *target)
RISCV_INFO(r);
dm013_info_t *dm = get_dm(target);
+ if (!dm)
+ return ERROR_FAIL;
if (r->current_hartid == dm->current_hartid)
return ERROR_OK;
uint32_t dmcontrol;
/* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
- if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
+ if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
return ERROR_FAIL;
dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
- int result = dmi_write(target, DMI_DMCONTROL, dmcontrol);
+ int result = dmi_write(target, DM_DMCONTROL, dmcontrol);
dm->current_hartid = r->current_hartid;
return result;
}
-static int riscv013_halt_current_hart(struct target *target)
+/* Select all harts that were prepped and that are selectable, clearing the
+ * prepped flag on the harts that actually were selected. */
+static int select_prepped_harts(struct target *target, bool *use_hasel)
{
+ dm013_info_t *dm = get_dm(target);
+ if (!dm)
+ return ERROR_FAIL;
+ if (!dm->hasel_supported) {
+ RISCV_INFO(r);
+ r->prepped = false;
+ *use_hasel = false;
+ return ERROR_OK;
+ }
+
+ assert(dm->hart_count);
+ unsigned hawindow_count = (dm->hart_count + 31) / 32;
+ uint32_t hawindow[hawindow_count];
+
+ memset(hawindow, 0, sizeof(uint32_t) * hawindow_count);
+
+ target_list_t *entry;
+ unsigned total_selected = 0;
+ list_for_each_entry(entry, &dm->target_list, list) {
+ struct target *t = entry->target;
+ riscv_info_t *r = riscv_info(t);
+ riscv013_info_t *info = get_info(t);
+ unsigned index = info->index;
+ LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index, t->coreid, r->prepped);
+ r->selected = r->prepped;
+ if (r->prepped) {
+ hawindow[index / 32] |= 1 << (index % 32);
+ r->prepped = false;
+ total_selected++;
+ }
+ index++;
+ }
+
+ /* Don't use hasel if we only need to talk to one hart. */
+ if (total_selected <= 1) {
+ *use_hasel = false;
+ return ERROR_OK;
+ }
+
+ for (unsigned i = 0; i < hawindow_count; i++) {
+ if (dmi_write(target, DM_HAWINDOWSEL, i) != ERROR_OK)
+ return ERROR_FAIL;
+ if (dmi_write(target, DM_HAWINDOW, hawindow[i]) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ *use_hasel = true;
+ return ERROR_OK;
+}
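Each hart index maps to one bit of the hart array window; a sketch of the bookkeeping done above (mark_hart_selected() is an illustrative helper, not OpenOCD API):

#include <stdint.h>

/* hawindow[] holds (hart_count + 31) / 32 words and is zeroed before use. */
static void mark_hart_selected(uint32_t *hawindow, unsigned index)
{
	hawindow[index / 32] |= 1u << (index % 32);
}

Hart index 37, for instance, sets bit 5 of hawindow[1], which then goes out through the DM_HAWINDOWSEL=1 / DM_HAWINDOW write pair.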
+
+static int riscv013_halt_prep(struct target *target)
+{
+ return ERROR_OK;
+}
+
+static int riscv013_halt_go(struct target *target)
+{
+ bool use_hasel = false;
+ if (!riscv_rtos_enabled(target)) {
+ if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
RISCV_INFO(r);
LOG_DEBUG("halting hart %d", r->current_hartid);
- if (riscv_is_halted(target))
- LOG_ERROR("Hart %d is already halted!", r->current_hartid);
/* Issue the halt command, and then wait for the current hart to halt. */
- uint32_t dmcontrol;
- if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
- return ERROR_FAIL;
- dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HALTREQ, 1);
- dmi_write(target, DMI_DMCONTROL, dmcontrol);
+ uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ;
+ if (use_hasel)
+ dmcontrol |= DM_DMCONTROL_HASEL;
+ dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
+ dmi_write(target, DM_DMCONTROL, dmcontrol);
for (size_t i = 0; i < 256; ++i)
if (riscv_is_halted(target))
break;
@@ -2894,7 +3770,7 @@ static int riscv013_halt_current_hart(struct target *target)
uint32_t dmstatus;
if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
return ERROR_FAIL;
- if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
+ if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
return ERROR_FAIL;
LOG_ERROR("unable to halt hart %d", r->current_hartid);
@@ -2903,23 +3779,43 @@ static int riscv013_halt_current_hart(struct target *target)
return ERROR_FAIL;
}
- dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HALTREQ, 0);
- dmi_write(target, DMI_DMCONTROL, dmcontrol);
+ dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 0);
+ dmi_write(target, DM_DMCONTROL, dmcontrol);
+
+ if (use_hasel) {
+ target_list_t *entry;
+ dm013_info_t *dm = get_dm(target);
+ if (!dm)
+ return ERROR_FAIL;
+ list_for_each_entry(entry, &dm->target_list, list) {
+ struct target *t = entry->target;
+ t->state = TARGET_HALTED;
+ if (t->debug_reason == DBG_REASON_NOTHALTED)
+ t->debug_reason = DBG_REASON_DBGRQ;
+ }
+ }
+ /* The "else" case is handled in halt_go(). */
return ERROR_OK;
}
-static int riscv013_resume_current_hart(struct target *target)
+static int riscv013_resume_go(struct target *target)
{
- return riscv013_step_or_resume_current_hart(target, false);
+ bool use_hasel = false;
+ if (!riscv_rtos_enabled(target)) {
+ if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ return riscv013_step_or_resume_current_hart(target, false, use_hasel);
}
static int riscv013_step_current_hart(struct target *target)
{
- return riscv013_step_or_resume_current_hart(target, true);
+ return riscv013_step_or_resume_current_hart(target, true, false);
}
-static int riscv013_on_resume(struct target *target)
+static int riscv013_resume_prep(struct target *target)
{
return riscv013_on_step_or_resume(target, false);
}
@@ -2939,16 +3835,16 @@ static bool riscv013_is_halted(struct target *target)
uint32_t dmstatus;
if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
return false;
- if (get_field(dmstatus, DMI_DMSTATUS_ANYUNAVAIL))
+ if (get_field(dmstatus, DM_DMSTATUS_ANYUNAVAIL))
LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
- if (get_field(dmstatus, DMI_DMSTATUS_ANYNONEXISTENT))
+ if (get_field(dmstatus, DM_DMSTATUS_ANYNONEXISTENT))
LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
- if (get_field(dmstatus, DMI_DMSTATUS_ANYHAVERESET)) {
+ if (get_field(dmstatus, DM_DMSTATUS_ANYHAVERESET)) {
int hartid = riscv_current_hartid(target);
LOG_INFO("Hart %d unexpectedly reset!", hartid);
/* TODO: Can we make this more obvious to eg. a gdb user? */
- uint32_t dmcontrol = DMI_DMCONTROL_DMACTIVE |
- DMI_DMCONTROL_ACKHAVERESET;
+ uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE |
+ DM_DMCONTROL_ACKHAVERESET;
dmcontrol = set_hartsel(dmcontrol, hartid);
/* If we had been halted when we reset, request another halt. If we
* ended up running out of reset, then the user will (hopefully) get a
@@ -2956,10 +3852,10 @@ static bool riscv013_is_halted(struct target *target)
* that it is halted again once the request goes through.
*/
if (target->state == TARGET_HALTED)
- dmcontrol |= DMI_DMCONTROL_HALTREQ;
- dmi_write(target, DMI_DMCONTROL, dmcontrol);
+ dmcontrol |= DM_DMCONTROL_HALTREQ;
+ dmi_write(target, DM_DMCONTROL, dmcontrol);
}
- return get_field(dmstatus, DMI_DMSTATUS_ALLHALTED);
+ return get_field(dmstatus, DM_DMSTATUS_ALLHALTED);
}
static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
@@ -2984,6 +3880,8 @@ static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
case CSR_DCSR_CAUSE_DEBUGINT:
case CSR_DCSR_CAUSE_HALT:
return RISCV_HALT_INTERRUPT;
+ case CSR_DCSR_CAUSE_GROUP:
+ return RISCV_HALT_GROUP;
}
LOG_ERROR("Unknown DCSR cause field: %x", (int)get_field(dcsr, CSR_DCSR_CAUSE));
@@ -2993,20 +3891,30 @@ static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
{
- return dmi_write(target, DMI_PROGBUF0 + index, data);
+ dm013_info_t *dm = get_dm(target);
+ if (!dm)
+ return ERROR_FAIL;
+ if (dm->progbuf_cache[index] != data) {
+ if (dmi_write(target, DM_PROGBUF0 + index, data) != ERROR_OK)
+ return ERROR_FAIL;
+ dm->progbuf_cache[index] = data;
+ } else {
+ LOG_DEBUG("cache hit for 0x%" PRIx32 " @%d", data, index);
+ }
+ return ERROR_OK;
}
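The progbuf cache above skips redundant dmi writes when a slot already holds the requested instruction word. A standalone sketch of the same idea; the explicit valid[] flags are an addition here (the real cache relies on being invalidated elsewhere), and the names are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define SHADOW_WORDS 16	/* illustrative size, not the DM's actual progbufsize */

struct progbuf_shadow {
	uint32_t word[SHADOW_WORDS];
	bool valid[SHADOW_WORDS];
};

/* Returns true when the DMI write still has to be performed. */
static bool progbuf_shadow_update(struct progbuf_shadow *c, unsigned index, uint32_t data)
{
	if (c->valid[index] && c->word[index] == data)
		return false;	/* cache hit: the DM already holds this word */
	c->word[index] = data;
	c->valid[index] = true;
	return true;
}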
riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
{
uint32_t value;
- dmi_read(target, &value, DMI_PROGBUF0 + index);
+ dmi_read(target, &value, DM_PROGBUF0 + index);
return value;
}
int riscv013_execute_debug_buffer(struct target *target)
{
uint32_t run_program = 0;
- run_program = set_field(run_program, AC_ACCESS_REGISTER_SIZE, 2);
+ run_program = set_field(run_program, AC_ACCESS_REGISTER_AARSIZE, 2);
run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
@@ -3043,11 +3951,11 @@ static int get_max_sbaccess(struct target *target)
{
RISCV013_INFO(info);
- uint32_t sbaccess128 = get_field(info->sbcs, DMI_SBCS_SBACCESS128);
- uint32_t sbaccess64 = get_field(info->sbcs, DMI_SBCS_SBACCESS64);
- uint32_t sbaccess32 = get_field(info->sbcs, DMI_SBCS_SBACCESS32);
- uint32_t sbaccess16 = get_field(info->sbcs, DMI_SBCS_SBACCESS16);
- uint32_t sbaccess8 = get_field(info->sbcs, DMI_SBCS_SBACCESS8);
+ uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
+ uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
+ uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
+ uint32_t sbaccess16 = get_field(info->sbcs, DM_SBCS_SBACCESS16);
+ uint32_t sbaccess8 = get_field(info->sbcs, DM_SBCS_SBACCESS8);
if (sbaccess128)
return 4;
@@ -3067,9 +3975,9 @@ static uint32_t get_num_sbdata_regs(struct target *target)
{
RISCV013_INFO(info);
- uint32_t sbaccess128 = get_field(info->sbcs, DMI_SBCS_SBACCESS128);
- uint32_t sbaccess64 = get_field(info->sbcs, DMI_SBCS_SBACCESS64);
- uint32_t sbaccess32 = get_field(info->sbcs, DMI_SBCS_SBACCESS32);
+ uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
+ uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
+ uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
if (sbaccess128)
return 4;
@@ -3091,7 +3999,7 @@ static int riscv013_test_sba_config_reg(struct target *target,
uint32_t rd_val;
uint32_t sbcs_orig;
- dmi_read(target, &sbcs_orig, DMI_SBCS);
+ dmi_read(target, &sbcs_orig, DM_SBCS);
uint32_t sbcs = sbcs_orig;
bool test_passed;
@@ -3103,25 +4011,26 @@ static int riscv013_test_sba_config_reg(struct target *target,
return ERROR_FAIL;
}
- if (get_field(sbcs, DMI_SBCS_SBVERSION) != 1) {
+ if (get_field(sbcs, DM_SBCS_SBVERSION) != 1) {
LOG_ERROR("System Bus Access unsupported SBVERSION (%d). Only version 1 is supported.",
- get_field(sbcs, DMI_SBCS_SBVERSION));
+ get_field(sbcs, DM_SBCS_SBVERSION));
return ERROR_FAIL;
}
uint32_t num_sbdata_regs = get_num_sbdata_regs(target);
+ assert(num_sbdata_regs);
uint32_t rd_buf[num_sbdata_regs];
/* Test 1: Simple write/read test */
test_passed = true;
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBAUTOINCREMENT, 0);
- dmi_write(target, DMI_SBCS, sbcs);
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 0);
+ dmi_write(target, DM_SBCS, sbcs);
uint32_t test_patterns[4] = {0xdeadbeef, 0xfeedbabe, 0x12345678, 0x08675309};
for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
- sbcs = set_field(sbcs, DMI_SBCS_SBACCESS, sbaccess);
- dmi_write(target, DMI_SBCS, sbcs);
+ sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
+ dmi_write(target, DM_SBCS, sbcs);
uint32_t compare_mask = (sbaccess == 0) ? 0xff : (sbaccess == 1) ? 0xffff : 0xffffffff;
@@ -3153,14 +4062,14 @@ static int riscv013_test_sba_config_reg(struct target *target,
target_addr_t curr_addr;
target_addr_t prev_addr;
test_passed = true;
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBAUTOINCREMENT, 1);
- dmi_write(target, DMI_SBCS, sbcs);
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 1);
+ dmi_write(target, DM_SBCS, sbcs);
for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
- sbcs = set_field(sbcs, DMI_SBCS_SBACCESS, sbaccess);
- dmi_write(target, DMI_SBCS, sbcs);
+ sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
+ dmi_write(target, DM_SBCS, sbcs);
- dmi_write(target, DMI_SBADDRESS0, legal_address);
+ dmi_write(target, DM_SBADDRESS0, legal_address);
read_sbcs_nonbusy(target, &sbcs);
curr_addr = legal_address;
for (uint32_t i = 0; i < num_words; i++) {
@@ -3172,17 +4081,17 @@ static int riscv013_test_sba_config_reg(struct target *target,
test_passed = false;
tests_failed++;
}
- dmi_write(target, DMI_SBDATA0, i);
+ dmi_write(target, DM_SBDATA0, i);
}
read_sbcs_nonbusy(target, &sbcs);
- dmi_write(target, DMI_SBADDRESS0, legal_address);
+ dmi_write(target, DM_SBADDRESS0, legal_address);
uint32_t val;
- sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, 1);
- dmi_write(target, DMI_SBCS, sbcs);
- dmi_read(target, &val, DMI_SBDATA0); /* Dummy read to trigger first system bus read */
+ sbcs = set_field(sbcs, DM_SBCS_SBREADONDATA, 1);
+ dmi_write(target, DM_SBCS, sbcs);
+ dmi_read(target, &val, DM_SBDATA0); /* Dummy read to trigger first system bus read */
curr_addr = legal_address;
for (uint32_t i = 0; i < num_words; i++) {
prev_addr = curr_addr;
@@ -3193,7 +4102,7 @@ static int riscv013_test_sba_config_reg(struct target *target,
test_passed = false;
tests_failed++;
}
- dmi_read(target, &val, DMI_SBDATA0);
+ dmi_read(target, &val, DM_SBDATA0);
read_sbcs_nonbusy(target, &sbcs);
if (i != val) {
LOG_ERROR("System Bus Access Test 2: Error reading auto-incremented address,"
@@ -3209,12 +4118,12 @@ static int riscv013_test_sba_config_reg(struct target *target,
/* Test 3: Read from illegal address */
read_memory_sba_simple(target, illegal_address, rd_buf, 1, sbcs_orig);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBERROR) == 2) {
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBERROR, 2);
- dmi_write(target, DMI_SBCS, sbcs);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBERROR) == 0)
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
+ dmi_write(target, DM_SBCS, sbcs);
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
LOG_INFO("System Bus Access Test 3: Illegal address read test PASSED.");
else
LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to clear to 0.");
@@ -3225,12 +4134,12 @@ static int riscv013_test_sba_config_reg(struct target *target,
/* Test 4: Write to illegal address */
write_memory_sba_simple(target, illegal_address, test_patterns, 1, sbcs_orig);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBERROR) == 2) {
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBERROR, 2);
- dmi_write(target, DMI_SBCS, sbcs);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBERROR) == 0)
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
+ dmi_write(target, DM_SBCS, sbcs);
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
LOG_INFO("System Bus Access Test 4: Illegal address write test PASSED.");
else {
LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to clear to 0.");
@@ -3242,21 +4151,21 @@ static int riscv013_test_sba_config_reg(struct target *target,
}
/* Test 5: Write with unsupported sbaccess size */
- uint32_t sbaccess128 = get_field(sbcs_orig, DMI_SBCS_SBACCESS128);
+ uint32_t sbaccess128 = get_field(sbcs_orig, DM_SBCS_SBACCESS128);
if (sbaccess128) {
LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED, all sbaccess sizes supported.");
} else {
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBACCESS, 4);
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 4);
write_memory_sba_simple(target, legal_address, test_patterns, 1, sbcs);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBERROR) == 4) {
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBERROR, 4);
- dmi_write(target, DMI_SBCS, sbcs);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBERROR) == 0)
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBERROR) == 4) {
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 4);
+ dmi_write(target, DM_SBCS, sbcs);
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED.");
else {
LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to clear to 0.");
@@ -3269,16 +4178,16 @@ static int riscv013_test_sba_config_reg(struct target *target,
}
/* Test 6: Write to misaligned address */
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBACCESS, 1);
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 1);
write_memory_sba_simple(target, legal_address+1, test_patterns, 1, sbcs);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBERROR) == 3) {
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBERROR, 3);
- dmi_write(target, DMI_SBCS, sbcs);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBERROR) == 0)
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBERROR) == 3) {
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 3);
+ dmi_write(target, DM_SBCS, sbcs);
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
LOG_INFO("System Bus Access Test 6: SBCS address alignment error test PASSED");
else {
LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to clear to 0.");
@@ -3292,21 +4201,21 @@ static int riscv013_test_sba_config_reg(struct target *target,
/* Test 7: Set sbbusyerror, only run this case in simulation as it is likely
* impossible to hit otherwise */
if (run_sbbusyerror_test) {
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBREADONADDR, 1);
- dmi_write(target, DMI_SBCS, sbcs);
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBREADONADDR, 1);
+ dmi_write(target, DM_SBCS, sbcs);
for (int i = 0; i < 16; i++)
- dmi_write(target, DMI_SBDATA0, 0xdeadbeef);
+ dmi_write(target, DM_SBDATA0, 0xdeadbeef);
for (int i = 0; i < 16; i++)
- dmi_write(target, DMI_SBADDRESS0, legal_address);
-
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBBUSYERROR)) {
- sbcs = set_field(sbcs_orig, DMI_SBCS_SBBUSYERROR, 1);
- dmi_write(target, DMI_SBCS, sbcs);
- dmi_read(target, &rd_val, DMI_SBCS);
- if (get_field(rd_val, DMI_SBCS_SBBUSYERROR) == 0)
+ dmi_write(target, DM_SBADDRESS0, legal_address);
+
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBBUSYERROR)) {
+ sbcs = set_field(sbcs_orig, DM_SBCS_SBBUSYERROR, 1);
+ dmi_write(target, DM_SBCS, sbcs);
+ dmi_read(target, &rd_val, DM_SBCS);
+ if (get_field(rd_val, DM_SBCS_SBBUSYERROR) == 0)
LOG_INFO("System Bus Access Test 7: SBCS sbbusyerror test PASSED.");
else {
LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to clear to 0.");
@@ -3336,26 +4245,26 @@ void write_memory_sba_simple(struct target *target, target_addr_t addr,
uint32_t rd_sbcs;
uint32_t masked_addr;
- uint32_t sba_size = get_field(info->sbcs, DMI_SBCS_SBASIZE);
+ uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
read_sbcs_nonbusy(target, &rd_sbcs);
- uint32_t sbcs_no_readonaddr = set_field(sbcs, DMI_SBCS_SBREADONADDR, 0);
- dmi_write(target, DMI_SBCS, sbcs_no_readonaddr);
+ uint32_t sbcs_no_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 0);
+ dmi_write(target, DM_SBCS, sbcs_no_readonaddr);
for (uint32_t i = 0; i < sba_size/32; i++) {
masked_addr = (addr >> 32*i) & 0xffffffff;
if (i != 3)
- dmi_write(target, DMI_SBADDRESS0+i, masked_addr);
+ dmi_write(target, DM_SBADDRESS0+i, masked_addr);
else
- dmi_write(target, DMI_SBADDRESS3, masked_addr);
+ dmi_write(target, DM_SBADDRESS3, masked_addr);
}
/* Write SBDATA registers starting with highest address, since write to
* SBDATA0 triggers write */
for (int i = write_size-1; i >= 0; i--)
- dmi_write(target, DMI_SBDATA0+i, write_data[i]);
+ dmi_write(target, DM_SBDATA0+i, write_data[i]);
}
void read_memory_sba_simple(struct target *target, target_addr_t addr,
@@ -3366,27 +4275,27 @@ void read_memory_sba_simple(struct target *target, target_addr_t addr,
uint32_t rd_sbcs;
uint32_t masked_addr;
- uint32_t sba_size = get_field(info->sbcs, DMI_SBCS_SBASIZE);
+ uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
read_sbcs_nonbusy(target, &rd_sbcs);
- uint32_t sbcs_readonaddr = set_field(sbcs, DMI_SBCS_SBREADONADDR, 1);
- dmi_write(target, DMI_SBCS, sbcs_readonaddr);
+ uint32_t sbcs_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 1);
+ dmi_write(target, DM_SBCS, sbcs_readonaddr);
/* Write addresses starting with highest address register */
for (int i = sba_size/32-1; i >= 0; i--) {
masked_addr = (addr >> 32*i) & 0xffffffff;
if (i != 3)
- dmi_write(target, DMI_SBADDRESS0+i, masked_addr);
+ dmi_write(target, DM_SBADDRESS0+i, masked_addr);
else
- dmi_write(target, DMI_SBADDRESS3, masked_addr);
+ dmi_write(target, DM_SBADDRESS3, masked_addr);
}
read_sbcs_nonbusy(target, &rd_sbcs);
for (uint32_t i = 0; i < read_size; i++)
- dmi_read(target, &(rd_buf[i]), DMI_SBDATA0+i);
+ dmi_read(target, &(rd_buf[i]), DM_SBDATA0+i);
}
int riscv013_dmi_write_u64_bits(struct target *target)
@@ -3397,9 +4306,7 @@ int riscv013_dmi_write_u64_bits(struct target *target)
static int maybe_execute_fence_i(struct target *target)
{
- RISCV013_INFO(info);
- RISCV_INFO(r);
- if (info->progbufsize + r->impebreak >= 3)
+ if (has_sufficient_progbuf(target, 3))
return execute_fence(target);
return ERROR_OK;
}
@@ -3416,13 +4323,14 @@ static int riscv013_on_step_or_resume(struct target *target, bool step)
if (result != ERROR_OK)
return result;
dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
- dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, 1);
- dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, 1);
- dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, 1);
+ dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, riscv_ebreakm);
+ dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, riscv_ebreaks);
+ dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, riscv_ebreaku);
return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
}
-static int riscv013_step_or_resume_current_hart(struct target *target, bool step)
+static int riscv013_step_or_resume_current_hart(struct target *target,
+ bool step, bool use_hasel)
{
RISCV_INFO(r);
LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
@@ -3431,39 +4339,40 @@ static int riscv013_step_or_resume_current_hart(struct target *target, bool step
return ERROR_FAIL;
}
- if (maybe_execute_fence_i(target) != ERROR_OK)
- return ERROR_FAIL;
-
/* Issue the resume command, and then wait for the current hart to resume. */
- uint32_t dmcontrol = DMI_DMCONTROL_DMACTIVE;
+ uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_RESUMEREQ;
+ if (use_hasel)
+ dmcontrol |= DM_DMCONTROL_HASEL;
dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
- dmi_write(target, DMI_DMCONTROL, dmcontrol | DMI_DMCONTROL_RESUMEREQ);
+ dmi_write(target, DM_DMCONTROL, dmcontrol);
+
+ dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HASEL, 0);
+ dmcontrol = set_field(dmcontrol, DM_DMCONTROL_RESUMEREQ, 0);
uint32_t dmstatus;
for (size_t i = 0; i < 256; ++i) {
usleep(10);
if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
return ERROR_FAIL;
- if (get_field(dmstatus, DMI_DMSTATUS_ALLRESUMEACK) == 0)
+ if (get_field(dmstatus, DM_DMSTATUS_ALLRESUMEACK) == 0)
continue;
- if (step && get_field(dmstatus, DMI_DMSTATUS_ALLHALTED) == 0)
+ if (step && get_field(dmstatus, DM_DMSTATUS_ALLHALTED) == 0)
continue;
- dmi_write(target, DMI_DMCONTROL, dmcontrol);
+ dmi_write(target, DM_DMCONTROL, dmcontrol);
return ERROR_OK;
}
+ dmi_write(target, DM_DMCONTROL, dmcontrol);
+
LOG_ERROR("unable to resume hart %d", r->current_hartid);
- if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
- return ERROR_FAIL;
- LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
return ERROR_FAIL;
LOG_ERROR(" dmstatus =0x%08x", dmstatus);
if (step) {
LOG_ERROR(" was stepping, halting");
- riscv013_halt_current_hart(target);
+ riscv_halt(target);
return ERROR_OK;
}
@@ -3475,9 +4384,9 @@ void riscv013_clear_abstract_error(struct target *target)
/* Wait for busy to go away. */
time_t start = time(NULL);
uint32_t abstractcs;
- dmi_read(target, &abstractcs, DMI_ABSTRACTCS);
- while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY)) {
- dmi_read(target, &abstractcs, DMI_ABSTRACTCS);
+ dmi_read(target, &abstractcs, DM_ABSTRACTCS);
+ while (get_field(abstractcs, DM_ABSTRACTCS_BUSY)) {
+ dmi_read(target, &abstractcs, DM_ABSTRACTCS);
if (time(NULL) - start > riscv_command_timeout_sec) {
LOG_ERROR("abstractcs.busy is not going low after %d seconds "
@@ -3489,17 +4398,25 @@ void riscv013_clear_abstract_error(struct target *target)
}
}
/* Clear the error status. */
- dmi_write(target, DMI_ABSTRACTCS, abstractcs & DMI_ABSTRACTCS_CMDERR);
+ dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
}
+#ifdef _WIN32
+#define FILE_SEP '\\'
+#else
+#define FILE_SEP '/'
+#endif
#define COMPLIANCE_TEST(b, message) \
-{ \
+{ \
+ const char *last_sep = strrchr(__FILE__, FILE_SEP); \
+ const char *fname = (last_sep == NULL ? __FILE__ : last_sep + 1); \
+ LOG_INFO("Executing test %d (%s:%d): %s", total_tests, fname, __LINE__, message); \
int pass = 0; \
if (b) { \
pass = 1; \
passed_tests++; \
} \
- LOG_INFO("%s test %d (%s)\n", (pass) ? "PASSED" : "FAILED", total_tests, message); \
+ LOG_INFO(" %s", (pass) ? "PASSED" : "FAILED"); \
assert(pass); \
total_tests++; \
}
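The FILE_SEP/strrchr step in the macro trims __FILE__ down to its basename so the log line stays short; the same logic as a helper (file_basename() is an illustrative name):

#include <string.h>

static const char *file_basename(const char *path, char sep)
{
	const char *last = strrchr(path, sep);
	return last ? last + 1 : path;
}

file_basename(__FILE__, FILE_SEP) reproduces the fname computed inside COMPLIANCE_TEST.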
@@ -3521,17 +4438,27 @@ void riscv013_clear_abstract_error(struct target *target)
int riscv013_test_compliance(struct target *target)
{
- LOG_INFO("Testing Compliance against RISC-V Debug Spec v0.13");
+ LOG_INFO("Basic compliance test against RISC-V Debug Spec v0.13");
+ LOG_INFO("This test is not complete, and not well supported.");
+ LOG_INFO("Your core might pass this test without being compliant.");
+ LOG_INFO("Your core might fail this test while being compliant.");
+ LOG_INFO("Use your judgment, and please contribute improvements.");
if (!riscv_rtos_enabled(target)) {
LOG_ERROR("Please run with -rtos riscv to run compliance test.");
return ERROR_FAIL;
}
+ if (!target_was_examined(target)) {
+ LOG_ERROR("Cannot run compliance test, because target has not yet "
+ "been examined, or the examination failed.\n");
+ return ERROR_FAIL;
+ }
+
int total_tests = 0;
int passed_tests = 0;
- uint32_t dmcontrol_orig = DMI_DMCONTROL_DMACTIVE;
+ uint32_t dmcontrol_orig = DM_DMCONTROL_DMACTIVE;
uint32_t dmcontrol;
uint32_t testvar;
uint32_t testvar_read;
@@ -3545,76 +4472,76 @@ int riscv013_test_compliance(struct target *target)
or it is tied to 0. This check doesn't really do anything, but
it does attempt to set the bit to 1 and then back to 0, which needs to
work if it's implemented. */
- COMPLIANCE_WRITE(target, DMI_DMCONTROL, set_field(dmcontrol_orig, DMI_DMCONTROL_HARTRESET, 1));
- COMPLIANCE_WRITE(target, DMI_DMCONTROL, set_field(dmcontrol_orig, DMI_DMCONTROL_HARTRESET, 0));
- COMPLIANCE_READ(target, &dmcontrol, DMI_DMCONTROL);
- COMPLIANCE_TEST((get_field(dmcontrol, DMI_DMCONTROL_HARTRESET) == 0),
+ COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HARTRESET, 1));
+ COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HARTRESET, 0));
+ COMPLIANCE_READ(target, &dmcontrol, DM_DMCONTROL);
+ COMPLIANCE_TEST((get_field(dmcontrol, DM_DMCONTROL_HARTRESET) == 0),
"DMCONTROL.hartreset can be 0 or RW.");
/* hasel */
- COMPLIANCE_WRITE(target, DMI_DMCONTROL, set_field(dmcontrol_orig, DMI_DMCONTROL_HASEL, 1));
- COMPLIANCE_WRITE(target, DMI_DMCONTROL, set_field(dmcontrol_orig, DMI_DMCONTROL_HASEL, 0));
- COMPLIANCE_READ(target, &dmcontrol, DMI_DMCONTROL);
- COMPLIANCE_TEST((get_field(dmcontrol, DMI_DMCONTROL_HASEL) == 0),
+ COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HASEL, 1));
+ COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HASEL, 0));
+ COMPLIANCE_READ(target, &dmcontrol, DM_DMCONTROL);
+ COMPLIANCE_TEST((get_field(dmcontrol, DM_DMCONTROL_HASEL) == 0),
"DMCONTROL.hasel can be 0 or RW.");
/* TODO: test that hamask registers exist if hasel does. */
/* haltreq */
- COMPLIANCE_MUST_PASS(riscv_halt_all_harts(target));
+ COMPLIANCE_MUST_PASS(riscv_halt(target));
/* This bit is not actually readable according to the spec, so nothing to check.*/
/* DMSTATUS */
- COMPLIANCE_CHECK_RO(target, DMI_DMSTATUS);
+ COMPLIANCE_CHECK_RO(target, DM_DMSTATUS);
/* resumereq */
/* This bit is not actually readable according to the spec, so nothing to check.*/
- COMPLIANCE_MUST_PASS(riscv_resume_all_harts(target));
+ COMPLIANCE_MUST_PASS(riscv_resume(target, true, 0, false, false, false));
/* Halt all harts again so the test can continue.*/
- COMPLIANCE_MUST_PASS(riscv_halt_all_harts(target));
+ COMPLIANCE_MUST_PASS(riscv_halt(target));
/* HARTINFO: Read-Only. This is per-hart, so need to adjust hartsel. */
uint32_t hartinfo;
- COMPLIANCE_READ(target, &hartinfo, DMI_HARTINFO);
+ COMPLIANCE_READ(target, &hartinfo, DM_HARTINFO);
for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));
- COMPLIANCE_CHECK_RO(target, DMI_HARTINFO);
+ COMPLIANCE_CHECK_RO(target, DM_HARTINFO);
/* $dscratch CSRs */
- uint32_t nscratch = get_field(hartinfo, DMI_HARTINFO_NSCRATCH);
+ uint32_t nscratch = get_field(hartinfo, DM_HARTINFO_NSCRATCH);
for (unsigned int d = 0; d < nscratch; d++) {
riscv_reg_t testval, testval_read;
- /* Because DSCRATCH is not guaranteed to last across PB executions, need to put
+ /* Because DSCRATCH0 is not guaranteed to last across PB executions, need to put
this all into one PB execution, which may not be possible on all implementations. */
if (info->progbufsize >= 5) {
for (testval = 0x0011223300112233;
testval != 0xDEAD;
testval = testval == 0x0011223300112233 ? ~testval : 0xDEAD) {
COMPLIANCE_TEST(register_write_direct(target, GDB_REGNO_S0, testval) == ERROR_OK,
- "Need to be able to write S0 in order to test DSCRATCH.");
+ "Need to be able to write S0 in order to test DSCRATCH0.");
struct riscv_program program32;
riscv_program_init(&program32, target);
- riscv_program_csrw(&program32, GDB_REGNO_S0, GDB_REGNO_DSCRATCH + d);
- riscv_program_csrr(&program32, GDB_REGNO_S1, GDB_REGNO_DSCRATCH + d);
+ riscv_program_csrw(&program32, GDB_REGNO_S0, GDB_REGNO_DSCRATCH0 + d);
+ riscv_program_csrr(&program32, GDB_REGNO_S1, GDB_REGNO_DSCRATCH0 + d);
riscv_program_fence(&program32);
riscv_program_ebreak(&program32);
COMPLIANCE_TEST(riscv_program_exec(&program32, target) == ERROR_OK,
- "Accessing DSCRATCH with program buffer should succeed.");
+ "Accessing DSCRATCH0 with program buffer should succeed.");
COMPLIANCE_TEST(register_read_direct(target, &testval_read, GDB_REGNO_S1) == ERROR_OK,
- "Need to be able to read S1 in order to test DSCRATCH.");
+ "Need to be able to read S1 in order to test DSCRATCH0.");
if (riscv_xlen(target) > 32) {
COMPLIANCE_TEST(testval == testval_read,
- "All DSCRATCH registers in HARTINFO must be R/W.");
+ "All DSCRATCH0 registers in HARTINFO must be R/W.");
} else {
COMPLIANCE_TEST(testval_read == (testval & 0xFFFFFFFF),
- "All DSCRATCH registers in HARTINFO must be R/W.");
+ "All DSCRATCH0 registers in HARTINFO must be R/W.");
}
}
}
}
/* TODO: dataaccess */
- if (get_field(hartinfo, DMI_HARTINFO_DATAACCESS)) {
+ if (get_field(hartinfo, DM_HARTINFO_DATAACCESS)) {
/* TODO: Shadowed in memory map. */
/* TODO: datasize */
/* TODO: dataaddr */
@@ -3633,16 +4560,16 @@ int riscv013_test_compliance(struct target *target)
for (int i = 0; i < MIN(riscv_count_harts(target), 32); i++)
expected_haltsum0 |= (1 << i);
- COMPLIANCE_READ(target, &testvar_read, DMI_HALTSUM0);
+ COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
COMPLIANCE_TEST(testvar_read == expected_haltsum0,
"HALTSUM0 should report summary of up to 32 halted harts");
- COMPLIANCE_WRITE(target, DMI_HALTSUM0, 0xffffffff);
- COMPLIANCE_READ(target, &testvar_read, DMI_HALTSUM0);
+ COMPLIANCE_WRITE(target, DM_HALTSUM0, 0xffffffff);
+ COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
COMPLIANCE_TEST(testvar_read == expected_haltsum0, "HALTSUM0 should be R/O");
- COMPLIANCE_WRITE(target, DMI_HALTSUM0, 0x0);
- COMPLIANCE_READ(target, &testvar_read, DMI_HALTSUM0);
+ COMPLIANCE_WRITE(target, DM_HALTSUM0, 0x0);
+ COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
COMPLIANCE_TEST(testvar_read == expected_haltsum0, "HALTSUM0 should be R/O");
/* HALTSUM1 */
@@ -3650,16 +4577,16 @@ int riscv013_test_compliance(struct target *target)
for (int i = 0; i < MIN(riscv_count_harts(target), 1024); i += 32)
expected_haltsum1 |= (1 << (i/32));
- COMPLIANCE_READ(target, &testvar_read, DMI_HALTSUM1);
+ COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
COMPLIANCE_TEST(testvar_read == expected_haltsum1,
"HALTSUM1 should report summary of up to 1024 halted harts");
- COMPLIANCE_WRITE(target, DMI_HALTSUM1, 0xffffffff);
- COMPLIANCE_READ(target, &testvar_read, DMI_HALTSUM1);
+ COMPLIANCE_WRITE(target, DM_HALTSUM1, 0xffffffff);
+ COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
COMPLIANCE_TEST(testvar_read == expected_haltsum1, "HALTSUM1 should be R/O");
- COMPLIANCE_WRITE(target, DMI_HALTSUM1, 0x0);
- COMPLIANCE_READ(target, &testvar_read, DMI_HALTSUM1);
+ COMPLIANCE_WRITE(target, DM_HALTSUM1, 0x0);
+ COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
COMPLIANCE_TEST(testvar_read == expected_haltsum1, "HALTSUM1 should be R/O");
/* TODO: HAWINDOWSEL */
@@ -3669,38 +4596,38 @@ int riscv013_test_compliance(struct target *target)
/* ABSTRACTCS */
uint32_t abstractcs;
- COMPLIANCE_READ(target, &abstractcs, DMI_ABSTRACTCS);
+ COMPLIANCE_READ(target, &abstractcs, DM_ABSTRACTCS);
/* Check that all reported Data Words are really R/W */
for (int invert = 0; invert < 2; invert++) {
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT); i++) {
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
testvar = (i + 1) * 0x11111111;
if (invert)
testvar = ~testvar;
- COMPLIANCE_WRITE(target, DMI_DATA0 + i, testvar);
+ COMPLIANCE_WRITE(target, DM_DATA0 + i, testvar);
}
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT); i++) {
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
testvar = (i + 1) * 0x11111111;
if (invert)
testvar = ~testvar;
- COMPLIANCE_READ(target, &testvar_read, DMI_DATA0 + i);
+ COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
COMPLIANCE_TEST(testvar_read == testvar, "All reported DATA words must be R/W");
}
}
/* Check that all reported ProgBuf words are really R/W */
for (int invert = 0; invert < 2; invert++) {
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE); i++) {
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
testvar = (i + 1) * 0x11111111;
if (invert)
testvar = ~testvar;
- COMPLIANCE_WRITE(target, DMI_PROGBUF0 + i, testvar);
+ COMPLIANCE_WRITE(target, DM_PROGBUF0 + i, testvar);
}
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE); i++) {
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
testvar = (i + 1) * 0x11111111;
if (invert)
testvar = ~testvar;
- COMPLIANCE_READ(target, &testvar_read, DMI_PROGBUF0 + i);
+ COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
COMPLIANCE_TEST(testvar_read == testvar, "All reported PROGBUF words must be R/W");
}
}
@@ -3710,17 +4637,17 @@ int riscv013_test_compliance(struct target *target)
/* COMMAND
According to the spec, this register is only W, so can't really check the read result.
But at any rate, this is not legal and should cause an error. */
- COMPLIANCE_WRITE(target, DMI_COMMAND, 0xAAAAAAAA);
- COMPLIANCE_READ(target, &testvar_read, DMI_ABSTRACTCS);
- COMPLIANCE_TEST(get_field(testvar_read, DMI_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
+ COMPLIANCE_WRITE(target, DM_COMMAND, 0xAAAAAAAA);
+ COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
+ COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
"Illegal COMMAND should result in UNSUPPORTED");
- COMPLIANCE_WRITE(target, DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
- COMPLIANCE_WRITE(target, DMI_COMMAND, 0x55555555);
- COMPLIANCE_READ(target, &testvar_read, DMI_ABSTRACTCS);
- COMPLIANCE_TEST(get_field(testvar_read, DMI_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
+ COMPLIANCE_WRITE(target, DM_COMMAND, 0x55555555);
+ COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
+ COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
"Illegal COMMAND should result in UNSUPPORTED");
- COMPLIANCE_WRITE(target, DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
/* Basic Abstract Commands */
for (unsigned int i = 1; i < 32; i = i << 1) {
@@ -3742,11 +4669,11 @@ int riscv013_test_compliance(struct target *target)
/* ABSTRACTAUTO
See which bits are actually writable */
- COMPLIANCE_WRITE(target, DMI_ABSTRACTAUTO, 0xFFFFFFFF);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
uint32_t abstractauto;
uint32_t busy;
- COMPLIANCE_READ(target, &abstractauto, DMI_ABSTRACTAUTO);
- COMPLIANCE_WRITE(target, DMI_ABSTRACTAUTO, 0x0);
+ COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0x0);
if (abstractauto > 0) {
/* This mechanism only works when you have a reasonably sized progbuf, which is not
a true compliance requirement. */
@@ -3761,39 +4688,39 @@ int riscv013_test_compliance(struct target *target)
COMPLIANCE_MUST_PASS(riscv_program_insert(&program, wfi()));
COMPLIANCE_MUST_PASS(riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, 1));
COMPLIANCE_MUST_PASS(riscv_program_ebreak(&program));
- COMPLIANCE_WRITE(target, DMI_ABSTRACTAUTO, 0x0);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0x0);
COMPLIANCE_MUST_PASS(riscv_program_exec(&program, target));
testvar++;
- COMPLIANCE_WRITE(target, DMI_ABSTRACTAUTO, 0xFFFFFFFF);
- COMPLIANCE_READ(target, &abstractauto, DMI_ABSTRACTAUTO);
- uint32_t autoexec_data = get_field(abstractauto, DMI_ABSTRACTAUTO_AUTOEXECDATA);
- uint32_t autoexec_progbuf = get_field(abstractauto, DMI_ABSTRACTAUTO_AUTOEXECPROGBUF);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
+ COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
+ uint32_t autoexec_data = get_field(abstractauto, DM_ABSTRACTAUTO_AUTOEXECDATA);
+ uint32_t autoexec_progbuf = get_field(abstractauto, DM_ABSTRACTAUTO_AUTOEXECPROGBUF);
for (unsigned int i = 0; i < 12; i++) {
- COMPLIANCE_READ(target, &testvar_read, DMI_DATA0 + i);
+ COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
do {
- COMPLIANCE_READ(target, &testvar_read, DMI_ABSTRACTCS);
- busy = get_field(testvar_read, DMI_ABSTRACTCS_BUSY);
+ COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
+ busy = get_field(testvar_read, DM_ABSTRACTCS_BUSY);
} while (busy);
if (autoexec_data & (1 << i)) {
- COMPLIANCE_TEST(i < get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT),
+ COMPLIANCE_TEST(i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT),
"AUTOEXEC may be writable up to DATACOUNT bits.");
testvar++;
}
}
for (unsigned int i = 0; i < 16; i++) {
- COMPLIANCE_READ(target, &testvar_read, DMI_PROGBUF0 + i);
+ COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
do {
- COMPLIANCE_READ(target, &testvar_read, DMI_ABSTRACTCS);
- busy = get_field(testvar_read, DMI_ABSTRACTCS_BUSY);
+ COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
+ busy = get_field(testvar_read, DM_ABSTRACTCS_BUSY);
} while (busy);
if (autoexec_progbuf & (1 << i)) {
- COMPLIANCE_TEST(i < get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE),
+ COMPLIANCE_TEST(i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE),
"AUTOEXEC may be writable up to PROGBUFSIZE bits.");
testvar++;
}
}
- COMPLIANCE_WRITE(target, DMI_ABSTRACTAUTO, 0);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0);
COMPLIANCE_TEST(ERROR_OK == register_read_direct(target, &value, GDB_REGNO_S0),
"Need to be able to read S0 to test ABSTRACTAUTO");
@@ -3852,20 +4779,20 @@ int riscv013_test_compliance(struct target *target)
*/
/* Write some registers. They should not be impacted by ndmreset. */
- COMPLIANCE_WRITE(target, DMI_COMMAND, 0xFFFFFFFF);
+ COMPLIANCE_WRITE(target, DM_COMMAND, 0xFFFFFFFF);
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE); i++) {
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
testvar = (i + 1) * 0x11111111;
- COMPLIANCE_WRITE(target, DMI_PROGBUF0 + i, testvar);
+ COMPLIANCE_WRITE(target, DM_PROGBUF0 + i, testvar);
}
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT); i++) {
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
testvar = (i + 1) * 0x11111111;
- COMPLIANCE_WRITE(target, DMI_DATA0 + i, testvar);
+ COMPLIANCE_WRITE(target, DM_DATA0 + i, testvar);
}
- COMPLIANCE_WRITE(target, DMI_ABSTRACTAUTO, 0xFFFFFFFF);
- COMPLIANCE_READ(target, &abstractauto, DMI_ABSTRACTAUTO);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
+ COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
/* Pulse reset. */
target->reset_halt = true;
@@ -3874,25 +4801,25 @@ int riscv013_test_compliance(struct target *target)
COMPLIANCE_TEST(ERROR_OK == deassert_reset(target), "Must be able to deassert NDMRESET");
/* Verify that most stuff is not affected by ndmreset. */
- COMPLIANCE_READ(target, &testvar_read, DMI_ABSTRACTCS);
- COMPLIANCE_TEST(get_field(testvar_read, DMI_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
- "NDMRESET should not affect DMI_ABSTRACTCS");
- COMPLIANCE_READ(target, &testvar_read, DMI_ABSTRACTAUTO);
- COMPLIANCE_TEST(testvar_read == abstractauto, "NDMRESET should not affect DMI_ABSTRACTAUTO");
+ COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
+ COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
+ "NDMRESET should not affect DM_ABSTRACTCS");
+ COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTAUTO);
+ COMPLIANCE_TEST(testvar_read == abstractauto, "NDMRESET should not affect DM_ABSTRACTAUTO");
/* Clean up to avoid future test failures */
- COMPLIANCE_WRITE(target, DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR);
- COMPLIANCE_WRITE(target, DMI_ABSTRACTAUTO, 0);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
+ COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0);
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE); i++) {
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
testvar = (i + 1) * 0x11111111;
- COMPLIANCE_READ(target, &testvar_read, DMI_PROGBUF0 + i);
+ COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
COMPLIANCE_TEST(testvar_read == testvar, "PROGBUF words must not be affected by NDMRESET");
}
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT); i++) {
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
testvar = (i + 1) * 0x11111111;
- COMPLIANCE_READ(target, &testvar_read, DMI_DATA0 + i);
+ COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
COMPLIANCE_TEST(testvar_read == testvar, "DATA words must not be affected by NDMRESET");
}
@@ -3909,20 +4836,20 @@ int riscv013_test_compliance(struct target *target)
/* DMACTIVE -- deasserting DMACTIVE should reset all the above values. */
/* Toggle dmactive */
- COMPLIANCE_WRITE(target, DMI_DMCONTROL, 0);
- COMPLIANCE_WRITE(target, DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE);
- COMPLIANCE_READ(target, &testvar_read, DMI_ABSTRACTCS);
- COMPLIANCE_TEST(get_field(testvar_read, DMI_ABSTRACTCS_CMDERR) == 0, "ABSTRACTCS.cmderr should reset to 0");
- COMPLIANCE_READ(target, &testvar_read, DMI_ABSTRACTAUTO);
+ COMPLIANCE_WRITE(target, DM_DMCONTROL, 0);
+ COMPLIANCE_WRITE(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
+ COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
+ COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == 0, "ABSTRACTCS.cmderr should reset to 0");
+ COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTAUTO);
COMPLIANCE_TEST(testvar_read == 0, "ABSTRACTAUTO should reset to 0");
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE); i++) {
- COMPLIANCE_READ(target, &testvar_read, DMI_PROGBUF0 + i);
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
+ COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
COMPLIANCE_TEST(testvar_read == 0, "PROGBUF words should reset to 0");
}
- for (unsigned int i = 0; i < get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT); i++) {
- COMPLIANCE_READ(target, &testvar_read, DMI_DATA0 + i);
+ for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
+ COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
COMPLIANCE_TEST(testvar_read == 0, "DATA words should reset to 0");
}
@@ -3936,7 +4863,7 @@ int riscv013_test_compliance(struct target *target)
*/
/* Halt every hart for any follow-up tests*/
- COMPLIANCE_MUST_PASS(riscv_halt_all_harts(target));
+ COMPLIANCE_MUST_PASS(riscv_halt(target));
uint32_t failed_tests = total_tests - passed_tests;
if (total_tests == passed_tests) {
diff --git a/src/target/riscv/riscv.c b/src/target/riscv/riscv.c
index de2f095..4ef969b 100644
--- a/src/target/riscv/riscv.c
+++ b/src/target/riscv/riscv.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#include <assert.h>
#include <stdlib.h>
#include <time.h>
@@ -18,48 +20,6 @@
#include "gdb_regs.h"
#include "rtos/rtos.h"
-/**
- * Since almost everything can be accomplish by scanning the dbus register, all
- * functions here assume dbus is already selected. The exception are functions
- * called directly by OpenOCD, which can't assume anything about what's
- * currently in IR. They should set IR to dbus explicitly.
- */
-
-/**
- * Code structure
- *
- * At the bottom of the stack are the OpenOCD JTAG functions:
- * jtag_add_[id]r_scan
- * jtag_execute_query
- * jtag_add_runtest
- *
- * There are a few functions to just instantly shift a register and get its
- * value:
- * dtmcontrol_scan
- * idcode_scan
- * dbus_scan
- *
- * Because doing one scan and waiting for the result is slow, most functions
- * batch up a bunch of dbus writes and then execute them all at once. They use
- * the scans "class" for this:
- * scans_new
- * scans_delete
- * scans_execute
- * scans_add_...
- * Usually you new(), call a bunch of add functions, then execute() and look
- * at the results by calling scans_get...()
- *
- * Optimized functions will directly use the scans class above, but slightly
- * lazier code will use the cache functions that in turn use the scans
- * functions:
- * cache_get...
- * cache_set...
- * cache_write
- * cache_set... update a local structure, which is then synced to the target
- * with cache_write(). Only Debug RAM words that are actually changed are sent
- * to the target. Afterwards use cache_get... to read results.
- */
-
#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
@@ -108,12 +68,6 @@ typedef enum {
#define DBUS_DATA_SIZE 34
#define DBUS_ADDRESS_START 36
-typedef enum {
- RE_OK,
- RE_FAIL,
- RE_AGAIN
-} riscv_error_t;
-
typedef enum slot {
SLOT0,
SLOT1,
@@ -170,6 +124,71 @@ struct scan_field select_idcode = {
.out_value = ir_idcode
};
+bscan_tunnel_type_t bscan_tunnel_type;
+int bscan_tunnel_ir_width; /* if zero, then tunneling is not present/active */
+
+static uint8_t bscan_zero[4] = {0};
+static uint8_t bscan_one[4] = {1};
+
+uint8_t ir_user4[4] = {0x23};
+struct scan_field select_user4 = {
+ .in_value = NULL,
+ .out_value = ir_user4
+};
+
+
+uint8_t bscan_tunneled_ir_width[4] = {5}; /* overridden by assignment in riscv_init_target */
+struct scan_field _bscan_tunnel_data_register_select_dmi[] = {
+ {
+ .num_bits = 3,
+ .out_value = bscan_zero,
+ .in_value = NULL,
+ },
+ {
+ .num_bits = 5, /* initialized in riscv_init_target to ir width of DM */
+ .out_value = ir_dbus,
+ .in_value = NULL,
+ },
+ {
+ .num_bits = 7,
+ .out_value = bscan_tunneled_ir_width,
+ .in_value = NULL,
+ },
+ {
+ .num_bits = 1,
+ .out_value = bscan_zero,
+ .in_value = NULL,
+ }
+};
+
+struct scan_field _bscan_tunnel_nested_tap_select_dmi[] = {
+ {
+ .num_bits = 1,
+ .out_value = bscan_zero,
+ .in_value = NULL,
+ },
+ {
+ .num_bits = 7,
+ .out_value = bscan_tunneled_ir_width,
+ .in_value = NULL,
+ },
+ {
+ .num_bits = 0, /* initialized in riscv_init_target to ir width of DM */
+ .out_value = ir_dbus,
+ .in_value = NULL,
+ },
+ {
+ .num_bits = 3,
+ .out_value = bscan_zero,
+ .in_value = NULL,
+ }
+};
+struct scan_field *bscan_tunnel_nested_tap_select_dmi = _bscan_tunnel_nested_tap_select_dmi;
+uint32_t bscan_tunnel_nested_tap_select_dmi_num_fields = DIM(_bscan_tunnel_nested_tap_select_dmi);
+
+struct scan_field *bscan_tunnel_data_register_select_dmi = _bscan_tunnel_data_register_select_dmi;
+uint32_t bscan_tunnel_data_register_select_dmi_num_fields = DIM(_bscan_tunnel_data_register_select_dmi);
+
struct trigger {
uint64_t address;
uint32_t length;
@@ -186,6 +205,12 @@ int riscv_command_timeout_sec = DEFAULT_COMMAND_TIMEOUT_SEC;
int riscv_reset_timeout_sec = DEFAULT_RESET_TIMEOUT_SEC;
bool riscv_prefer_sba;
+bool riscv_enable_virt2phys = true;
+bool riscv_ebreakm = true;
+bool riscv_ebreaks = true;
+bool riscv_ebreaku = true;
+
+bool riscv_enable_virtual;
typedef struct {
uint16_t low, high;
@@ -199,12 +224,159 @@ range_t *expose_csr;
/* Same, but for custom registers. */
range_t *expose_custom;
+static enum {
+ RO_NORMAL,
+ RO_REVERSED
+} resume_order;
+
+virt2phys_info_t sv32 = {
+ .name = "Sv32",
+ .va_bits = 32,
+ .level = 2,
+ .pte_shift = 2,
+ .vpn_shift = {12, 22},
+ .vpn_mask = {0x3ff, 0x3ff},
+ .pte_ppn_shift = {10, 20},
+ .pte_ppn_mask = {0x3ff, 0xfff},
+ .pa_ppn_shift = {12, 22},
+ .pa_ppn_mask = {0x3ff, 0xfff},
+};
+
+virt2phys_info_t sv39 = {
+ .name = "Sv39",
+ .va_bits = 39,
+ .level = 3,
+ .pte_shift = 3,
+ .vpn_shift = {12, 21, 30},
+ .vpn_mask = {0x1ff, 0x1ff, 0x1ff},
+ .pte_ppn_shift = {10, 19, 28},
+ .pte_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
+ .pa_ppn_shift = {12, 21, 30},
+ .pa_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
+};
+
+virt2phys_info_t sv48 = {
+ .name = "Sv48",
+ .va_bits = 48,
+ .level = 4,
+ .pte_shift = 3,
+ .vpn_shift = {12, 21, 30, 39},
+ .vpn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ff},
+ .pte_ppn_shift = {10, 19, 28, 37},
+ .pte_ppn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ffff},
+ .pa_ppn_shift = {12, 21, 30, 39},
+ .pa_ppn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ffff},
+};
+
+static int riscv_resume_go_all_harts(struct target *target);
+
+void select_dmi_via_bscan(struct target *target)
+{
+ jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
+ if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
+ jtag_add_dr_scan(target->tap, bscan_tunnel_data_register_select_dmi_num_fields,
+ bscan_tunnel_data_register_select_dmi, TAP_IDLE);
+ else /* BSCAN_TUNNEL_NESTED_TAP */
+ jtag_add_dr_scan(target->tap, bscan_tunnel_nested_tap_select_dmi_num_fields,
+ bscan_tunnel_nested_tap_select_dmi, TAP_IDLE);
+}
+
+uint32_t dtmcontrol_scan_via_bscan(struct target *target, uint32_t out)
+{
+ /* On BSCAN TAP: Select IR=USER4, issue tunneled IR scan via BSCAN TAP's DR */
+ uint8_t tunneled_ir_width[4] = {bscan_tunnel_ir_width};
+ uint8_t tunneled_dr_width[4] = {32};
+ uint8_t out_value[5] = {0};
+ uint8_t in_value[5] = {0};
+
+ buf_set_u32(out_value, 0, 32, out);
+ struct scan_field tunneled_ir[4] = {};
+ struct scan_field tunneled_dr[4] = {};
+
+ if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER) {
+ tunneled_ir[0].num_bits = 3;
+ tunneled_ir[0].out_value = bscan_zero;
+ tunneled_ir[0].in_value = NULL;
+ tunneled_ir[1].num_bits = bscan_tunnel_ir_width;
+ tunneled_ir[1].out_value = ir_dtmcontrol;
+ tunneled_ir[1].in_value = NULL;
+ tunneled_ir[2].num_bits = 7;
+ tunneled_ir[2].out_value = tunneled_ir_width;
+ tunneled_ir[2].in_value = NULL;
+ tunneled_ir[3].num_bits = 1;
+ tunneled_ir[3].out_value = bscan_zero;
+ tunneled_ir[3].in_value = NULL;
+
+ tunneled_dr[0].num_bits = 3;
+ tunneled_dr[0].out_value = bscan_zero;
+ tunneled_dr[0].in_value = NULL;
+ tunneled_dr[1].num_bits = 32 + 1;
+ tunneled_dr[1].out_value = out_value;
+ tunneled_dr[1].in_value = in_value;
+ tunneled_dr[2].num_bits = 7;
+ tunneled_dr[2].out_value = tunneled_dr_width;
+ tunneled_dr[2].in_value = NULL;
+ tunneled_dr[3].num_bits = 1;
+ tunneled_dr[3].out_value = bscan_one;
+ tunneled_dr[3].in_value = NULL;
+ } else {
+ /* BSCAN_TUNNEL_NESTED_TAP */
+ tunneled_ir[3].num_bits = 3;
+ tunneled_ir[3].out_value = bscan_zero;
+ tunneled_ir[3].in_value = NULL;
+ tunneled_ir[2].num_bits = bscan_tunnel_ir_width;
+ tunneled_ir[2].out_value = ir_dtmcontrol;
+ tunneled_ir[2].in_value = NULL;
+ tunneled_ir[1].num_bits = 7;
+ tunneled_ir[1].out_value = tunneled_ir_width;
+ tunneled_ir[1].in_value = NULL;
+ tunneled_ir[0].num_bits = 1;
+ tunneled_ir[0].out_value = bscan_zero;
+ tunneled_ir[0].in_value = NULL;
+
+ tunneled_dr[3].num_bits = 3;
+ tunneled_dr[3].out_value = bscan_zero;
+ tunneled_dr[3].in_value = NULL;
+ tunneled_dr[2].num_bits = 32 + 1;
+ tunneled_dr[2].out_value = out_value;
+ tunneled_dr[2].in_value = in_value;
+ tunneled_dr[1].num_bits = 7;
+ tunneled_dr[1].out_value = tunneled_dr_width;
+ tunneled_dr[1].in_value = NULL;
+ tunneled_dr[0].num_bits = 1;
+ tunneled_dr[0].out_value = bscan_one;
+ tunneled_dr[0].in_value = NULL;
+ }
+ jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
+ jtag_add_dr_scan(target->tap, DIM(tunneled_ir), tunneled_ir, TAP_IDLE);
+ jtag_add_dr_scan(target->tap, DIM(tunneled_dr), tunneled_dr, TAP_IDLE);
+ select_dmi_via_bscan(target);
+
+ int retval = jtag_execute_queue();
+ if (retval != ERROR_OK) {
+ LOG_ERROR("failed jtag scan: %d", retval);
+ return retval;
+ }
+ /* Note the starting offset is bit 1, not bit 0. In the BSCAN tunnel, there is a one-bit TCK skew between
+ output and input. */
+ uint32_t in = buf_get_u32(in_value, 1, 32);
+ LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
+
+ return in;
+}
+
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
{
struct scan_field field;
uint8_t in_value[4];
uint8_t out_value[4] = { 0 };
+ if (bscan_tunnel_ir_width != 0)
+ return dtmcontrol_scan_via_bscan(target, out);
+
buf_set_u32(out_value, 0, 32, out);
jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
@@ -264,6 +436,15 @@ static int riscv_init_target(struct command_context *cmd_ctx,
select_dbus.num_bits = target->tap->ir_length;
select_idcode.num_bits = target->tap->ir_length;
+ if (bscan_tunnel_ir_width != 0) {
+ select_user4.num_bits = target->tap->ir_length;
+ bscan_tunneled_ir_width[0] = bscan_tunnel_ir_width;
+ if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
+ bscan_tunnel_data_register_select_dmi[1].num_bits = bscan_tunnel_ir_width;
+ else /* BSCAN_TUNNEL_NESTED_TAP */
+ bscan_tunnel_nested_tap_select_dmi[2].num_bits = bscan_tunnel_ir_width;
+ }
+
riscv_semihosting_init(target);
target->debug_reason = DBG_REASON_DBGRQ;
@@ -302,12 +483,6 @@ static void riscv_deinit_target(struct target *target)
target->arch_info = NULL;
}
-static int oldriscv_halt(struct target *target)
-{
- struct target_type *tt = get_target_type(target);
- return tt->halt(target);
-}
-
static void trigger_from_breakpoint(struct trigger *trigger,
const struct breakpoint *breakpoint)
{
@@ -691,6 +866,8 @@ int riscv_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoi
{
struct watchpoint *wp = target->watchpoints;
+ if (riscv_rtos_enabled(target))
+ riscv_set_current_hartid(target, target->rtos->current_thread - 1);
LOG_DEBUG("Current hartid = %d", riscv_current_hartid(target));
/*TODO instead of disassembling the instruction that we think caused the
@@ -825,13 +1002,126 @@ static int old_or_new_riscv_poll(struct target *target)
return riscv_openocd_poll(target);
}
-static int old_or_new_riscv_halt(struct target *target)
+int halt_prep(struct target *target)
{
RISCV_INFO(r);
- if (r->is_halted == NULL)
- return oldriscv_halt(target);
- else
- return riscv_openocd_halt(target);
+ for (int i = 0; i < riscv_count_harts(target); ++i) {
+ if (!riscv_hart_enabled(target, i))
+ continue;
+
+ LOG_DEBUG("[%s] prep hart, debug_reason=%d", target_name(target),
+ target->debug_reason);
+ if (riscv_set_current_hartid(target, i) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_is_halted(target)) {
+ LOG_DEBUG("Hart %d is already halted (reason=%d).", i,
+ target->debug_reason);
+ } else {
+ if (r->halt_prep(target) != ERROR_OK)
+ return ERROR_FAIL;
+ r->prepped = true;
+ }
+ }
+ return ERROR_OK;
+}
+
+int riscv_halt_go_all_harts(struct target *target)
+{
+ RISCV_INFO(r);
+ for (int i = 0; i < riscv_count_harts(target); ++i) {
+ if (!riscv_hart_enabled(target, i))
+ continue;
+
+ if (riscv_set_current_hartid(target, i) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_is_halted(target)) {
+ LOG_DEBUG("Hart %d is already halted.", i);
+ } else {
+ if (r->halt_go(target) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ }
+
+ riscv_invalidate_register_cache(target);
+
+ return ERROR_OK;
+}
+
+int halt_go(struct target *target)
+{
+ riscv_info_t *r = riscv_info(target);
+ int result;
+ if (r->is_halted == NULL) {
+ struct target_type *tt = get_target_type(target);
+ result = tt->halt(target);
+ } else {
+ result = riscv_halt_go_all_harts(target);
+ }
+ target->state = TARGET_HALTED;
+ if (target->debug_reason == DBG_REASON_NOTHALTED)
+ target->debug_reason = DBG_REASON_DBGRQ;
+
+ return result;
+}
+
+static int halt_finish(struct target *target)
+{
+ return target_call_event_callbacks(target, TARGET_EVENT_HALTED);
+}
+
+int riscv_halt(struct target *target)
+{
+ RISCV_INFO(r);
+
+ if (r->is_halted == NULL) {
+ struct target_type *tt = get_target_type(target);
+ return tt->halt(target);
+ }
+
+ LOG_DEBUG("[%d] halting all harts", target->coreid);
+
+ int result = ERROR_OK;
+ if (target->smp) {
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ if (halt_prep(t) != ERROR_OK)
+ result = ERROR_FAIL;
+ }
+
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ riscv_info_t *i = riscv_info(t);
+ if (i->prepped) {
+ if (halt_go(t) != ERROR_OK)
+ result = ERROR_FAIL;
+ }
+ }
+
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ if (halt_finish(t) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ } else {
+ if (halt_prep(target) != ERROR_OK)
+ result = ERROR_FAIL;
+ if (halt_go(target) != ERROR_OK)
+ result = ERROR_FAIL;
+ if (halt_finish(target) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ if (riscv_rtos_enabled(target)) {
+ if (r->rtos_hartid != -1) {
+ LOG_DEBUG("halt requested on RTOS hartid %d", r->rtos_hartid);
+ target->rtos->current_threadid = r->rtos_hartid + 1;
+ target->rtos->current_thread = r->rtos_hartid + 1;
+ } else
+ LOG_DEBUG("halt requested, but no known RTOS hartid");
+ }
+
+ return result;
}
static int riscv_assert_reset(struct target *target)
@@ -849,44 +1139,243 @@ static int riscv_deassert_reset(struct target *target)
return tt->deassert_reset(target);
}
+int riscv_resume_prep_all_harts(struct target *target)
+{
+ RISCV_INFO(r);
+ for (int i = 0; i < riscv_count_harts(target); ++i) {
+ if (!riscv_hart_enabled(target, i))
+ continue;
-static int oldriscv_resume(struct target *target, int current, uint32_t address,
- int handle_breakpoints, int debug_execution)
+ LOG_DEBUG("prep hart %d", i);
+ if (riscv_set_current_hartid(target, i) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_is_halted(target)) {
+ if (r->resume_prep(target) != ERROR_OK)
+ return ERROR_FAIL;
+ } else {
+ LOG_DEBUG(" hart %d requested resume, but was already resumed", i);
+ }
+ }
+
+ LOG_DEBUG("[%d] mark as prepped", target->coreid);
+ r->prepped = true;
+
+ return ERROR_OK;
+}
+
+/* state must be riscv_reg_t state[RISCV_MAX_HWBPS] = {0}; */
+static int disable_triggers(struct target *target, riscv_reg_t *state)
{
- struct target_type *tt = get_target_type(target);
- return tt->resume(target, current, address, handle_breakpoints,
- debug_execution);
+ RISCV_INFO(r);
+
+ LOG_DEBUG("deal with triggers");
+
+ if (riscv_enumerate_triggers(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ int hartid = riscv_current_hartid(target);
+ if (r->manual_hwbp_set) {
+ /* Look at every trigger that may have been set. */
+ riscv_reg_t tselect;
+ if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
+ return ERROR_FAIL;
+ for (unsigned t = 0; t < r->trigger_count[hartid]; t++) {
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
+ return ERROR_FAIL;
+ riscv_reg_t tdata1;
+ if (riscv_get_register(target, &tdata1, GDB_REGNO_TDATA1) != ERROR_OK)
+ return ERROR_FAIL;
+ if (tdata1 & MCONTROL_DMODE(riscv_xlen(target))) {
+ state[t] = tdata1;
+ if (riscv_set_register(target, GDB_REGNO_TDATA1, 0) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ }
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
+ return ERROR_FAIL;
+
+ } else {
+ /* Just go through the triggers we manage. */
+ struct watchpoint *watchpoint = target->watchpoints;
+ int i = 0;
+ while (watchpoint) {
+ LOG_DEBUG("watchpoint %d: set=%d", i, watchpoint->set);
+ state[i] = watchpoint->set;
+ if (watchpoint->set) {
+ if (riscv_remove_watchpoint(target, watchpoint) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ watchpoint = watchpoint->next;
+ i++;
+ }
+ }
+
+ return ERROR_OK;
}
-static int old_or_new_riscv_resume(struct target *target, int current,
+static int enable_triggers(struct target *target, riscv_reg_t *state)
+{
+ RISCV_INFO(r);
+
+ int hartid = riscv_current_hartid(target);
+
+ if (r->manual_hwbp_set) {
+ /* Look at every trigger that may have been set. */
+ riscv_reg_t tselect;
+ if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
+ return ERROR_FAIL;
+ for (unsigned t = 0; t < r->trigger_count[hartid]; t++) {
+ if (state[t] != 0) {
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_set_register(target, GDB_REGNO_TDATA1, state[t]) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ }
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
+ return ERROR_FAIL;
+
+ } else {
+ struct watchpoint *watchpoint = target->watchpoints;
+ int i = 0;
+ while (watchpoint) {
+ LOG_DEBUG("watchpoint %d: cleared=%" PRId64, i, state[i]);
+ if (state[i]) {
+ if (riscv_add_watchpoint(target, watchpoint) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ watchpoint = watchpoint->next;
+ i++;
+ }
+ }
+
+ return ERROR_OK;
+}
+
+/**
+ * Get everything ready to resume.
+ */
+static int resume_prep(struct target *target, int current,
target_addr_t address, int handle_breakpoints, int debug_execution)
{
+ RISCV_INFO(r);
+ LOG_DEBUG("[%d]", target->coreid);
+
+ if (!current)
+ riscv_set_register(target, GDB_REGNO_PC, address);
+
+ if (target->debug_reason == DBG_REASON_WATCHPOINT) {
+ /* To be able to run off a trigger, disable all the triggers, step, and
+ * then resume as usual. */
+ riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
+
+ if (disable_triggers(target, trigger_state) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (old_or_new_riscv_step(target, true, 0, false) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (enable_triggers(target, trigger_state) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ if (r->is_halted) {
+ if (riscv_resume_prep_all_harts(target) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ LOG_DEBUG("[%d] mark as prepped", target->coreid);
+ r->prepped = true;
+
+ return ERROR_OK;
+}
+
+/**
+ * Resume all the harts that have been prepped, as close to simultaneously as
+ * possible.
+ */
+static int resume_go(struct target *target, int current,
+ target_addr_t address, int handle_breakpoints, int debug_execution)
+{
+ riscv_info_t *r = riscv_info(target);
+ int result;
+ if (r->is_halted == NULL) {
+ struct target_type *tt = get_target_type(target);
+ result = tt->resume(target, current, address, handle_breakpoints,
+ debug_execution);
+ } else {
+ result = riscv_resume_go_all_harts(target);
+ }
+
+ return result;
+}
+
+static int resume_finish(struct target *target)
+{
+ register_cache_invalidate(target->reg_cache);
+
+ target->state = TARGET_RUNNING;
+ target->debug_reason = DBG_REASON_NOTHALTED;
+ return target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
+}
+
+/**
+ * @param single_hart When true, only resume a single hart even if SMP is
+ * configured. This is used to run algorithms on just one hart.
+ */
+int riscv_resume(
+ struct target *target,
+ int current,
+ target_addr_t address,
+ int handle_breakpoints,
+ int debug_execution,
+ bool single_hart)
+{
LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
- if (target->smp) {
- struct target_list *targets = target->head;
- int result = ERROR_OK;
- while (targets) {
- struct target *t = targets->target;
- riscv_info_t *r = riscv_info(t);
- if (r->is_halted == NULL) {
- if (oldriscv_resume(t, current, address, handle_breakpoints,
+ int result = ERROR_OK;
+ if (target->smp && !single_hart) {
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ if (resume_prep(t, current, address, handle_breakpoints,
+ debug_execution) != ERROR_OK)
+ result = ERROR_FAIL;
+ }
+
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ riscv_info_t *i = riscv_info(t);
+ if (i->prepped) {
+ if (resume_go(t, current, address, handle_breakpoints,
debug_execution) != ERROR_OK)
result = ERROR_FAIL;
- } else {
- if (riscv_openocd_resume(t, current, address,
- handle_breakpoints, debug_execution) != ERROR_OK)
- result = ERROR_FAIL;
}
- targets = targets->next;
}
- return result;
+
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ if (resume_finish(t) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ } else {
+ if (resume_prep(target, current, address, handle_breakpoints,
+ debug_execution) != ERROR_OK)
+ result = ERROR_FAIL;
+ if (resume_go(target, current, address, handle_breakpoints,
+ debug_execution) != ERROR_OK)
+ result = ERROR_FAIL;
+ if (resume_finish(target) != ERROR_OK)
+ return ERROR_FAIL;
}
- RISCV_INFO(r);
- if (r->is_halted == NULL)
- return oldriscv_resume(target, current, address, handle_breakpoints, debug_execution);
- else
- return riscv_openocd_resume(target, current, address, handle_breakpoints, debug_execution);
+ return result;
+}
+
+static int riscv_target_resume(struct target *target, int current, target_addr_t address,
+ int handle_breakpoints, int debug_execution)
+{
+ return riscv_resume(target, current, address, handle_breakpoints,
+ debug_execution, false);
}
static int riscv_select_current_hart(struct target *target)
@@ -900,20 +1389,229 @@ static int riscv_select_current_hart(struct target *target)
return riscv_set_current_hartid(target, target->coreid);
}
+static int riscv_mmu(struct target *target, int *enabled)
+{
+ if (!riscv_enable_virt2phys) {
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ if (riscv_rtos_enabled(target))
+ riscv_set_current_hartid(target, target->rtos->current_thread - 1);
+
+ /* Don't use MMU in explicit or effective M (machine) mode */
+ riscv_reg_t priv;
+ if (riscv_get_register(target, &priv, GDB_REGNO_PRIV) != ERROR_OK) {
+ LOG_ERROR("Failed to read priv register.");
+ return ERROR_FAIL;
+ }
+
+ riscv_reg_t mstatus;
+ if (riscv_get_register(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK) {
+ LOG_ERROR("Failed to read mstatus register.");
+ return ERROR_FAIL;
+ }
+
+ if ((get_field(mstatus, MSTATUS_MPRV) ? get_field(mstatus, MSTATUS_MPP) : priv) == PRV_M) {
+ LOG_DEBUG("SATP/MMU ignored in Machine mode (mstatus=0x%" PRIx64 ").", mstatus);
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ riscv_reg_t satp;
+ if (riscv_get_register(target, &satp, GDB_REGNO_SATP) != ERROR_OK) {
+ LOG_DEBUG("Couldn't read SATP.");
+ /* If we can't read SATP, then there must not be an MMU. */
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ if (get_field(satp, RISCV_SATP_MODE(riscv_xlen(target))) == SATP_MODE_OFF) {
+ LOG_DEBUG("MMU is disabled.");
+ *enabled = 0;
+ } else {
+ LOG_DEBUG("MMU is enabled.");
+ *enabled = 1;
+ }
+
+ return ERROR_OK;
+}
+
+static int riscv_address_translate(struct target *target,
+ target_addr_t virtual, target_addr_t *physical)
+{
+ RISCV_INFO(r);
+ riscv_reg_t satp_value;
+ int mode;
+ uint64_t ppn_value;
+ target_addr_t table_address;
+ virt2phys_info_t *info;
+ uint64_t pte;
+ int i;
+
+ if (riscv_rtos_enabled(target))
+ riscv_set_current_hartid(target, target->rtos->current_thread - 1);
+
+ int result = riscv_get_register(target, &satp_value, GDB_REGNO_SATP);
+ if (result != ERROR_OK)
+ return result;
+
+ unsigned xlen = riscv_xlen(target);
+ mode = get_field(satp_value, RISCV_SATP_MODE(xlen));
+ switch (mode) {
+ case SATP_MODE_SV32:
+ info = &sv32;
+ break;
+ case SATP_MODE_SV39:
+ info = &sv39;
+ break;
+ case SATP_MODE_SV48:
+ info = &sv48;
+ break;
+ case SATP_MODE_OFF:
+ LOG_ERROR("No translation or protection." \
+ " (satp: 0x%" PRIx64 ")", satp_value);
+ return ERROR_FAIL;
+ default:
+ LOG_ERROR("The translation mode is not supported." \
+ " (satp: 0x%" PRIx64 ")", satp_value);
+ return ERROR_FAIL;
+ }
+ LOG_DEBUG("virtual=0x%" TARGET_PRIxADDR "; mode=%s", virtual, info->name);
+
+ /* verify bits xlen-1:va_bits-1 are all equal */
+ target_addr_t mask = ((target_addr_t)1 << (xlen - (info->va_bits - 1))) - 1;
+ target_addr_t masked_msbs = (virtual >> (info->va_bits - 1)) & mask;
+ if (masked_msbs != 0 && masked_msbs != mask) {
+ LOG_ERROR("Virtual address 0x%" TARGET_PRIxADDR " is not sign-extended "
+ "for %s mode.", virtual, info->name);
+ return ERROR_FAIL;
+ }
+
+ ppn_value = get_field(satp_value, RISCV_SATP_PPN(xlen));
+ table_address = ppn_value << RISCV_PGSHIFT;
+ i = info->level - 1;
+ while (i >= 0) {
+ uint64_t vpn = virtual >> info->vpn_shift[i];
+ vpn &= info->vpn_mask[i];
+ target_addr_t pte_address = table_address +
+ (vpn << info->pte_shift);
+ uint8_t buffer[8];
+ assert(info->pte_shift <= 3);
+ int retval = r->read_memory(target, pte_address,
+ 4, (1 << info->pte_shift) / 4, buffer, 4);
+ if (retval != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (info->pte_shift == 2)
+ pte = buf_get_u32(buffer, 0, 32);
+ else
+ pte = buf_get_u64(buffer, 0, 64);
+
+ LOG_DEBUG("i=%d; PTE @0x%" TARGET_PRIxADDR " = 0x%" PRIx64, i,
+ pte_address, pte);
+
+ if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W)))
+ return ERROR_FAIL;
+
+ if ((pte & PTE_R) || (pte & PTE_X)) /* Found leaf PTE. */
+ break;
+
+ i--;
+ if (i < 0)
+ break;
+ ppn_value = pte >> PTE_PPN_SHIFT;
+ table_address = ppn_value << RISCV_PGSHIFT;
+ }
+
+ if (i < 0) {
+ LOG_ERROR("Couldn't find the PTE.");
+ return ERROR_FAIL;
+ }
+
+ /* Make sure to clear out the high bits that may be set. */
+ *physical = virtual & (((target_addr_t)1 << info->va_bits) - 1);
+
+ while (i < info->level) {
+ ppn_value = pte >> info->pte_ppn_shift[i];
+ ppn_value &= info->pte_ppn_mask[i];
+ *physical &= ~(((target_addr_t)info->pa_ppn_mask[i]) <<
+ info->pa_ppn_shift[i]);
+ *physical |= (ppn_value << info->pa_ppn_shift[i]);
+ i++;
+ }
+ LOG_DEBUG("0x%" TARGET_PRIxADDR " -> 0x%" TARGET_PRIxADDR, virtual,
+ *physical);
+
+ return ERROR_OK;
+}
+
+static int riscv_virt2phys(struct target *target, target_addr_t virtual, target_addr_t *physical)
+{
+ int enabled;
+ if (riscv_mmu(target, &enabled) == ERROR_OK) {
+ if (!enabled)
+ return ERROR_FAIL;
+
+ if (riscv_address_translate(target, virtual, physical) == ERROR_OK)
+ return ERROR_OK;
+ }
+
+ return ERROR_FAIL;
+}
+
+static int riscv_read_phys_memory(struct target *target, target_addr_t phys_address,
+ uint32_t size, uint32_t count, uint8_t *buffer)
+{
+ RISCV_INFO(r);
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+ return r->read_memory(target, phys_address, size, count, buffer, size);
+}
+
static int riscv_read_memory(struct target *target, target_addr_t address,
uint32_t size, uint32_t count, uint8_t *buffer)
{
+ if (count == 0) {
+ LOG_WARNING("0-length read from 0x%" TARGET_PRIxADDR, address);
+ return ERROR_OK;
+ }
+
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ target_addr_t physical_addr;
+ if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
+ address = physical_addr;
+
+ RISCV_INFO(r);
+ return r->read_memory(target, address, size, count, buffer, size);
+}
+
+static int riscv_write_phys_memory(struct target *target, target_addr_t phys_address,
+ uint32_t size, uint32_t count, const uint8_t *buffer)
+{
if (riscv_select_current_hart(target) != ERROR_OK)
return ERROR_FAIL;
struct target_type *tt = get_target_type(target);
- return tt->read_memory(target, address, size, count, buffer);
+ return tt->write_memory(target, phys_address, size, count, buffer);
}
static int riscv_write_memory(struct target *target, target_addr_t address,
uint32_t size, uint32_t count, const uint8_t *buffer)
{
+ if (count == 0) {
+ LOG_WARNING("0-length write to 0x%" TARGET_PRIxADDR, address);
+ return ERROR_OK;
+ }
+
if (riscv_select_current_hart(target) != ERROR_OK)
return ERROR_FAIL;
+
+ target_addr_t physical_addr;
+ if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
+ address = physical_addr;
+
struct target_type *tt = get_target_type(target);
return tt->write_memory(target, address, size, count, buffer);
}
@@ -954,20 +1652,26 @@ static int riscv_get_gdb_reg_list_internal(struct target *target,
assert(!target->reg_cache->reg_list[i].valid ||
target->reg_cache->reg_list[i].size > 0);
(*reg_list)[i] = &target->reg_cache->reg_list[i];
- if (read && !target->reg_cache->reg_list[i].valid) {
+ if (read &&
+ target->reg_cache->reg_list[i].exist &&
+ !target->reg_cache->reg_list[i].valid) {
if (target->reg_cache->reg_list[i].type->get(
&target->reg_cache->reg_list[i]) != ERROR_OK)
- /* This function is called when first connecting to gdb,
- * resulting in an attempt to read all kinds of registers which
- * probably will fail. Ignore these failures, and when
- * encountered stop reading to save time. */
- read = false;
+ return ERROR_FAIL;
}
}
return ERROR_OK;
}
+static int riscv_get_gdb_reg_list_noread(struct target *target,
+ struct reg **reg_list[], int *reg_list_size,
+ enum target_register_class reg_class)
+{
+ return riscv_get_gdb_reg_list_internal(target, reg_list, reg_list_size,
+ reg_class, false);
+}
+
static int riscv_get_gdb_reg_list(struct target *target,
struct reg **reg_list[], int *reg_list_size,
enum target_register_class reg_class)
@@ -989,6 +1693,7 @@ static int riscv_run_algorithm(struct target *target, int num_mem_params,
target_addr_t exit_point, int timeout_ms, void *arch_info)
{
riscv_info_t *info = (riscv_info_t *) target->arch_info;
+ int hartid = riscv_current_hartid(target);
if (num_mem_params > 0) {
LOG_ERROR("Memory parameters are not supported for RISC-V algorithms.");
@@ -1005,12 +1710,10 @@ static int riscv_run_algorithm(struct target *target, int num_mem_params,
if (!reg_pc || reg_pc->type->get(reg_pc) != ERROR_OK)
return ERROR_FAIL;
uint64_t saved_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
+ LOG_DEBUG("saved_pc=0x%" PRIx64, saved_pc);
uint64_t saved_regs[32];
for (int i = 0; i < num_reg_params; i++) {
- if (reg_params[i].direction == PARAM_IN)
- continue;
-
LOG_DEBUG("save %s", reg_params[i].reg_name);
struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
if (!r) {
@@ -1032,8 +1735,11 @@ static int riscv_run_algorithm(struct target *target, int num_mem_params,
if (r->type->get(r) != ERROR_OK)
return ERROR_FAIL;
saved_regs[r->number] = buf_get_u64(r->value, 0, r->size);
- if (r->type->set(r, reg_params[i].value) != ERROR_OK)
- return ERROR_FAIL;
+
+ if (reg_params[i].direction == PARAM_OUT || reg_params[i].direction == PARAM_IN_OUT) {
+ if (r->type->set(r, reg_params[i].value) != ERROR_OK)
+ return ERROR_FAIL;
+ }
}
@@ -1059,7 +1765,7 @@ static int riscv_run_algorithm(struct target *target, int num_mem_params,
/* Run algorithm */
LOG_DEBUG("resume at 0x%" TARGET_PRIxADDR, entry_point);
- if (oldriscv_resume(target, 0, entry_point, 0, 0) != ERROR_OK)
+ if (riscv_resume(target, 0, entry_point, 0, 0, true) != ERROR_OK)
return ERROR_FAIL;
int64_t start = timeval_ms();
@@ -1067,11 +1773,28 @@ static int riscv_run_algorithm(struct target *target, int num_mem_params,
LOG_DEBUG("poll()");
int64_t now = timeval_ms();
if (now - start > timeout_ms) {
- LOG_ERROR("Algorithm timed out after %d ms.", timeout_ms);
- LOG_ERROR(" now = 0x%08x", (uint32_t) now);
- LOG_ERROR(" start = 0x%08x", (uint32_t) start);
- oldriscv_halt(target);
+ LOG_ERROR("Algorithm timed out after %" PRId64 " ms.", now - start);
+ riscv_halt(target);
old_or_new_riscv_poll(target);
+ enum gdb_regno regnums[] = {
+ GDB_REGNO_RA, GDB_REGNO_SP, GDB_REGNO_GP, GDB_REGNO_TP,
+ GDB_REGNO_T0, GDB_REGNO_T1, GDB_REGNO_T2, GDB_REGNO_FP,
+ GDB_REGNO_S1, GDB_REGNO_A0, GDB_REGNO_A1, GDB_REGNO_A2,
+ GDB_REGNO_A3, GDB_REGNO_A4, GDB_REGNO_A5, GDB_REGNO_A6,
+ GDB_REGNO_A7, GDB_REGNO_S2, GDB_REGNO_S3, GDB_REGNO_S4,
+ GDB_REGNO_S5, GDB_REGNO_S6, GDB_REGNO_S7, GDB_REGNO_S8,
+ GDB_REGNO_S9, GDB_REGNO_S10, GDB_REGNO_S11, GDB_REGNO_T3,
+ GDB_REGNO_T4, GDB_REGNO_T5, GDB_REGNO_T6,
+ GDB_REGNO_PC,
+ GDB_REGNO_MSTATUS, GDB_REGNO_MEPC, GDB_REGNO_MCAUSE,
+ };
+ for (unsigned i = 0; i < DIM(regnums); i++) {
+ enum gdb_regno regno = regnums[i];
+ riscv_reg_t reg_value;
+ if (riscv_get_register(target, &reg_value, regno) != ERROR_OK)
+ break;
+ LOG_ERROR("%s = 0x%" PRIx64, gdb_regno_name(regno), reg_value);
+ }
return ERROR_TARGET_TIMEOUT;
}
@@ -1080,10 +1803,14 @@ static int riscv_run_algorithm(struct target *target, int num_mem_params,
return result;
}
+ /* The current hart id might have been changed in poll(). */
+ if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
+ return ERROR_FAIL;
+
if (reg_pc->type->get(reg_pc) != ERROR_OK)
return ERROR_FAIL;
uint64_t final_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
- if (final_pc != exit_point) {
+ if (exit_point && final_pc != exit_point) {
LOG_ERROR("PC ended up at 0x%" PRIx64 " instead of 0x%"
TARGET_PRIxADDR, final_pc, exit_point);
return ERROR_FAIL;
@@ -1101,26 +1828,32 @@ static int riscv_run_algorithm(struct target *target, int num_mem_params,
return ERROR_FAIL;
for (int i = 0; i < num_reg_params; i++) {
+ if (reg_params[i].direction == PARAM_IN ||
+ reg_params[i].direction == PARAM_IN_OUT) {
+ struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
+ if (r->type->get(r) != ERROR_OK) {
+ LOG_ERROR("get(%s) failed", r->name);
+ return ERROR_FAIL;
+ }
+ buf_cpy(r->value, reg_params[i].value, reg_params[i].size);
+ }
LOG_DEBUG("restore %s", reg_params[i].reg_name);
struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
buf_set_u64(buf, 0, info->xlen[0], saved_regs[r->number]);
- if (r->type->set(r, buf) != ERROR_OK)
+ if (r->type->set(r, buf) != ERROR_OK) {
+ LOG_ERROR("set(%s) failed", r->name);
return ERROR_FAIL;
+ }
}
return ERROR_OK;
}
-/* Should run code on the target to perform CRC of
-memory. Not yet implemented.
-*/
-
static int riscv_checksum_memory(struct target *target,
target_addr_t address, uint32_t count,
uint32_t *checksum)
{
- *checksum = 0xFFFFFFFF;
- return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ return ERROR_FAIL;
}
/*** OpenOCD Helper Functions ***/
@@ -1149,15 +1882,16 @@ static enum riscv_poll_hart riscv_poll_hart(struct target *target, int hartid)
} else if (target->state != TARGET_RUNNING && !halted) {
LOG_DEBUG(" triggered running");
target->state = TARGET_RUNNING;
+ target->debug_reason = DBG_REASON_NOTHALTED;
return RPH_DISCOVERED_RUNNING;
}
return RPH_NO_CHANGE;
}
-int set_debug_reason(struct target *target, int hartid)
+int set_debug_reason(struct target *target, enum riscv_halt_reason halt_reason)
{
- switch (riscv_halt_reason(target, hartid)) {
+ switch (halt_reason) {
case RISCV_HALT_BREAKPOINT:
target->debug_reason = DBG_REASON_BREAKPOINT;
break;
@@ -1165,6 +1899,7 @@ int set_debug_reason(struct target *target, int hartid)
target->debug_reason = DBG_REASON_WATCHPOINT;
break;
case RISCV_HALT_INTERRUPT:
+ case RISCV_HALT_GROUP:
target->debug_reason = DBG_REASON_DBGRQ;
break;
case RISCV_HALT_SINGLESTEP:
@@ -1176,6 +1911,7 @@ int set_debug_reason(struct target *target, int hartid)
case RISCV_HALT_ERROR:
return ERROR_FAIL;
}
+ LOG_DEBUG("[%s] debug_reason=%d", target_name(target), target->debug_reason);
return ERROR_OK;
}
@@ -1205,70 +1941,92 @@ int riscv_openocd_poll(struct target *target)
}
LOG_DEBUG(" hart %d halted", halted_hart);
- /* If we're here then at least one hart triggered. That means
- * we want to go and halt _every_ hart in the system, as that's
- * the invariant we hold here. Some harts might have already
- * halted (as we're either in single-step mode or they also
- * triggered a breakpoint), so don't attempt to halt those
- * harts. */
- for (int i = 0; i < riscv_count_harts(target); ++i)
- riscv_halt_one_hart(target, i);
+ target->state = TARGET_HALTED;
+ enum riscv_halt_reason halt_reason = riscv_halt_reason(target, halted_hart);
+ if (set_debug_reason(target, halt_reason) != ERROR_OK)
+ return ERROR_FAIL;
+
+ target->rtos->current_threadid = halted_hart + 1;
+ target->rtos->current_thread = halted_hart + 1;
+ riscv_set_rtos_hartid(target, halted_hart);
+
+ /* If we're here then at least one hart triggered. That means we want
+ * to go and halt _every_ hart (configured with -rtos riscv) in the
+ * system, as that's the invariant we hold here. Some harts might have
+ * already halted (as we're either in single-step mode or they also
+ * triggered a breakpoint), so don't attempt to halt those harts.
+ * riscv_halt() will do all that for us. */
+ riscv_halt(target);
} else if (target->smp) {
- bool halt_discovered = false;
- bool newly_halted[128] = {0};
+ unsigned halts_discovered = 0;
+ unsigned total_targets = 0;
+ bool newly_halted[RISCV_MAX_HARTS] = {0};
+ unsigned should_remain_halted = 0;
+ unsigned should_resume = 0;
unsigned i = 0;
for (struct target_list *list = target->head; list != NULL;
list = list->next, i++) {
+ total_targets++;
struct target *t = list->target;
riscv_info_t *r = riscv_info(t);
assert(i < DIM(newly_halted));
enum riscv_poll_hart out = riscv_poll_hart(t, r->current_hartid);
switch (out) {
- case RPH_NO_CHANGE:
- break;
- case RPH_DISCOVERED_RUNNING:
- t->state = TARGET_RUNNING;
- break;
- case RPH_DISCOVERED_HALTED:
- halt_discovered = true;
- newly_halted[i] = true;
- t->state = TARGET_HALTED;
- if (set_debug_reason(t, r->current_hartid) != ERROR_OK)
- return ERROR_FAIL;
- break;
- case RPH_ERROR:
+ case RPH_NO_CHANGE:
+ break;
+ case RPH_DISCOVERED_RUNNING:
+ t->state = TARGET_RUNNING;
+ t->debug_reason = DBG_REASON_NOTHALTED;
+ break;
+ case RPH_DISCOVERED_HALTED:
+ halts_discovered++;
+ newly_halted[i] = true;
+ t->state = TARGET_HALTED;
+ enum riscv_halt_reason halt_reason =
+ riscv_halt_reason(t, r->current_hartid);
+ if (set_debug_reason(t, halt_reason) != ERROR_OK)
return ERROR_FAIL;
- }
- }
- if (halt_discovered) {
- LOG_DEBUG("Halt other targets in this SMP group.");
- i = 0;
- for (struct target_list *list = target->head; list != NULL;
- list = list->next, i++) {
- struct target *t = list->target;
- riscv_info_t *r = riscv_info(t);
- if (t->state != TARGET_HALTED) {
- if (riscv_halt_one_hart(t, r->current_hartid) != ERROR_OK)
- return ERROR_FAIL;
- t->state = TARGET_HALTED;
- if (set_debug_reason(t, r->current_hartid) != ERROR_OK)
- return ERROR_FAIL;
- newly_halted[i] = true;
+ if (halt_reason == RISCV_HALT_BREAKPOINT) {
+ int retval;
+ switch (riscv_semihosting(t, &retval)) {
+ case SEMI_NONE:
+ case SEMI_WAITING:
+ /* This hart should remain halted. */
+ should_remain_halted++;
+ break;
+ case SEMI_HANDLED:
+ /* This hart should be resumed, along with any other
+ * harts that halted due to haltgroups. */
+ should_resume++;
+ break;
+ case SEMI_ERROR:
+ return retval;
+ }
+ } else if (halt_reason != RISCV_HALT_GROUP) {
+ should_remain_halted++;
}
- }
+ break;
- /* Now that we have all our ducks in a row, tell the higher layers
- * what just happened. */
- i = 0;
- for (struct target_list *list = target->head; list != NULL;
- list = list->next, i++) {
- struct target *t = list->target;
- if (newly_halted[i])
- target_call_event_callbacks(t, TARGET_EVENT_HALTED);
+ case RPH_ERROR:
+ return ERROR_FAIL;
}
}
+
+ LOG_DEBUG("should_remain_halted=%d, should_resume=%d",
+ should_remain_halted, should_resume);
+ if (should_remain_halted && should_resume) {
+ LOG_WARNING("%d harts should remain halted, and %d should resume.",
+ should_remain_halted, should_resume);
+ }
+ if (should_remain_halted) {
+ LOG_DEBUG("halt all");
+ riscv_halt(target);
+ } else if (should_resume) {
+ LOG_DEBUG("resume all");
+ riscv_resume(target, true, 0, 0, 0, false);
+ }
return ERROR_OK;
} else {
@@ -1281,128 +2039,32 @@ int riscv_openocd_poll(struct target *target)
halted_hart = riscv_current_hartid(target);
LOG_DEBUG(" hart %d halted", halted_hart);
- }
- target->state = TARGET_HALTED;
- if (set_debug_reason(target, halted_hart) != ERROR_OK)
- return ERROR_FAIL;
-
- if (riscv_rtos_enabled(target)) {
- target->rtos->current_threadid = halted_hart + 1;
- target->rtos->current_thread = halted_hart + 1;
- riscv_set_rtos_hartid(target, halted_hart);
+ enum riscv_halt_reason halt_reason = riscv_halt_reason(target, halted_hart);
+ if (set_debug_reason(target, halt_reason) != ERROR_OK)
+ return ERROR_FAIL;
+ target->state = TARGET_HALTED;
}
- target->state = TARGET_HALTED;
-
if (target->debug_reason == DBG_REASON_BREAKPOINT) {
int retval;
- if (riscv_semihosting(target, &retval) != 0)
- return retval;
- }
-
- target_call_event_callbacks(target, TARGET_EVENT_HALTED);
- return ERROR_OK;
-}
-
-int riscv_openocd_halt(struct target *target)
-{
- RISCV_INFO(r);
- int result;
-
- LOG_DEBUG("[%d] halting all harts", target->coreid);
-
- if (target->smp) {
- LOG_DEBUG("Halt other targets in this SMP group.");
- struct target_list *targets = target->head;
- result = ERROR_OK;
- while (targets) {
- struct target *t = targets->target;
- targets = targets->next;
- if (t->state != TARGET_HALTED) {
- if (riscv_halt_all_harts(t) != ERROR_OK)
- result = ERROR_FAIL;
- }
+ switch (riscv_semihosting(target, &retval)) {
+ case SEMI_NONE:
+ case SEMI_WAITING:
+ target_call_event_callbacks(target, TARGET_EVENT_HALTED);
+ break;
+ case SEMI_HANDLED:
+ if (riscv_resume(target, true, 0, 0, 0, false) != ERROR_OK)
+ return ERROR_FAIL;
+ break;
+ case SEMI_ERROR:
+ return retval;
}
} else {
- result = riscv_halt_all_harts(target);
+ target_call_event_callbacks(target, TARGET_EVENT_HALTED);
}
- if (riscv_rtos_enabled(target)) {
- if (r->rtos_hartid != -1) {
- LOG_DEBUG("halt requested on RTOS hartid %d", r->rtos_hartid);
- target->rtos->current_threadid = r->rtos_hartid + 1;
- target->rtos->current_thread = r->rtos_hartid + 1;
- } else
- LOG_DEBUG("halt requested, but no known RTOS hartid");
- }
-
- target->state = TARGET_HALTED;
- target->debug_reason = DBG_REASON_DBGRQ;
- target_call_event_callbacks(target, TARGET_EVENT_HALTED);
- return result;
-}
-
-int riscv_openocd_resume(
- struct target *target,
- int current,
- target_addr_t address,
- int handle_breakpoints,
- int debug_execution)
-{
- LOG_DEBUG("debug_reason=%d", target->debug_reason);
-
- if (!current)
- riscv_set_register(target, GDB_REGNO_PC, address);
-
- if (target->debug_reason == DBG_REASON_WATCHPOINT) {
- /* To be able to run off a trigger, disable all the triggers, step, and
- * then resume as usual. */
- struct watchpoint *watchpoint = target->watchpoints;
- bool trigger_temporarily_cleared[RISCV_MAX_HWBPS] = {0};
-
- int i = 0;
- int result = ERROR_OK;
- while (watchpoint && result == ERROR_OK) {
- LOG_DEBUG("watchpoint %d: set=%d", i, watchpoint->set);
- trigger_temporarily_cleared[i] = watchpoint->set;
- if (watchpoint->set)
- result = riscv_remove_watchpoint(target, watchpoint);
- watchpoint = watchpoint->next;
- i++;
- }
-
- if (result == ERROR_OK)
- result = riscv_step_rtos_hart(target);
-
- watchpoint = target->watchpoints;
- i = 0;
- while (watchpoint) {
- LOG_DEBUG("watchpoint %d: cleared=%d", i, trigger_temporarily_cleared[i]);
- if (trigger_temporarily_cleared[i]) {
- if (result == ERROR_OK)
- result = riscv_add_watchpoint(target, watchpoint);
- else
- riscv_add_watchpoint(target, watchpoint);
- }
- watchpoint = watchpoint->next;
- i++;
- }
-
- if (result != ERROR_OK)
- return result;
- }
-
- int out = riscv_resume_all_harts(target);
- if (out != ERROR_OK) {
- LOG_ERROR("unable to resume all harts");
- return out;
- }
-
- register_cache_invalidate(target->reg_cache);
- target->state = TARGET_RUNNING;
- target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
- return out;
+ return ERROR_OK;
}
int riscv_openocd_step(struct target *target, int current,
@@ -1413,6 +2075,10 @@ int riscv_openocd_step(struct target *target, int current,
if (!current)
riscv_set_register(target, GDB_REGNO_PC, address);
+ riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
+ if (disable_triggers(target, trigger_state) != ERROR_OK)
+ return ERROR_FAIL;
+
int out = riscv_step_rtos_hart(target);
if (out != ERROR_OK) {
LOG_ERROR("unable to step rtos hart");
@@ -1420,6 +2086,10 @@ int riscv_openocd_step(struct target *target, int current,
}
register_cache_invalidate(target->reg_cache);
+
+ if (enable_triggers(target, trigger_state) != ERROR_OK)
+ return ERROR_FAIL;
+
target->state = TARGET_RUNNING;
target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
target->state = TARGET_HALTED;
@@ -1491,6 +2161,16 @@ COMMAND_HANDLER(riscv_set_prefer_sba)
return ERROR_OK;
}
+COMMAND_HANDLER(riscv_set_enable_virtual)
+{
+ if (CMD_ARGC != 1) {
+ LOG_ERROR("Command takes exactly 1 parameter");
+ return ERROR_COMMAND_SYNTAX_ERROR;
+ }
+ COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_enable_virtual);
+ return ERROR_OK;
+}
+
void parse_error(const char *string, char c, unsigned position)
{
char buf[position+2];
@@ -1559,6 +2239,8 @@ int parse_ranges(range_t **ranges, const char **argv)
if (pass == 0) {
free(*ranges);
*ranges = calloc(range + 2, sizeof(range_t));
+ if (!*ranges)
+ return ERROR_FAIL;
} else {
(*ranges)[range].low = 1;
(*ranges)[range].high = 0;
@@ -1752,55 +2434,147 @@ COMMAND_HANDLER(riscv_set_ir)
uint32_t value;
COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
- if (!strcmp(CMD_ARGV[0], "idcode")) {
+ if (!strcmp(CMD_ARGV[0], "idcode"))
buf_set_u32(ir_idcode, 0, 32, value);
- return ERROR_OK;
- } else if (!strcmp(CMD_ARGV[0], "dtmcs")) {
+ else if (!strcmp(CMD_ARGV[0], "dtmcs"))
buf_set_u32(ir_dtmcontrol, 0, 32, value);
- return ERROR_OK;
- } else if (!strcmp(CMD_ARGV[0], "dmi")) {
+ else if (!strcmp(CMD_ARGV[0], "dmi"))
buf_set_u32(ir_dbus, 0, 32, value);
- return ERROR_OK;
+ else
+ return ERROR_FAIL;
+
+ return ERROR_OK;
+}
+
+COMMAND_HANDLER(riscv_resume_order)
+{
+ if (CMD_ARGC != 1) {
+ LOG_ERROR("Command takes exactly one argument");
+ return ERROR_COMMAND_SYNTAX_ERROR;
+ }
+
+ if (!strcmp(CMD_ARGV[0], "normal")) {
+ resume_order = RO_NORMAL;
+ } else if (!strcmp(CMD_ARGV[0], "reversed")) {
+ resume_order = RO_REVERSED;
} else {
+ LOG_ERROR("Unsupported resume order: %s", CMD_ARGV[0]);
return ERROR_FAIL;
}
+
+ return ERROR_OK;
+}
+
+COMMAND_HANDLER(riscv_use_bscan_tunnel)
+{
+ int irwidth = 0;
+ int tunnel_type = BSCAN_TUNNEL_NESTED_TAP;
+
+ if (CMD_ARGC > 2) {
+ LOG_ERROR("Command takes at most two arguments");
+ return ERROR_COMMAND_SYNTAX_ERROR;
+ } else if (CMD_ARGC == 1) {
+ COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], irwidth);
+ } else if (CMD_ARGC == 2) {
+ COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], irwidth);
+ COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], tunnel_type);
+ }
+ if (tunnel_type == BSCAN_TUNNEL_NESTED_TAP)
+ LOG_INFO("Nested Tap based Bscan Tunnel Selected");
+ else if (tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
+ LOG_INFO("Simple Register based Bscan Tunnel Selected");
+ else {
+ LOG_INFO("Invalid tunnel type selected; defaulting to Nested Tap type");
+ tunnel_type = BSCAN_TUNNEL_NESTED_TAP;
+ }
+
+ bscan_tunnel_type = tunnel_type;
+ bscan_tunnel_ir_width = irwidth;
+ return ERROR_OK;
+}
+
+COMMAND_HANDLER(riscv_set_enable_virt2phys)
+{
+ if (CMD_ARGC != 1) {
+ LOG_ERROR("Command takes exactly 1 parameter");
+ return ERROR_COMMAND_SYNTAX_ERROR;
+ }
+ COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_enable_virt2phys);
+ return ERROR_OK;
+}
+
+COMMAND_HANDLER(riscv_set_ebreakm)
+{
+ if (CMD_ARGC != 1) {
+ LOG_ERROR("Command takes exactly 1 parameter");
+ return ERROR_COMMAND_SYNTAX_ERROR;
+ }
+ COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreakm);
+ return ERROR_OK;
+}
+
+COMMAND_HANDLER(riscv_set_ebreaks)
+{
+ if (CMD_ARGC != 1) {
+ LOG_ERROR("Command takes exactly 1 parameter");
+ return ERROR_COMMAND_SYNTAX_ERROR;
+ }
+ COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreaks);
+ return ERROR_OK;
+}
+
+COMMAND_HANDLER(riscv_set_ebreaku)
+{
+ if (CMD_ARGC != 1) {
+ LOG_ERROR("Command takes exactly 1 parameter");
+ return ERROR_COMMAND_SYNTAX_ERROR;
+ }
+ COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreaku);
+ return ERROR_OK;
}
static const struct command_registration riscv_exec_command_handlers[] = {
{
.name = "test_compliance",
.handler = riscv_test_compliance,
+ .usage = "",
.mode = COMMAND_EXEC,
- .usage = "riscv test_compliance",
.help = "Runs a basic compliance test suite against the RISC-V Debug Spec."
},
{
.name = "set_command_timeout_sec",
.handler = riscv_set_command_timeout_sec,
.mode = COMMAND_ANY,
- .usage = "riscv set_command_timeout_sec [sec]",
+ .usage = "[sec]",
.help = "Set the wall-clock timeout (in seconds) for individual commands"
},
{
.name = "set_reset_timeout_sec",
.handler = riscv_set_reset_timeout_sec,
.mode = COMMAND_ANY,
- .usage = "riscv set_reset_timeout_sec [sec]",
+ .usage = "[sec]",
.help = "Set the wall-clock timeout (in seconds) after reset is deasserted"
},
{
.name = "set_prefer_sba",
.handler = riscv_set_prefer_sba,
.mode = COMMAND_ANY,
- .usage = "riscv set_prefer_sba on|off",
+ .usage = "on|off",
.help = "When on, prefer to use System Bus Access to access memory. "
- "When off, prefer to use the Program Buffer to access memory."
+ "When off (default), prefer to use the Program Buffer to access memory."
+ },
+ {
+ .name = "set_enable_virtual",
+ .handler = riscv_set_enable_virtual,
+ .mode = COMMAND_ANY,
+ .usage = "on|off",
+ .help = "When on, memory accesses are performed on physical or virtual "
+ "memory depending on the current system configuration. "
+ "When off (default), all memory accessses are performed on physical memory."
},
{
.name = "expose_csrs",
.handler = riscv_set_expose_csrs,
.mode = COMMAND_ANY,
- .usage = "riscv expose_csrs n0[-m0][,n1[-m1]]...",
+ .usage = "n0[-m0][,n1[-m1]]...",
.help = "Configure a list of inclusive ranges for CSRs to expose in "
"addition to the standard ones. This must be executed before "
"`init`."
@@ -1809,7 +2583,7 @@ static const struct command_registration riscv_exec_command_handlers[] = {
.name = "expose_custom",
.handler = riscv_set_expose_custom,
.mode = COMMAND_ANY,
- .usage = "riscv expose_custom n0[-m0][,n1[-m1]]...",
+ .usage = "n0[-m0][,n1[-m1]]...",
.help = "Configure a list of inclusive ranges for custom registers to "
"expose. custom0 is accessed as abstract register number 0xc000, "
"etc. This must be executed before `init`."
@@ -1817,36 +2591,36 @@ static const struct command_registration riscv_exec_command_handlers[] = {
{
.name = "authdata_read",
.handler = riscv_authdata_read,
+ .usage = "",
.mode = COMMAND_ANY,
- .usage = "riscv authdata_read",
.help = "Return the 32-bit value read from authdata."
},
{
.name = "authdata_write",
.handler = riscv_authdata_write,
.mode = COMMAND_ANY,
- .usage = "riscv authdata_write value",
+ .usage = "value",
.help = "Write the 32-bit value to authdata."
},
{
.name = "dmi_read",
.handler = riscv_dmi_read,
.mode = COMMAND_ANY,
- .usage = "riscv dmi_read address",
+ .usage = "address",
.help = "Perform a 32-bit DMI read at address, returning the value."
},
{
.name = "dmi_write",
.handler = riscv_dmi_write,
.mode = COMMAND_ANY,
- .usage = "riscv dmi_write address value",
+ .usage = "address value",
.help = "Perform a 32-bit DMI write of value at address."
},
{
.name = "test_sba_config_reg",
.handler = riscv_test_sba_config_reg,
.mode = COMMAND_ANY,
- .usage = "riscv test_sba_config_reg legal_address num_words "
+ .usage = "legal_address num_words "
"illegal_address run_sbbusyerror_test[on/off]",
.help = "Perform a series of tests on the SBCS register. "
"Inputs are a legal, 128-byte aligned address and a number of words to "
@@ -1859,19 +2633,71 @@ static const struct command_registration riscv_exec_command_handlers[] = {
.name = "reset_delays",
.handler = riscv_reset_delays,
.mode = COMMAND_ANY,
- .usage = "reset_delays [wait]",
+ .usage = "[wait]",
.help = "OpenOCD learns how many Run-Test/Idle cycles are required "
"between scans to avoid encountering the target being busy. This "
"command resets those learned values after `wait` scans. It's only "
"useful for testing OpenOCD itself."
},
{
+ .name = "resume_order",
+ .handler = riscv_resume_order,
+ .mode = COMMAND_ANY,
+ .usage = "normal|reversed",
+ .help = "Choose the order that harts are resumed in when `hasel` is not "
+ "supported. Normal order is from lowest hart index to highest. "
+ "Reversed order is from highest hart index to lowest."
+ },
+ {
.name = "set_ir",
.handler = riscv_set_ir,
.mode = COMMAND_ANY,
- .usage = "riscv set_ir_idcode [idcode|dtmcs|dmi] value",
+ .usage = "[idcode|dtmcs|dmi] value",
.help = "Set IR value for specified JTAG register."
},
+ {
+ .name = "use_bscan_tunnel",
+ .handler = riscv_use_bscan_tunnel,
+ .mode = COMMAND_ANY,
+ .usage = "value [type]",
+ .help = "Enable or disable use of a BSCAN tunnel to reach DM. Supply "
+ "the width of the DM transport TAP's instruction register to "
+ "enable. Supply a value of 0 to disable. Pass A second argument "
+ "(optional) to indicate Bscan Tunnel Type {0:(default) NESTED_TAP , "
+ "1: DATA_REGISTER}"
+ },
+ {
+ .name = "set_enable_virt2phys",
+ .handler = riscv_set_enable_virt2phys,
+ .mode = COMMAND_ANY,
+ .usage = "on|off",
+ .help = "When on (default), enable translation from virtual address to "
+ "physical address."
+ },
+ {
+ .name = "set_ebreakm",
+ .handler = riscv_set_ebreakm,
+ .mode = COMMAND_ANY,
+ .usage = "on|off",
+ .help = "Control dcsr.ebreakm. When off, M-mode ebreak instructions "
+ "don't trap to OpenOCD. Defaults to on."
+ },
+ {
+ .name = "set_ebreaks",
+ .handler = riscv_set_ebreaks,
+ .mode = COMMAND_ANY,
+ .usage = "on|off",
+ .help = "Control dcsr.ebreaks. When off, S-mode ebreak instructions "
+ "don't trap to OpenOCD. Defaults to on."
+ },
+ {
+ .name = "set_ebreaku",
+ .handler = riscv_set_ebreaku,
+ .mode = COMMAND_ANY,
+ .usage = "on|off",
+ .help = "Control dcsr.ebreaku. When off, U-mode ebreak instructions "
+ "don't trap to OpenOCD. Defaults to on."
+ },
COMMAND_REGISTRATION_DONE
};
@@ -1908,7 +2734,7 @@ const struct command_registration riscv_command_handlers[] = {
COMMAND_REGISTRATION_DONE
};
-unsigned riscv_address_bits(struct target *target)
+static unsigned riscv_xlen_nonconst(struct target *target)
{
return riscv_xlen(target);
}
@@ -1923,8 +2749,8 @@ struct target_type riscv_target = {
/* poll current target status */
.poll = old_or_new_riscv_poll,
- .halt = old_or_new_riscv_halt,
- .resume = old_or_new_riscv_resume,
+ .halt = riscv_halt,
+ .resume = riscv_target_resume,
.step = old_or_new_riscv_step,
.assert_reset = riscv_assert_reset,
@@ -1932,10 +2758,16 @@ struct target_type riscv_target = {
.read_memory = riscv_read_memory,
.write_memory = riscv_write_memory,
+ .read_phys_memory = riscv_read_phys_memory,
+ .write_phys_memory = riscv_write_phys_memory,
.checksum_memory = riscv_checksum_memory,
+ .mmu = riscv_mmu,
+ .virt2phys = riscv_virt2phys,
+
.get_gdb_reg_list = riscv_get_gdb_reg_list,
+ .get_gdb_reg_list_noread = riscv_get_gdb_reg_list_noread,
.add_breakpoint = riscv_add_breakpoint,
.remove_breakpoint = riscv_remove_breakpoint,
@@ -1950,7 +2782,7 @@ struct target_type riscv_target = {
.commands = riscv_command_handlers,
- .address_bits = riscv_address_bits
+ .address_bits = riscv_xlen_nonconst,
};
/*** RISC-V Interface ***/
@@ -1964,72 +2796,52 @@ void riscv_info_init(struct target *target, riscv_info_t *r)
memset(r->trigger_unique_id, 0xff, sizeof(r->trigger_unique_id));
- for (size_t h = 0; h < RISCV_MAX_HARTS; ++h) {
+ for (size_t h = 0; h < RISCV_MAX_HARTS; ++h)
r->xlen[h] = -1;
-
- for (size_t e = 0; e < RISCV_MAX_REGISTERS; ++e)
- r->valid_saved_registers[h][e] = false;
- }
}
-int riscv_halt_all_harts(struct target *target)
-{
- for (int i = 0; i < riscv_count_harts(target); ++i) {
- if (!riscv_hart_enabled(target, i))
- continue;
-
- riscv_halt_one_hart(target, i);
- }
-
- riscv_invalidate_register_cache(target);
-
- return ERROR_OK;
-}
-
-int riscv_halt_one_hart(struct target *target, int hartid)
+static int riscv_resume_go_all_harts(struct target *target)
{
RISCV_INFO(r);
- LOG_DEBUG("halting hart %d", hartid);
- if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
- return ERROR_FAIL;
- if (riscv_is_halted(target)) {
- LOG_DEBUG(" hart %d requested halt, but was already halted", hartid);
- return ERROR_OK;
- }
- int result = r->halt_current_hart(target);
- register_cache_invalidate(target->reg_cache);
- return result;
-}
+ /* Dummy variables to make mingw32-gcc happy. */
+ int first = 0;
+ int last = 1;
+ int step = 1;
+ switch (resume_order) {
+ case RO_NORMAL:
+ first = 0;
+ last = riscv_count_harts(target) - 1;
+ step = 1;
+ break;
+ case RO_REVERSED:
+ first = riscv_count_harts(target) - 1;
+ last = 0;
+ step = -1;
+ break;
+ default:
+ assert(0);
+ }
-int riscv_resume_all_harts(struct target *target)
-{
- for (int i = 0; i < riscv_count_harts(target); ++i) {
+ for (int i = first; i != last + step; i += step) {
if (!riscv_hart_enabled(target, i))
continue;
- riscv_resume_one_hart(target, i);
+ LOG_DEBUG("resuming hart %d", i);
+ if (riscv_set_current_hartid(target, i) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_is_halted(target)) {
+ if (r->resume_go(target) != ERROR_OK)
+ return ERROR_FAIL;
+ } else {
+ LOG_DEBUG(" hart %d requested resume, but was already resumed", i);
+ }
}
riscv_invalidate_register_cache(target);
return ERROR_OK;
}
-int riscv_resume_one_hart(struct target *target, int hartid)
-{
- RISCV_INFO(r);
- LOG_DEBUG("resuming hart %d", hartid);
- if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
- return ERROR_FAIL;
- if (!riscv_is_halted(target)) {
- LOG_DEBUG(" hart %d requested resume, but was already resumed", hartid);
- return ERROR_OK;
- }
-
- r->on_resume(target);
- return r->resume_current_hart(target);
-}
-
int riscv_step_rtos_hart(struct target *target)
{
RISCV_INFO(r);
@@ -2075,7 +2887,7 @@ bool riscv_supports_extension(struct target *target, int hartid, char letter)
return r->misa[hartid] & (1 << num);
}
-int riscv_xlen(const struct target *target)
+unsigned riscv_xlen(const struct target *target)
{
return riscv_xlen_of_hart(target, riscv_current_hartid(target));
}
@@ -2087,7 +2899,6 @@ int riscv_xlen_of_hart(const struct target *target, int hartid)
return r->xlen[hartid];
}
-extern struct rtos_type riscv_rtos;
bool riscv_rtos_enabled(const struct target *target)
{
return false;
@@ -2152,9 +2963,9 @@ int riscv_count_harts(struct target *target)
if (target == NULL)
return 1;
RISCV_INFO(r);
- if (r == NULL)
+ if (r == NULL || r->hart_count == NULL)
return 1;
- return r->hart_count;
+ return r->hart_count(target);
}
bool riscv_has_register(struct target *target, int hartid, int regid)
@@ -2163,6 +2974,55 @@ bool riscv_has_register(struct target *target, int hartid, int regid)
}
/**
+ * If write is true:
+ * return true iff we are guaranteed that the register will contain exactly
+ * the value we just wrote when it's read.
+ * If write is false:
+ * return true iff we are guaranteed that the register will read the same
+ * value in the future as the value we just read.
+ */
+static bool gdb_regno_cacheable(enum gdb_regno regno, bool write)
+{
+ /* GPRs, FPRs, vector registers are just normal data stores. */
+ if (regno <= GDB_REGNO_XPR31 ||
+ (regno >= GDB_REGNO_FPR0 && regno <= GDB_REGNO_FPR31) ||
+ (regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31))
+ return true;
+
+	/* Most CSRs won't change value on us, but we can't assume that about
+	 * arbitrary CSRs. */
+ switch (regno) {
+ case GDB_REGNO_DPC:
+ return true;
+
+ case GDB_REGNO_VSTART:
+ case GDB_REGNO_VXSAT:
+ case GDB_REGNO_VXRM:
+ case GDB_REGNO_VLENB:
+ case GDB_REGNO_VL:
+ case GDB_REGNO_VTYPE:
+ case GDB_REGNO_MISA:
+ case GDB_REGNO_DCSR:
+ case GDB_REGNO_DSCRATCH0:
+ case GDB_REGNO_MSTATUS:
+ case GDB_REGNO_MEPC:
+ case GDB_REGNO_MCAUSE:
+ case GDB_REGNO_SATP:
+ /*
+ * WARL registers might not contain the value we just wrote, but
+			 * these ones won't spontaneously change their value either.
+ */
+ return !write;
+
+ case GDB_REGNO_TSELECT: /* I think this should be above, but then it doesn't work. */
+ case GDB_REGNO_TDATA1: /* Changes value when tselect is changed. */
+		case GDB_REGNO_TDATA2:	/* Changes value when tselect is changed. */
+ default:
+ return false;
+ }
+}
+
+/**
* This function is called when the debug user wants to change the value of a
* register. The new value may be cached, and may not be written until the hart
* is resumed. */
@@ -2177,7 +3037,23 @@ int riscv_set_register_on_hart(struct target *target, int hartid,
RISCV_INFO(r);
LOG_DEBUG("{%d} %s <- %" PRIx64, hartid, gdb_regno_name(regid), value);
assert(r->set_register);
- return r->set_register(target, hartid, regid, value);
+
+ /* TODO: Hack to deal with gdb that thinks these registers still exist. */
+ if (regid > GDB_REGNO_XPR15 && regid <= GDB_REGNO_XPR31 && value == 0 &&
+ riscv_supports_extension(target, hartid, 'E'))
+ return ERROR_OK;
+
+ struct reg *reg = &target->reg_cache->reg_list[regid];
+ buf_set_u64(reg->value, 0, reg->size, value);
+
+ int result = r->set_register(target, hartid, regid, value);
+ if (result == ERROR_OK)
+ reg->valid = gdb_regno_cacheable(regid, true);
+ else
+ reg->valid = false;
+ LOG_DEBUG("[%s]{%d} wrote 0x%" PRIx64 " to %s valid=%d",
+ target_name(target), hartid, value, reg->name, reg->valid);
+ return result;
}
int riscv_get_register(struct target *target, riscv_reg_t *value,
@@ -2193,14 +3069,31 @@ int riscv_get_register_on_hart(struct target *target, riscv_reg_t *value,
RISCV_INFO(r);
struct reg *reg = &target->reg_cache->reg_list[regid];
+ if (!reg->exist) {
+ LOG_DEBUG("[%s]{%d} %s does not exist.",
+ target_name(target), hartid, gdb_regno_name(regid));
+ return ERROR_FAIL;
+ }
if (reg && reg->valid && hartid == riscv_current_hartid(target)) {
*value = buf_get_u64(reg->value, 0, reg->size);
+ LOG_DEBUG("{%d} %s: %" PRIx64 " (cached)", hartid,
+ gdb_regno_name(regid), *value);
+ return ERROR_OK;
+ }
+
+ /* TODO: Hack to deal with gdb that thinks these registers still exist. */
+ if (regid > GDB_REGNO_XPR15 && regid <= GDB_REGNO_XPR31 &&
+ riscv_supports_extension(target, hartid, 'E')) {
+ *value = 0;
return ERROR_OK;
}
int result = r->get_register(target, value, hartid, regid);
+ if (result == ERROR_OK)
+ reg->valid = gdb_regno_cacheable(regid, false);
+
LOG_DEBUG("{%d} %s: %" PRIx64, hartid, gdb_regno_name(regid), *value);
return result;
}
@@ -2311,7 +3204,9 @@ int riscv_enumerate_triggers(struct target *target)
for (unsigned t = 0; t < RISCV_MAX_TRIGGERS; ++t) {
r->trigger_count[hartid] = t;
- riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, t);
+ /* If we can't write tselect, then this hart does not support triggers. */
+ if (riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, t) != ERROR_OK)
+ break;
uint64_t tselect_rb;
result = riscv_get_register_on_hart(target, &tselect_rb, hartid,
GDB_REGNO_TSELECT);
@@ -2329,6 +3224,8 @@ int riscv_enumerate_triggers(struct target *target)
return result;
int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));
+ if (type == 0)
+ break;
switch (type) {
case 1:
/* On these older cores we don't support software using
@@ -2357,10 +3254,68 @@ const char *gdb_regno_name(enum gdb_regno regno)
switch (regno) {
case GDB_REGNO_ZERO:
return "zero";
+ case GDB_REGNO_RA:
+ return "ra";
+ case GDB_REGNO_SP:
+ return "sp";
+ case GDB_REGNO_GP:
+ return "gp";
+ case GDB_REGNO_TP:
+ return "tp";
+ case GDB_REGNO_T0:
+ return "t0";
+ case GDB_REGNO_T1:
+ return "t1";
+ case GDB_REGNO_T2:
+ return "t2";
case GDB_REGNO_S0:
return "s0";
case GDB_REGNO_S1:
return "s1";
+ case GDB_REGNO_A0:
+ return "a0";
+ case GDB_REGNO_A1:
+ return "a1";
+ case GDB_REGNO_A2:
+ return "a2";
+ case GDB_REGNO_A3:
+ return "a3";
+ case GDB_REGNO_A4:
+ return "a4";
+ case GDB_REGNO_A5:
+ return "a5";
+ case GDB_REGNO_A6:
+ return "a6";
+ case GDB_REGNO_A7:
+ return "a7";
+ case GDB_REGNO_S2:
+ return "s2";
+ case GDB_REGNO_S3:
+ return "s3";
+ case GDB_REGNO_S4:
+ return "s4";
+ case GDB_REGNO_S5:
+ return "s5";
+ case GDB_REGNO_S6:
+ return "s6";
+ case GDB_REGNO_S7:
+ return "s7";
+ case GDB_REGNO_S8:
+ return "s8";
+ case GDB_REGNO_S9:
+ return "s9";
+ case GDB_REGNO_S10:
+ return "s10";
+ case GDB_REGNO_S11:
+ return "s11";
+ case GDB_REGNO_T3:
+ return "t3";
+ case GDB_REGNO_T4:
+ return "t4";
+ case GDB_REGNO_T5:
+ return "t5";
+ case GDB_REGNO_T6:
+ return "t6";
case GDB_REGNO_PC:
return "pc";
case GDB_REGNO_FPR0:
@@ -2381,12 +3336,86 @@ const char *gdb_regno_name(enum gdb_regno regno)
return "dpc";
case GDB_REGNO_DCSR:
return "dcsr";
- case GDB_REGNO_DSCRATCH:
- return "dscratch";
+ case GDB_REGNO_DSCRATCH0:
+ return "dscratch0";
case GDB_REGNO_MSTATUS:
return "mstatus";
+ case GDB_REGNO_MEPC:
+ return "mepc";
+ case GDB_REGNO_MCAUSE:
+ return "mcause";
case GDB_REGNO_PRIV:
return "priv";
+ case GDB_REGNO_SATP:
+ return "satp";
+ case GDB_REGNO_VTYPE:
+ return "vtype";
+ case GDB_REGNO_VL:
+ return "vl";
+ case GDB_REGNO_V0:
+ return "v0";
+ case GDB_REGNO_V1:
+ return "v1";
+ case GDB_REGNO_V2:
+ return "v2";
+ case GDB_REGNO_V3:
+ return "v3";
+ case GDB_REGNO_V4:
+ return "v4";
+ case GDB_REGNO_V5:
+ return "v5";
+ case GDB_REGNO_V6:
+ return "v6";
+ case GDB_REGNO_V7:
+ return "v7";
+ case GDB_REGNO_V8:
+ return "v8";
+ case GDB_REGNO_V9:
+ return "v9";
+ case GDB_REGNO_V10:
+ return "v10";
+ case GDB_REGNO_V11:
+ return "v11";
+ case GDB_REGNO_V12:
+ return "v12";
+ case GDB_REGNO_V13:
+ return "v13";
+ case GDB_REGNO_V14:
+ return "v14";
+ case GDB_REGNO_V15:
+ return "v15";
+ case GDB_REGNO_V16:
+ return "v16";
+ case GDB_REGNO_V17:
+ return "v17";
+ case GDB_REGNO_V18:
+ return "v18";
+ case GDB_REGNO_V19:
+ return "v19";
+ case GDB_REGNO_V20:
+ return "v20";
+ case GDB_REGNO_V21:
+ return "v21";
+ case GDB_REGNO_V22:
+ return "v22";
+ case GDB_REGNO_V23:
+ return "v23";
+ case GDB_REGNO_V24:
+ return "v24";
+ case GDB_REGNO_V25:
+ return "v25";
+ case GDB_REGNO_V26:
+ return "v26";
+ case GDB_REGNO_V27:
+ return "v27";
+ case GDB_REGNO_V28:
+ return "v28";
+ case GDB_REGNO_V29:
+ return "v29";
+ case GDB_REGNO_V30:
+ return "v30";
+ case GDB_REGNO_V31:
+ return "v31";
default:
if (regno <= GDB_REGNO_XPR31)
sprintf(buf, "x%d", regno - GDB_REGNO_ZERO);
@@ -2404,20 +3433,29 @@ static int register_get(struct reg *reg)
{
riscv_reg_info_t *reg_info = reg->arch_info;
struct target *target = reg_info->target;
- uint64_t value;
- int result = riscv_get_register(target, &value, reg->number);
- if (result != ERROR_OK)
- return result;
- buf_set_u64(reg->value, 0, reg->size, value);
- /* CSRs (and possibly other extension) registers may change value at any
- * time. */
- if (reg->number <= GDB_REGNO_XPR31 ||
- (reg->number >= GDB_REGNO_FPR0 && reg->number <= GDB_REGNO_FPR31) ||
- reg->number == GDB_REGNO_PC)
- reg->valid = true;
- LOG_DEBUG("[%d]{%d} read 0x%" PRIx64 " from %s (valid=%d)",
- target->coreid, riscv_current_hartid(target), value, reg->name,
- reg->valid);
+ RISCV_INFO(r);
+
+ if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) {
+ if (!r->get_register_buf) {
+ LOG_ERROR("Reading register %s not supported on this RISC-V target.",
+ gdb_regno_name(reg->number));
+ return ERROR_FAIL;
+ }
+
+ if (r->get_register_buf(target, reg->value, reg->number) != ERROR_OK)
+ return ERROR_FAIL;
+ } else {
+ uint64_t value;
+ int result = riscv_get_register(target, &value, reg->number);
+ if (result != ERROR_OK)
+ return result;
+ buf_set_u64(reg->value, 0, reg->size, value);
+ }
+ reg->valid = gdb_regno_cacheable(reg->number, false);
+ char *str = buf_to_str(reg->value, reg->size, 16);
+ LOG_DEBUG("[%d]{%d} read 0x%s from %s (valid=%d)", target->coreid,
+ riscv_current_hartid(target), str, reg->name, reg->valid);
+ free(str);
return ERROR_OK;
}
@@ -2425,22 +3463,42 @@ static int register_set(struct reg *reg, uint8_t *buf)
{
riscv_reg_info_t *reg_info = reg->arch_info;
struct target *target = reg_info->target;
+ RISCV_INFO(r);
+
+ char *str = buf_to_str(buf, reg->size, 16);
+ LOG_DEBUG("[%d]{%d} write 0x%s to %s (valid=%d)", target->coreid,
+ riscv_current_hartid(target), str, reg->name, reg->valid);
+ free(str);
+
+ memcpy(reg->value, buf, DIV_ROUND_UP(reg->size, 8));
+ reg->valid = gdb_regno_cacheable(reg->number, true);
+
+ if (reg->number == GDB_REGNO_TDATA1 ||
+ reg->number == GDB_REGNO_TDATA2) {
+ r->manual_hwbp_set = true;
+ /* When enumerating triggers, we clear any triggers with DMODE set,
+ * assuming they were left over from a previous debug session. So make
+ * sure that is done before a user might be setting their own triggers.
+ */
+ if (riscv_enumerate_triggers(target) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) {
+ if (!r->set_register_buf) {
+ LOG_ERROR("Writing register %s not supported on this RISC-V target.",
+ gdb_regno_name(reg->number));
+ return ERROR_FAIL;
+ }
+
+ if (r->set_register_buf(target, reg->number, reg->value) != ERROR_OK)
+ return ERROR_FAIL;
+ } else {
+ uint64_t value = buf_get_u64(buf, 0, reg->size);
+ if (riscv_set_register(target, reg->number, value) != ERROR_OK)
+ return ERROR_FAIL;
+ }
- uint64_t value = buf_get_u64(buf, 0, reg->size);
-
- LOG_DEBUG("[%d]{%d} write 0x%" PRIx64 " to %s (valid=%d)",
- target->coreid, riscv_current_hartid(target), value, reg->name,
- reg->valid);
- struct reg *r = &target->reg_cache->reg_list[reg->number];
- /* CSRs (and possibly other extension) registers may change value at any
- * time. */
- if (reg->number <= GDB_REGNO_XPR31 ||
- (reg->number >= GDB_REGNO_FPR0 && reg->number <= GDB_REGNO_FPR31) ||
- reg->number == GDB_REGNO_PC)
- r->valid = true;
- memcpy(r->value, buf, (r->size + 7) / 8);
-
- riscv_set_register(target, reg->number, value);
return ERROR_OK;
}
@@ -2466,6 +3524,8 @@ int riscv_init_registers(struct target *target)
riscv_free_registers(target);
target->reg_cache = calloc(1, sizeof(*target->reg_cache));
+ if (!target->reg_cache)
+ return ERROR_FAIL;
target->reg_cache->name = "RISC-V Registers";
target->reg_cache->num_regs = GDB_REGNO_COUNT;
@@ -2483,13 +3543,19 @@ int riscv_init_registers(struct target *target)
target->reg_cache->reg_list =
calloc(target->reg_cache->num_regs, sizeof(struct reg));
+ if (!target->reg_cache->reg_list)
+ return ERROR_FAIL;
const unsigned int max_reg_name_len = 12;
free(info->reg_names);
info->reg_names =
calloc(target->reg_cache->num_regs, max_reg_name_len);
+ if (!info->reg_names)
+ return ERROR_FAIL;
char *reg_name = info->reg_names;
+ int hartid = riscv_current_hartid(target);
+
static struct reg_feature feature_cpu = {
.name = "org.gnu.gdb.riscv.cpu"
};
@@ -2499,6 +3565,9 @@ int riscv_init_registers(struct target *target)
static struct reg_feature feature_csr = {
.name = "org.gnu.gdb.riscv.csr"
};
+ static struct reg_feature feature_vector = {
+ .name = "org.gnu.gdb.riscv.vector"
+ };
static struct reg_feature feature_virtual = {
.name = "org.gnu.gdb.riscv.virtual"
};
@@ -2506,14 +3575,117 @@ int riscv_init_registers(struct target *target)
.name = "org.gnu.gdb.riscv.custom"
};
- static struct reg_data_type type_ieee_single = {
- .type = REG_TYPE_IEEE_SINGLE,
- .id = "ieee_single"
+ /* These types are built into gdb. */
+ static struct reg_data_type type_ieee_single = { .type = REG_TYPE_IEEE_SINGLE, .id = "ieee_single" };
+ static struct reg_data_type type_ieee_double = { .type = REG_TYPE_IEEE_DOUBLE, .id = "ieee_double" };
+ static struct reg_data_type_union_field single_double_fields[] = {
+ {"float", &type_ieee_single, single_double_fields + 1},
+ {"double", &type_ieee_double, NULL},
+ };
+ static struct reg_data_type_union single_double_union = {
+ .fields = single_double_fields
};
- static struct reg_data_type type_ieee_double = {
- .type = REG_TYPE_IEEE_DOUBLE,
- .id = "ieee_double"
+ static struct reg_data_type type_ieee_single_double = {
+ .type = REG_TYPE_ARCH_DEFINED,
+ .id = "FPU_FD",
+ .type_class = REG_TYPE_CLASS_UNION,
+ .reg_type_union = &single_double_union
};
+ static struct reg_data_type type_uint8 = { .type = REG_TYPE_UINT8, .id = "uint8" };
+ static struct reg_data_type type_uint16 = { .type = REG_TYPE_UINT16, .id = "uint16" };
+ static struct reg_data_type type_uint32 = { .type = REG_TYPE_UINT32, .id = "uint32" };
+ static struct reg_data_type type_uint64 = { .type = REG_TYPE_UINT64, .id = "uint64" };
+ static struct reg_data_type type_uint128 = { .type = REG_TYPE_UINT128, .id = "uint128" };
+
+ /* This is roughly the XML we want:
+ * <vector id="bytes" type="uint8" count="16"/>
+ * <vector id="shorts" type="uint16" count="8"/>
+ * <vector id="words" type="uint32" count="4"/>
+ * <vector id="longs" type="uint64" count="2"/>
+ * <vector id="quads" type="uint128" count="1"/>
+ * <union id="riscv_vector_type">
+ * <field name="b" type="bytes"/>
+ * <field name="s" type="shorts"/>
+ * <field name="w" type="words"/>
+ * <field name="l" type="longs"/>
+ * <field name="q" type="quads"/>
+ * </union>
+ */
+
+ info->vector_uint8.type = &type_uint8;
+ info->vector_uint8.count = info->vlenb[hartid];
+ info->type_uint8_vector.type = REG_TYPE_ARCH_DEFINED;
+ info->type_uint8_vector.id = "bytes";
+ info->type_uint8_vector.type_class = REG_TYPE_CLASS_VECTOR;
+ info->type_uint8_vector.reg_type_vector = &info->vector_uint8;
+
+ info->vector_uint16.type = &type_uint16;
+ info->vector_uint16.count = info->vlenb[hartid] / 2;
+ info->type_uint16_vector.type = REG_TYPE_ARCH_DEFINED;
+ info->type_uint16_vector.id = "shorts";
+ info->type_uint16_vector.type_class = REG_TYPE_CLASS_VECTOR;
+ info->type_uint16_vector.reg_type_vector = &info->vector_uint16;
+
+ info->vector_uint32.type = &type_uint32;
+ info->vector_uint32.count = info->vlenb[hartid] / 4;
+ info->type_uint32_vector.type = REG_TYPE_ARCH_DEFINED;
+ info->type_uint32_vector.id = "words";
+ info->type_uint32_vector.type_class = REG_TYPE_CLASS_VECTOR;
+ info->type_uint32_vector.reg_type_vector = &info->vector_uint32;
+
+ info->vector_uint64.type = &type_uint64;
+ info->vector_uint64.count = info->vlenb[hartid] / 8;
+ info->type_uint64_vector.type = REG_TYPE_ARCH_DEFINED;
+ info->type_uint64_vector.id = "longs";
+ info->type_uint64_vector.type_class = REG_TYPE_CLASS_VECTOR;
+ info->type_uint64_vector.reg_type_vector = &info->vector_uint64;
+
+ info->vector_uint128.type = &type_uint128;
+ info->vector_uint128.count = info->vlenb[hartid] / 16;
+ info->type_uint128_vector.type = REG_TYPE_ARCH_DEFINED;
+ info->type_uint128_vector.id = "quads";
+ info->type_uint128_vector.type_class = REG_TYPE_CLASS_VECTOR;
+ info->type_uint128_vector.reg_type_vector = &info->vector_uint128;
+
+ info->vector_fields[0].name = "b";
+ info->vector_fields[0].type = &info->type_uint8_vector;
+ if (info->vlenb[hartid] >= 2) {
+ info->vector_fields[0].next = info->vector_fields + 1;
+ info->vector_fields[1].name = "s";
+ info->vector_fields[1].type = &info->type_uint16_vector;
+ } else {
+ info->vector_fields[0].next = NULL;
+ }
+ if (info->vlenb[hartid] >= 4) {
+ info->vector_fields[1].next = info->vector_fields + 2;
+ info->vector_fields[2].name = "w";
+ info->vector_fields[2].type = &info->type_uint32_vector;
+ } else {
+ info->vector_fields[1].next = NULL;
+ }
+ if (info->vlenb[hartid] >= 8) {
+ info->vector_fields[2].next = info->vector_fields + 3;
+ info->vector_fields[3].name = "l";
+ info->vector_fields[3].type = &info->type_uint64_vector;
+ } else {
+ info->vector_fields[2].next = NULL;
+ }
+ if (info->vlenb[hartid] >= 16) {
+ info->vector_fields[3].next = info->vector_fields + 4;
+ info->vector_fields[4].name = "q";
+ info->vector_fields[4].type = &info->type_uint128_vector;
+ } else {
+ info->vector_fields[3].next = NULL;
+ }
+ info->vector_fields[4].next = NULL;
+
+ info->vector_union.fields = info->vector_fields;
+
+ info->type_vector.type = REG_TYPE_ARCH_DEFINED;
+ info->type_vector.id = "riscv_vector";
+ info->type_vector.type_class = REG_TYPE_CLASS_UNION;
+ info->type_vector.reg_type_union = &info->vector_union;
+
struct csr_info csr_info[] = {
#define DECLARE_CSR(name, number) { number, #name },
#include "encoding.h"
@@ -2527,6 +3699,8 @@ int riscv_init_registers(struct target *target)
int custom_within_range = 0;
riscv_reg_info_t *shared_reg_info = calloc(1, sizeof(riscv_reg_info_t));
+ if (!shared_reg_info)
+ return ERROR_FAIL;
shared_reg_info->target = target;
/* When gdb requests register N, gdb_get_register_packet() assumes that this
@@ -2547,6 +3721,11 @@ int riscv_init_registers(struct target *target)
* target is in theory allowed to change XLEN on us. But I expect a lot
* of other things to break in that case as well. */
if (number <= GDB_REGNO_XPR31) {
+ r->exist = number <= GDB_REGNO_XPR15 ||
+ !riscv_supports_extension(target, hartid, 'E');
+ /* TODO: For now we fake that all GPRs exist because otherwise gdb
+ * doesn't work. */
+ r->exist = true;
r->caller_save = true;
switch (number) {
case GDB_REGNO_ZERO:
@@ -2655,12 +3834,13 @@ int riscv_init_registers(struct target *target)
r->feature = &feature_cpu;
} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
r->caller_save = true;
- if (riscv_supports_extension(target, riscv_current_hartid(target),
- 'D')) {
- r->reg_data_type = &type_ieee_double;
+ if (riscv_supports_extension(target, hartid, 'D')) {
r->size = 64;
- } else if (riscv_supports_extension(target,
- riscv_current_hartid(target), 'F')) {
+ if (riscv_supports_extension(target, hartid, 'F'))
+ r->reg_data_type = &type_ieee_single_double;
+ else
+ r->reg_data_type = &type_ieee_double;
+ } else if (riscv_supports_extension(target, hartid, 'F')) {
r->reg_data_type = &type_ieee_single;
r->size = 32;
} else {
@@ -2791,8 +3971,7 @@ int riscv_init_registers(struct target *target)
case CSR_FFLAGS:
case CSR_FRM:
case CSR_FCSR:
- r->exist = riscv_supports_extension(target,
- riscv_current_hartid(target), 'F');
+ r->exist = riscv_supports_extension(target, hartid, 'F');
r->group = "float";
r->feature = &feature_fpu;
break;
@@ -2806,18 +3985,19 @@ int riscv_init_registers(struct target *target)
case CSR_SCAUSE:
case CSR_STVAL:
case CSR_SATP:
- r->exist = riscv_supports_extension(target,
- riscv_current_hartid(target), 'S');
+ r->exist = riscv_supports_extension(target, hartid, 'S');
break;
case CSR_MEDELEG:
case CSR_MIDELEG:
/* "In systems with only M-mode, or with both M-mode and
* U-mode but without U-mode trap support, the medeleg and
* mideleg registers should not exist." */
- r->exist = riscv_supports_extension(target, riscv_current_hartid(target), 'S') ||
- riscv_supports_extension(target, riscv_current_hartid(target), 'N');
+ r->exist = riscv_supports_extension(target, hartid, 'S') ||
+ riscv_supports_extension(target, hartid, 'N');
break;
+ case CSR_PMPCFG1:
+ case CSR_PMPCFG3:
case CSR_CYCLEH:
case CSR_TIMEH:
case CSR_INSTRETH:
@@ -2883,6 +4063,15 @@ int riscv_init_registers(struct target *target)
case CSR_MHPMCOUNTER31H:
r->exist = riscv_xlen(target) == 32;
break;
+
+ case CSR_VSTART:
+ case CSR_VXSAT:
+ case CSR_VXRM:
+ case CSR_VL:
+ case CSR_VTYPE:
+ case CSR_VLENB:
+ r->exist = riscv_supports_extension(target, hartid, 'V');
+ break;
}
if (!r->exist && expose_csr) {
@@ -2901,7 +4090,16 @@ int riscv_init_registers(struct target *target)
r->feature = &feature_virtual;
r->size = 8;
- } else {
+ } else if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31) {
+ r->caller_save = false;
+ r->exist = riscv_supports_extension(target, hartid, 'V') && info->vlenb[hartid];
+ r->size = info->vlenb[hartid] * 8;
+ sprintf(reg_name, "v%d", number - GDB_REGNO_V0);
+ r->group = "vector";
+ r->feature = &feature_vector;
+ r->reg_data_type = &info->type_vector;
+
+ } else if (number >= GDB_REGNO_COUNT) {
/* Custom registers. */
assert(expose_custom);
@@ -2912,7 +4110,8 @@ int riscv_init_registers(struct target *target)
r->group = "custom";
r->feature = &feature_custom;
r->arch_info = calloc(1, sizeof(riscv_reg_info_t));
- assert(r->arch_info);
+ if (!r->arch_info)
+ return ERROR_FAIL;
((riscv_reg_info_t *) r->arch_info)->target = target;
((riscv_reg_info_t *) r->arch_info)->custom_number = custom_number;
sprintf(reg_name, "custom%d", custom_number);
@@ -2934,3 +4133,43 @@ int riscv_init_registers(struct target *target)
return ERROR_OK;
}
+
+
+void riscv_add_bscan_tunneled_scan(struct target *target, struct scan_field *field,
+ riscv_bscan_tunneled_scan_context_t *ctxt)
+{
+ jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
+
+ memset(ctxt->tunneled_dr, 0, sizeof(ctxt->tunneled_dr));
+ if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER) {
+ ctxt->tunneled_dr[3].num_bits = 1;
+ ctxt->tunneled_dr[3].out_value = bscan_one;
+ ctxt->tunneled_dr[2].num_bits = 7;
+ ctxt->tunneled_dr_width = field->num_bits;
+ ctxt->tunneled_dr[2].out_value = &ctxt->tunneled_dr_width;
+		/* Because of the one-TCK skew between shift-in and shift-out on the BSCAN tunnel,
+		   scan num_bits + 1 bits and right-shift the captured field by one bit after the queue executes. */
+
+ ctxt->tunneled_dr[1].num_bits = field->num_bits + 1;
+ ctxt->tunneled_dr[1].out_value = field->out_value;
+ ctxt->tunneled_dr[1].in_value = field->in_value;
+
+ ctxt->tunneled_dr[0].num_bits = 3;
+ ctxt->tunneled_dr[0].out_value = bscan_zero;
+ } else {
+ /* BSCAN_TUNNEL_NESTED_TAP */
+ ctxt->tunneled_dr[0].num_bits = 1;
+ ctxt->tunneled_dr[0].out_value = bscan_one;
+ ctxt->tunneled_dr[1].num_bits = 7;
+ ctxt->tunneled_dr_width = field->num_bits;
+ ctxt->tunneled_dr[1].out_value = &ctxt->tunneled_dr_width;
+		/* Because of the one-TCK skew between shift-in and shift-out on the BSCAN tunnel,
+		   scan num_bits + 1 bits and right-shift the captured field by one bit after the queue executes. */
+ ctxt->tunneled_dr[2].num_bits = field->num_bits + 1;
+ ctxt->tunneled_dr[2].out_value = field->out_value;
+ ctxt->tunneled_dr[2].in_value = field->in_value;
+ ctxt->tunneled_dr[3].num_bits = 3;
+ ctxt->tunneled_dr[3].out_value = bscan_zero;
+ }
+ jtag_add_dr_scan(target->tap, ARRAY_SIZE(ctxt->tunneled_dr), ctxt->tunneled_dr, TAP_IDLE);
+}
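Because the tunneled field is scanned with num_bits + 1 bits to absorb the one-TCK skew described above, the value captured into in_value comes back one bit high and has to be shifted right once the JTAG queue has executed. The helper below is only an illustrative sketch of that post-processing step; the function name and the assumption of a little-endian, byte-packed capture buffer are mine, not part of this patch.

    /* Sketch: undo the one-bit skew in a little-endian bit buffer captured
     * from a tunneled scan. bit_len is the logical width of the field. */
    static void bscan_tunnel_fixup(uint8_t *buf, unsigned bit_len)
    {
    	unsigned bytes = (bit_len + 7) / 8;
    	uint8_t carry = 0;
    	/* Walk from the most significant byte down, moving each byte's low
    	 * bit into the top of the byte below it. */
    	for (int i = (int)bytes - 1; i >= 0; i--) {
    		uint8_t next_carry = buf[i] & 1;
    		buf[i] = (uint8_t)((buf[i] >> 1) | (carry << 7));
    		carry = next_carry;
    	}
    }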
diff --git a/src/target/riscv/riscv.h b/src/target/riscv/riscv.h
index ba50d2c..7e74cf7 100644
--- a/src/target/riscv/riscv.h
+++ b/src/target/riscv/riscv.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
#ifndef RISCV_H
#define RISCV_H
@@ -6,9 +8,11 @@ struct riscv_program;
#include <stdint.h>
#include "opcodes.h"
#include "gdb_regs.h"
+#include "jtag/jtag.h"
+#include "target/register.h"
/* The register cache is statically allocated. */
-#define RISCV_MAX_HARTS 32
+#define RISCV_MAX_HARTS 1024
#define RISCV_MAX_REGISTERS 5000
#define RISCV_MAX_TRIGGERS 32
#define RISCV_MAX_HWBPS 16
@@ -16,6 +20,12 @@ struct riscv_program;
#define DEFAULT_COMMAND_TIMEOUT_SEC 2
#define DEFAULT_RESET_TIMEOUT_SEC 30
+#define RISCV_SATP_MODE(xlen) ((xlen) == 32 ? SATP32_MODE : SATP64_MODE)
+#define RISCV_SATP_PPN(xlen) ((xlen) == 32 ? SATP32_PPN : SATP64_PPN)
+#define RISCV_PGSHIFT 12
+
+#define PG_MAX_LEVEL 4
+
extern struct target_type riscv011_target;
extern struct target_type riscv013_target;
@@ -32,6 +42,7 @@ enum riscv_halt_reason {
RISCV_HALT_SINGLESTEP,
RISCV_HALT_TRIGGER,
RISCV_HALT_UNKNOWN,
+ RISCV_HALT_GROUP,
RISCV_HALT_ERROR
};
@@ -46,9 +57,6 @@ typedef struct {
struct command_context *cmd_ctx;
void *version_specific;
- /* The number of harts on this system. */
- int hart_count;
-
/* The hart that the RTOS thinks is currently being debugged. */
int rtos_hartid;
@@ -58,11 +66,6 @@ typedef struct {
* every function than an actual */
int current_hartid;
- /* Enough space to store all the registers we might need to save. */
- /* FIXME: This should probably be a bunch of register caches. */
- uint64_t saved_registers[RISCV_MAX_HARTS][RISCV_MAX_REGISTERS];
- bool valid_saved_registers[RISCV_MAX_HARTS][RISCV_MAX_REGISTERS];
-
/* OpenOCD's register cache points into here. This is not per-hart because
* we just invalidate the entire cache when we change which hart is
* selected. */
@@ -75,6 +78,8 @@ typedef struct {
/* It's possible that each core has a different supported ISA set. */
int xlen[RISCV_MAX_HARTS];
riscv_reg_t misa[RISCV_MAX_HARTS];
+ /* Cached value of vlenb. 0 if vlenb is not readable for some reason. */
+ unsigned vlenb[RISCV_MAX_HARTS];
/* The number of triggers per hart. */
unsigned trigger_count[RISCV_MAX_HARTS];
@@ -100,19 +105,33 @@ typedef struct {
* delays, causing them to be relearned. Used for testing. */
int reset_delays_wait;
+ /* This target has been prepped and is ready to step/resume. */
+ bool prepped;
+ /* This target was selected using hasel. */
+ bool selected;
+
/* Helper functions that target the various RISC-V debug spec
* implementations. */
int (*get_register)(struct target *target,
riscv_reg_t *value, int hid, int rid);
int (*set_register)(struct target *target, int hartid, int regid,
uint64_t value);
+ int (*get_register_buf)(struct target *target, uint8_t *buf, int regno);
+ int (*set_register_buf)(struct target *target, int regno,
+ const uint8_t *buf);
int (*select_current_hart)(struct target *target);
bool (*is_halted)(struct target *target);
- int (*halt_current_hart)(struct target *target);
- int (*resume_current_hart)(struct target *target);
+ /* Resume this target, as well as every other prepped target that can be
+ * resumed near-simultaneously. Clear the prepped flag on any target that
+ * was resumed. */
+ int (*resume_go)(struct target *target);
int (*step_current_hart)(struct target *target);
int (*on_halt)(struct target *target);
- int (*on_resume)(struct target *target);
+ /* Get this target as ready as possible to resume, without actually
+ * resuming. */
+ int (*resume_prep)(struct target *target);
+ int (*halt_prep)(struct target *target);
+ int (*halt_go)(struct target *target);
int (*on_step)(struct target *target);
enum riscv_halt_reason (*halt_reason)(struct target *target);
int (*write_debug_buffer)(struct target *target, unsigned index,
@@ -134,8 +153,52 @@ typedef struct {
uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
int (*test_compliance)(struct target *target);
+
+ int (*read_memory)(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
+
+ /* How many harts are attached to the DM that this target is attached to? */
+ int (*hart_count)(struct target *target);
+ unsigned (*data_bits)(struct target *target);
+
+ /* Storage for vector register types. */
+ struct reg_data_type_vector vector_uint8;
+ struct reg_data_type_vector vector_uint16;
+ struct reg_data_type_vector vector_uint32;
+ struct reg_data_type_vector vector_uint64;
+ struct reg_data_type_vector vector_uint128;
+ struct reg_data_type type_uint8_vector;
+ struct reg_data_type type_uint16_vector;
+ struct reg_data_type type_uint32_vector;
+ struct reg_data_type type_uint64_vector;
+ struct reg_data_type type_uint128_vector;
+ struct reg_data_type_union_field vector_fields[5];
+ struct reg_data_type_union vector_union;
+ struct reg_data_type type_vector;
+
+	/* Set when trigger registers are changed by the user. This indicates we need
+	 * to beware that we may hit a trigger that we didn't realize had been set. */
+ bool manual_hwbp_set;
} riscv_info_t;
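The halt_prep/halt_go and resume_prep/resume_go pairs above replace the old per-hart callbacks with a two-phase protocol: every target that should move is prepared first, then a single go call starts all prepped targets as close to simultaneously as the hardware allows. A rough sketch of the resume side of that calling pattern follows; this is a hypothetical driver loop, not the actual code in riscv.c.

    /* Minimal sketch of the two-phase resume implied by the callbacks above. */
    static int resume_prepped_targets(struct target *targets[], unsigned count)
    {
    	if (count == 0)
    		return ERROR_OK;
    	/* Phase 1: get every target ready to resume without resuming it. */
    	for (unsigned i = 0; i < count; i++) {
    		riscv_info_t *r = riscv_info(targets[i]);
    		if (r->resume_prep(targets[i]) != ERROR_OK)
    			return ERROR_FAIL;
    	}
    	/* Phase 2: resume_go on one target also resumes every other prepped
    	 * target that can be started near-simultaneously, and clears the
    	 * prepped flag on whatever it resumed. */
    	return riscv_info(targets[0])->resume_go(targets[0]);
    }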
+typedef struct {
+ uint8_t tunneled_dr_width;
+ struct scan_field tunneled_dr[4];
+} riscv_bscan_tunneled_scan_context_t;
+
+typedef struct {
+ const char *name;
+ int level;
+ unsigned va_bits;
+ unsigned pte_shift;
+ unsigned vpn_shift[PG_MAX_LEVEL];
+ unsigned vpn_mask[PG_MAX_LEVEL];
+ unsigned pte_ppn_shift[PG_MAX_LEVEL];
+ unsigned pte_ppn_mask[PG_MAX_LEVEL];
+ unsigned pa_ppn_shift[PG_MAX_LEVEL];
+ unsigned pa_ppn_mask[PG_MAX_LEVEL];
+} virt2phys_info_t;
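To make these fields concrete, an Sv39 descriptor could plausibly be filled in as below, following the Sv39 format from the privileged spec (three 512-entry levels, 8-byte PTEs, VPN fields at VA bits 12/21/30, PPN fields at PTE bits 10/19/28). This initializer is only a sketch; the real tables elsewhere in riscv.c may name or order the levels differently.

    /* Sketch of an Sv39 descriptor; index 0 is taken to be the lowest level. */
    static const virt2phys_info_t sv39_example = {
    	.name = "Sv39",
    	.level = 3,
    	.va_bits = 39,
    	.pte_shift = 3,                        /* 8-byte PTEs */
    	.vpn_shift = {12, 21, 30},             /* VPN[0..2] position in the VA */
    	.vpn_mask = {0x1ff, 0x1ff, 0x1ff},     /* 9 bits per VPN field */
    	.pte_ppn_shift = {10, 19, 28},         /* PPN[0..2] position in a PTE */
    	.pte_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
    	.pa_ppn_shift = {12, 21, 30},          /* where each PPN lands in the PA */
    	.pa_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
    };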
+
/* Wall-clock timeout for a command/access. Settable via RISC-V Target commands.*/
extern int riscv_command_timeout_sec;
@@ -144,6 +207,11 @@ extern int riscv_reset_timeout_sec;
extern bool riscv_prefer_sba;
+extern bool riscv_enable_virtual;
+extern bool riscv_ebreakm;
+extern bool riscv_ebreaks;
+extern bool riscv_ebreaku;
+
/* Everything needs the RISC-V specific info structure, so here's a nice macro
* that provides that. */
static inline riscv_info_t *riscv_info(const struct target *target) __attribute__((unused));
@@ -158,17 +226,28 @@ extern struct scan_field select_dbus;
extern uint8_t ir_idcode[4];
extern struct scan_field select_idcode;
+extern struct scan_field select_user4;
+extern struct scan_field *bscan_tunneled_select_dmi;
+extern uint32_t bscan_tunneled_select_dmi_num_fields;
+typedef enum { BSCAN_TUNNEL_NESTED_TAP, BSCAN_TUNNEL_DATA_REGISTER } bscan_tunnel_type_t;
+extern int bscan_tunnel_ir_width;
+extern bscan_tunnel_type_t bscan_tunnel_type;
+
+uint32_t dtmcontrol_scan_via_bscan(struct target *target, uint32_t out);
+void select_dmi_via_bscan(struct target *target);
+
/*** OpenOCD Interface */
int riscv_openocd_poll(struct target *target);
-int riscv_openocd_halt(struct target *target);
+int riscv_halt(struct target *target);
-int riscv_openocd_resume(
+int riscv_resume(
struct target *target,
int current,
target_addr_t address,
int handle_breakpoints,
- int debug_execution
+ int debug_execution,
+ bool single_hart
);
int riscv_openocd_step(
@@ -186,14 +265,6 @@ int riscv_openocd_deassert_reset(struct target *target);
/* Initializes the shared RISC-V structure. */
void riscv_info_init(struct target *target, riscv_info_t *r);
-/* Run control, possibly for multiple harts. The _all_harts versions resume
- * all the enabled harts, which when running in RTOS mode is all the harts on
- * the system. */
-int riscv_halt_all_harts(struct target *target);
-int riscv_halt_one_hart(struct target *target, int hartid);
-int riscv_resume_all_harts(struct target *target);
-int riscv_resume_one_hart(struct target *target, int hartid);
-
/* Steps the hart that's currently selected in the RTOS, or if there is no RTOS
* then the only hart. */
int riscv_step_rtos_hart(struct target *target);
@@ -201,7 +272,7 @@ int riscv_step_rtos_hart(struct target *target);
bool riscv_supports_extension(struct target *target, int hartid, char letter);
/* Returns XLEN for the given (or current) hart. */
-int riscv_xlen(const struct target *target);
+unsigned riscv_xlen(const struct target *target);
int riscv_xlen_of_hart(const struct target *target, int hartid);
bool riscv_rtos_enabled(const struct target *target);
@@ -226,12 +297,14 @@ int riscv_count_harts(struct target *target);
/* Returns TRUE if the target has the given register on the given hart. */
bool riscv_has_register(struct target *target, int hartid, int regid);
-/* Returns the value of the given register on the given hart. 32-bit registers
- * are zero extended to 64 bits. */
+/** Set register, updating the cache. */
int riscv_set_register(struct target *target, enum gdb_regno i, riscv_reg_t v);
+/** Set register, updating the cache. */
int riscv_set_register_on_hart(struct target *target, int hid, enum gdb_regno rid, uint64_t v);
+/** Get register, from the cache if it's in there. */
int riscv_get_register(struct target *target, riscv_reg_t *value,
enum gdb_regno r);
+/** Get register, from the cache if it's in there. */
int riscv_get_register_on_hart(struct target *target, riscv_reg_t *value,
int hartid, enum gdb_regno regid);
@@ -272,6 +345,15 @@ int riscv_hit_watchpoint(struct target *target, struct watchpoint **hit_wp_addre
int riscv_init_registers(struct target *target);
void riscv_semihosting_init(struct target *target);
-int riscv_semihosting(struct target *target, int *retval);
+typedef enum {
+ SEMI_NONE, /* Not halted for a semihosting call. */
+ SEMI_HANDLED, /* Call handled, and target was resumed. */
+ SEMI_WAITING, /* Call handled, target is halted waiting until we can resume. */
+ SEMI_ERROR /* Something went wrong. */
+} semihosting_result_t;
+semihosting_result_t riscv_semihosting(struct target *target, int *retval);
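This enum gives the poll path enough information to decide what to do with a halt. The sketch below is a hypothetical caller, not the actual dispatch in riscv.c, showing how each result is meant to be handled.

    static int handle_possible_semihost(struct target *target)
    {
    	int retval = ERROR_OK;
    	switch (riscv_semihosting(target, &retval)) {
    	case SEMI_NONE:
    		/* Not a semihosting trap: report the halt to the debugger as usual. */
    		break;
    	case SEMI_HANDLED:
    		/* Call handled and the target resumed; nothing more to report. */
    		break;
    	case SEMI_WAITING:
    		/* Target stays halted until the gdb fileio reply lets it resume. */
    		break;
    	case SEMI_ERROR:
    		return retval;
    	}
    	return ERROR_OK;
    }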
+
+void riscv_add_bscan_tunneled_scan(struct target *target, struct scan_field *field,
+ riscv_bscan_tunneled_scan_context_t *ctxt);
#endif
diff --git a/src/target/riscv/riscv_semihosting.c b/src/target/riscv/riscv_semihosting.c
index c4b6653..99d6c77 100644
--- a/src/target/riscv/riscv_semihosting.c
+++ b/src/target/riscv/riscv_semihosting.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
/***************************************************************************
* Copyright (C) 2018 by Liviu Ionescu *
* ilg@livius.net *
@@ -60,35 +62,35 @@ void riscv_semihosting_init(struct target *target)
/**
* Check for and process a semihosting request using the ARM protocol). This
* is meant to be called when the target is stopped due to a debug mode entry.
- * If the value 0 is returned then there was nothing to process. A non-zero
- * return value signifies that a request was processed and the target resumed,
- * or an error was encountered, in which case the caller must return
- * immediately.
*
* @param target Pointer to the target to process.
* @param retval Pointer to a location where the return code will be stored
* @return non-zero value if a request was processed or an error encountered
*/
-int riscv_semihosting(struct target *target, int *retval)
+semihosting_result_t riscv_semihosting(struct target *target, int *retval)
{
struct semihosting *semihosting = target->semihosting;
- if (!semihosting)
- return 0;
+ if (!semihosting) {
+ LOG_DEBUG(" -> NONE (!semihosting)");
+ return SEMI_NONE;
+ }
- if (!semihosting->is_active)
- return 0;
+ if (!semihosting->is_active) {
+ LOG_DEBUG(" -> NONE (!semihosting->is_active)");
+ return SEMI_NONE;
+ }
- riscv_reg_t dpc;
- int result = riscv_get_register(target, &dpc, GDB_REGNO_DPC);
+ riscv_reg_t pc;
+ int result = riscv_get_register(target, &pc, GDB_REGNO_PC);
if (result != ERROR_OK)
- return 0;
+ return SEMI_ERROR;
uint8_t tmp[12];
/* Read the current instruction, including the bracketing */
- *retval = target_read_memory(target, dpc - 4, 2, 6, tmp);
+ *retval = target_read_memory(target, pc - 4, 2, 6, tmp);
if (*retval != ERROR_OK)
- return 0;
+ return SEMI_ERROR;
/*
* The instructions that trigger a semihosting call,
@@ -101,12 +103,12 @@ int riscv_semihosting(struct target *target, int *retval)
uint32_t pre = target_buffer_get_u32(target, tmp);
uint32_t ebreak = target_buffer_get_u32(target, tmp + 4);
uint32_t post = target_buffer_get_u32(target, tmp + 8);
- LOG_DEBUG("check %08x %08x %08x from 0x%" PRIx64 "-4", pre, ebreak, post, dpc);
+ LOG_DEBUG("check %08x %08x %08x from 0x%" PRIx64 "-4", pre, ebreak, post, pc);
if (pre != 0x01f01013 || ebreak != 0x00100073 || post != 0x40705013) {
-
/* Not the magic sequence defining semihosting. */
- return 0;
+ LOG_DEBUG(" -> NONE (no magic)");
+ return SEMI_NONE;
}
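For reference, the three words checked above are the uncompressed encodings of the conventional RISC-V semihosting marker: slli zero, zero, 0x1f (0x01f01013), ebreak (0x00100073) and srai zero, zero, 0x7 (0x40705013). Below is a sketch of how target-side firmware might issue such a call; the helper name is hypothetical, and .option norvc forces 32-bit encodings so the probe reads exactly these words.

    static inline long semihost_call(long op, long param)
    {
    	register long a0 asm("a0") = op;     /* operation number, read back below as r0 */
    	register long a1 asm("a1") = param;  /* parameter block address, read back as r1 */
    	asm volatile (
    		".option push\n"
    		".option norvc\n"
    		"slli zero, zero, 0x1f\n"        /* 0x01f01013 */
    		"ebreak\n"                       /* 0x00100073 */
    		"srai zero, zero, 0x7\n"         /* 0x40705013 */
    		".option pop\n"
    		: "+r" (a0) : "r" (a1) : "memory");
    	return a0;                           /* semihosting result */
    }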
/*
@@ -114,18 +116,21 @@ int riscv_semihosting(struct target *target, int *retval)
* operation to complete.
*/
if (!semihosting->hit_fileio) {
-
/* RISC-V uses A0 and A1 to pass function arguments */
riscv_reg_t r0;
riscv_reg_t r1;
result = riscv_get_register(target, &r0, GDB_REGNO_A0);
- if (result != ERROR_OK)
- return 0;
+ if (result != ERROR_OK) {
+ LOG_DEBUG(" -> ERROR (couldn't read a0)");
+ return SEMI_ERROR;
+ }
result = riscv_get_register(target, &r1, GDB_REGNO_A1);
- if (result != ERROR_OK)
- return 0;
+ if (result != ERROR_OK) {
+ LOG_DEBUG(" -> ERROR (couldn't read a1)");
+ return SEMI_ERROR;
+ }
semihosting->op = r0;
semihosting->param = r1;
@@ -136,11 +141,12 @@ int riscv_semihosting(struct target *target, int *retval)
*retval = semihosting_common(target);
if (*retval != ERROR_OK) {
LOG_ERROR("Failed semihosting operation");
- return 0;
+ return SEMI_ERROR;
}
} else {
/* Unknown operation number, not a semihosting call. */
- return 0;
+ LOG_DEBUG(" -> NONE (unknown operation number)");
+ return SEMI_NONE;
}
}
@@ -150,16 +156,16 @@ int riscv_semihosting(struct target *target, int *retval)
*/
if (semihosting->is_resumable && !semihosting->hit_fileio) {
/* Resume right after the EBREAK 4 bytes instruction. */
- *retval = target_resume(target, 0, dpc+4, 0, 0);
- if (*retval != ERROR_OK) {
- LOG_ERROR("Failed to resume target");
- return 0;
- }
+ *retval = riscv_set_register(target, GDB_REGNO_PC, pc + 4);
+ if (*retval != ERROR_OK)
+ return SEMI_ERROR;
- return 1;
+ LOG_DEBUG(" -> HANDLED");
+ return SEMI_HANDLED;
}
- return 0;
+ LOG_DEBUG(" -> WAITING");
+ return SEMI_WAITING;
}
/* -------------------------------------------------------------------------
@@ -171,7 +177,7 @@ int riscv_semihosting(struct target *target, int *retval)
*/
static int riscv_semihosting_setup(struct target *target, int enable)
{
- LOG_DEBUG("enable=%d", enable);
+ LOG_DEBUG("[%s] enable=%d", target_name(target), enable);
struct semihosting *semihosting = target->semihosting;
if (semihosting)