Diffstat (limited to 'src/target')
-rw-r--r--  src/target/aarch64.c                         |    8
-rw-r--r--  src/target/aarch64.h                         |    4
-rw-r--r--  src/target/armv8_cache.c                     |    7
-rw-r--r--  src/target/breakpoints.c                     |   32
-rw-r--r--  src/target/cortex_a.c                        |   19
-rw-r--r--  src/target/dsp563xx.c                        |    2
-rw-r--r--  src/target/espressif/Makefile.am             |    6
-rw-r--r--  src/target/espressif/esp.c                   |   10
-rw-r--r--  src/target/espressif/esp.h                   |    2
-rw-r--r--  src/target/espressif/esp32.c                 |    4
-rw-r--r--  src/target/espressif/esp32s2.c               |    4
-rw-r--r--  src/target/espressif/esp32s3.c               |    4
-rw-r--r--  src/target/espressif/esp_algorithm.c         |  595
-rw-r--r--  src/target/espressif/esp_algorithm.h         |  420
-rw-r--r--  src/target/espressif/esp_xtensa.c            |    8
-rw-r--r--  src/target/espressif/esp_xtensa_algorithm.c  |  140
-rw-r--r--  src/target/espressif/esp_xtensa_algorithm.h  |   19
-rw-r--r--  src/target/espressif/esp_xtensa_smp.c        |   80
-rw-r--r--  src/target/espressif/esp_xtensa_smp.h        |    8
-rw-r--r--  src/target/mips32.h                          |    6
-rw-r--r--  src/target/mips32_pracc.c                    |    7
-rw-r--r--  src/target/mips_m4k.c                        |    6
-rw-r--r--  src/target/smp.c                             |    3
-rw-r--r--  src/target/target.c                          |  110
-rw-r--r--  src/target/target.h                          |    4
-rw-r--r--  src/target/xtensa/xtensa.c                   |  252
-rw-r--r--  src/target/xtensa/xtensa.h                   |   25
-rw-r--r--  src/target/xtensa/xtensa_debug_module.c      |   38
-rw-r--r--  src/target/xtensa/xtensa_debug_module.h      |   40
29 files changed, 1742 insertions(+), 121 deletions(-)
diff --git a/src/target/aarch64.c b/src/target/aarch64.c
index db60243..1c056a0 100644
--- a/src/target/aarch64.c
+++ b/src/target/aarch64.c
@@ -105,7 +105,7 @@ static int aarch64_restore_system_control_reg(struct target *target)
if (target_mode != ARM_MODE_ANY)
armv8_dpm_modeswitch(&armv8->dpm, target_mode);
- retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
+ retval = armv8->dpm.instr_write_data_r0_64(&armv8->dpm, instr, aarch64->system_control_reg);
if (retval != ERROR_OK)
return retval;
@@ -182,7 +182,7 @@ static int aarch64_mmu_modify(struct target *target, int enable)
if (target_mode != ARM_MODE_ANY)
armv8_dpm_modeswitch(&armv8->dpm, target_mode);
- retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
+ retval = armv8->dpm.instr_write_data_r0_64(&armv8->dpm, instr,
aarch64->system_control_reg_curr);
if (target_mode != ARM_MODE_ANY)
@@ -1055,14 +1055,14 @@ static int aarch64_post_debug_entry(struct target *target)
if (target_mode != ARM_MODE_ANY)
armv8_dpm_modeswitch(&armv8->dpm, target_mode);
- retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
+ retval = armv8->dpm.instr_read_data_r0_64(&armv8->dpm, instr, &aarch64->system_control_reg);
if (retval != ERROR_OK)
return retval;
if (target_mode != ARM_MODE_ANY)
armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
- LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
+ LOG_DEBUG("System_register: %8.8" PRIx64, aarch64->system_control_reg);
aarch64->system_control_reg_curr = aarch64->system_control_reg;
if (armv8->armv8_mmu.armv8_cache.info == -1) {
diff --git a/src/target/aarch64.h b/src/target/aarch64.h
index 2721fe7..b265e82 100644
--- a/src/target/aarch64.h
+++ b/src/target/aarch64.h
@@ -43,8 +43,8 @@ struct aarch64_common {
struct armv8_common armv8_common;
/* Context information */
- uint32_t system_control_reg;
- uint32_t system_control_reg_curr;
+ uint64_t system_control_reg;
+ uint64_t system_control_reg_curr;
/* Breakpoint register pairs */
int brp_num_context;
diff --git a/src/target/armv8_cache.c b/src/target/armv8_cache.c
index cf71119..66d4e00 100644
--- a/src/target/armv8_cache.c
+++ b/src/target/armv8_cache.c
@@ -243,7 +243,7 @@ static int armv8_flush_all_data(struct target *target)
foreach_smp_target(head, target->smp_targets) {
struct target *curr = head->target;
if (curr->state == TARGET_HALTED) {
- LOG_INFO("Wait flushing data l1 on core %" PRId32, curr->coreid);
+ LOG_TARGET_INFO(curr, "Wait flushing data l1.");
retval = _armv8_flush_all_data(curr);
}
}
@@ -286,8 +286,9 @@ static struct armv8_cachesize decode_cache_reg(uint32_t cache_reg)
size.index = (cache_reg >> 13) & 0x7fff;
size.way = ((cache_reg >> 3) & 0x3ff);
- while (((size.way << i) & 0x80000000) == 0)
- i++;
+ if (size.way != 0)
+ while (((size.way << i) & 0x80000000) == 0)
+ i++;
size.way_shift = i;
return size;
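
The decode_cache_reg() guard above matters because the CCSIDR "way" field holds associativity minus one: on a direct-mapped cache it is zero, and the unguarded loop would never find a set bit and would shift past the type width. A minimal stand-alone sketch of the guarded computation (way_shift_for is a hypothetical helper name, not part of the patch):

    #include <stdint.h>

    unsigned int way_shift_for(uint32_t way)
    {
        unsigned int i = 0;

        /* for a 4-way cache, way == 3 and the loop stops at i == 30 */
        if (way != 0)
            while (((way << i) & 0x80000000) == 0)
                i++;
        return i;   /* stays 0 for a direct-mapped cache */
    }
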
diff --git a/src/target/breakpoints.c b/src/target/breakpoints.c
index d069c6b..77f7673 100644
--- a/src/target/breakpoints.c
+++ b/src/target/breakpoints.c
@@ -53,7 +53,7 @@ static int breakpoint_add_internal(struct target *target,
* breakpoint" ... check all the parameters before
* succeeding.
*/
- LOG_ERROR("Duplicate Breakpoint address: " TARGET_ADDR_FMT " (BP %" PRIu32 ")",
+ LOG_TARGET_ERROR(target, "Duplicate Breakpoint address: " TARGET_ADDR_FMT " (BP %" PRIu32 ")",
address, breakpoint->unique_id);
return ERROR_TARGET_DUPLICATE_BREAKPOINT;
}
@@ -84,16 +84,15 @@ static int breakpoint_add_internal(struct target *target,
default:
reason = "unknown reason";
fail:
- LOG_ERROR("can't add breakpoint: %s", reason);
+ LOG_TARGET_ERROR(target, "can't add breakpoint: %s", reason);
free((*breakpoint_p)->orig_instr);
free(*breakpoint_p);
*breakpoint_p = NULL;
return retval;
}
- LOG_DEBUG("[%d] added %s breakpoint at " TARGET_ADDR_FMT
+ LOG_TARGET_DEBUG(target, "added %s breakpoint at " TARGET_ADDR_FMT
" of length 0x%8.8x, (BPID: %" PRIu32 ")",
- target->coreid,
breakpoint_type_strings[(*breakpoint_p)->type],
(*breakpoint_p)->address, (*breakpoint_p)->length,
(*breakpoint_p)->unique_id);
@@ -135,14 +134,14 @@ static int context_breakpoint_add_internal(struct target *target,
(*breakpoint_p)->unique_id = bpwp_unique_id++;
retval = target_add_context_breakpoint(target, *breakpoint_p);
if (retval != ERROR_OK) {
- LOG_ERROR("could not add breakpoint");
+ LOG_TARGET_ERROR(target, "could not add breakpoint");
free((*breakpoint_p)->orig_instr);
free(*breakpoint_p);
*breakpoint_p = NULL;
return retval;
}
- LOG_DEBUG("added %s Context breakpoint at 0x%8.8" PRIx32 " of length 0x%8.8x, (BPID: %" PRIu32 ")",
+ LOG_TARGET_DEBUG(target, "added %s Context breakpoint at 0x%8.8" PRIx32 " of length 0x%8.8x, (BPID: %" PRIu32 ")",
breakpoint_type_strings[(*breakpoint_p)->type],
(*breakpoint_p)->asid, (*breakpoint_p)->length,
(*breakpoint_p)->unique_id);
@@ -166,11 +165,11 @@ static int hybrid_breakpoint_add_internal(struct target *target,
* breakpoint" ... check all the parameters before
* succeeding.
*/
- LOG_ERROR("Duplicate Hybrid Breakpoint asid: 0x%08" PRIx32 " (BP %" PRIu32 ")",
+ LOG_TARGET_ERROR(target, "Duplicate Hybrid Breakpoint asid: 0x%08" PRIx32 " (BP %" PRIu32 ")",
asid, breakpoint->unique_id);
return ERROR_TARGET_DUPLICATE_BREAKPOINT;
} else if ((breakpoint->address == address) && (breakpoint->asid == 0)) {
- LOG_ERROR("Duplicate Breakpoint IVA: " TARGET_ADDR_FMT " (BP %" PRIu32 ")",
+ LOG_TARGET_ERROR(target, "Duplicate Breakpoint IVA: " TARGET_ADDR_FMT " (BP %" PRIu32 ")",
address, breakpoint->unique_id);
return ERROR_TARGET_DUPLICATE_BREAKPOINT;
@@ -191,13 +190,13 @@ static int hybrid_breakpoint_add_internal(struct target *target,
retval = target_add_hybrid_breakpoint(target, *breakpoint_p);
if (retval != ERROR_OK) {
- LOG_ERROR("could not add breakpoint");
+ LOG_TARGET_ERROR(target, "could not add breakpoint");
free((*breakpoint_p)->orig_instr);
free(*breakpoint_p);
*breakpoint_p = NULL;
return retval;
}
- LOG_DEBUG(
+ LOG_TARGET_DEBUG(target,
"added %s Hybrid breakpoint at address " TARGET_ADDR_FMT " of length 0x%8.8x, (BPID: %" PRIu32 ")",
breakpoint_type_strings[(*breakpoint_p)->type],
(*breakpoint_p)->address,
@@ -307,7 +306,7 @@ static int breakpoint_free(struct target *data_target, struct target *breakpoint
return retval;
}
- LOG_DEBUG("free BPID: %" PRIu32 " --> %d", breakpoint->unique_id, retval);
+ LOG_TARGET_DEBUG(data_target, "free BPID: %" PRIu32 " --> %d", breakpoint->unique_id, retval);
(*breakpoint_p) = breakpoint->next;
free(breakpoint->orig_instr);
free(breakpoint);
@@ -445,7 +444,7 @@ static int watchpoint_free(struct target *target, struct watchpoint *watchpoint_
return retval;
}
- LOG_DEBUG("free WPID: %d --> %d", watchpoint->unique_id, retval);
+ LOG_TARGET_DEBUG(target, "free WPID: %d --> %d", watchpoint->unique_id, retval);
(*watchpoint_p) = watchpoint->next;
free(watchpoint);
@@ -556,7 +555,7 @@ static int watchpoint_add_internal(struct target *target, target_addr_t address,
|| watchpoint->value != value
|| watchpoint->mask != mask
|| watchpoint->rw != rw) {
- LOG_ERROR("address " TARGET_ADDR_FMT
+ LOG_TARGET_ERROR(target, "address " TARGET_ADDR_FMT
" already has watchpoint %d",
address, watchpoint->unique_id);
return ERROR_FAIL;
@@ -590,7 +589,7 @@ static int watchpoint_add_internal(struct target *target, target_addr_t address,
default:
reason = "unrecognized error";
bye:
- LOG_ERROR("can't add %s watchpoint at " TARGET_ADDR_FMT ", %s",
+ LOG_TARGET_ERROR(target, "can't add %s watchpoint at " TARGET_ADDR_FMT ", %s",
watchpoint_rw_strings[(*watchpoint_p)->rw],
address, reason);
free(*watchpoint_p);
@@ -598,9 +597,8 @@ bye:
return retval;
}
- LOG_DEBUG("[%d] added %s watchpoint at " TARGET_ADDR_FMT
+ LOG_TARGET_DEBUG(target, "added %s watchpoint at " TARGET_ADDR_FMT
" of length 0x%8.8" PRIx32 " (WPID: %d)",
- target->coreid,
watchpoint_rw_strings[(*watchpoint_p)->rw],
(*watchpoint_p)->address,
(*watchpoint_p)->length,
@@ -718,7 +716,7 @@ int watchpoint_hit(struct target *target, enum watchpoint_rw *rw,
*rw = hit_watchpoint->rw;
*address = hit_watchpoint->address;
- LOG_DEBUG("Found hit watchpoint at " TARGET_ADDR_FMT " (WPID: %d)",
+ LOG_TARGET_DEBUG(target, "Found hit watchpoint at " TARGET_ADDR_FMT " (WPID: %d)",
hit_watchpoint->address,
hit_watchpoint->unique_id);
diff --git a/src/target/cortex_a.c b/src/target/cortex_a.c
index ba3349d..7fa0c4e 100644
--- a/src/target/cortex_a.c
+++ b/src/target/cortex_a.c
@@ -2989,29 +2989,29 @@ static int cortex_a_examine_first(struct target *target)
armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
if (retval != ERROR_OK)
return retval;
- LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
+ LOG_TARGET_DEBUG(target, "DBGPRSR 0x%" PRIx32, dbg_osreg);
if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
- LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
+ LOG_TARGET_ERROR(target, "powered down!");
target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
return ERROR_TARGET_INIT_FAILED;
}
if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
- LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
+ LOG_TARGET_DEBUG(target, "was reset!");
/* Read DBGOSLSR and check if OSLK is implemented */
retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
if (retval != ERROR_OK)
return retval;
- LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
+ LOG_TARGET_DEBUG(target, "DBGOSLSR 0x%" PRIx32, dbg_osreg);
/* check if OS Lock is implemented */
if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
/* check if OS Lock is set */
if (dbg_osreg & OSLSR_OSLK) {
- LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
+ LOG_TARGET_DEBUG(target, "OSLock set! Trying to unlock");
retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
armv7a->debug_base + CPUDBG_OSLAR,
@@ -3022,8 +3022,7 @@ static int cortex_a_examine_first(struct target *target)
/* if we fail to access the register or cannot reset the OSLK bit, bail out */
if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
- LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
- target->coreid);
+ LOG_TARGET_ERROR(target, "OSLock sticky, core not powered?");
target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
return ERROR_TARGET_INIT_FAILED;
}
@@ -3036,13 +3035,11 @@ static int cortex_a_examine_first(struct target *target)
return retval;
if (dbg_idpfr1 & 0x000000f0) {
- LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
- target->coreid);
+ LOG_TARGET_DEBUG(target, "has security extensions");
armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
}
if (dbg_idpfr1 & 0x0000f000) {
- LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
- target->coreid);
+ LOG_TARGET_DEBUG(target, "has virtualization extensions");
/*
* overwrite and simplify the checks.
* virtualization extensions require implementation of security extension
diff --git a/src/target/dsp563xx.c b/src/target/dsp563xx.c
index 16fd149..1e71803 100644
--- a/src/target/dsp563xx.c
+++ b/src/target/dsp563xx.c
@@ -912,7 +912,7 @@ static int dsp563xx_examine(struct target *target)
{
uint32_t chip;
- if (target->tap->hasidcode == false) {
+ if (!target->tap->has_idcode) {
LOG_ERROR("no IDCODE present on device");
return ERROR_COMMAND_SYNTAX_ERROR;
}
diff --git a/src/target/espressif/Makefile.am b/src/target/espressif/Makefile.am
index 776818f..cf82ee9 100644
--- a/src/target/espressif/Makefile.am
+++ b/src/target/espressif/Makefile.am
@@ -10,6 +10,8 @@ noinst_LTLIBRARIES += %D%/libespressif.la
%D%/esp_xtensa_semihosting.h \
%D%/esp_xtensa_apptrace.c \
%D%/esp_xtensa_apptrace.h \
+ %D%/esp_xtensa_algorithm.c \
+ %D%/esp_xtensa_algorithm.h \
%D%/esp32_apptrace.c \
%D%/esp32_apptrace.h \
%D%/esp32.c \
@@ -21,4 +23,6 @@ noinst_LTLIBRARIES += %D%/libespressif.la
%D%/esp32_sysview.h \
%D%/segger_sysview.h \
%D%/esp_semihosting.c \
- %D%/esp_semihosting.h
+ %D%/esp_semihosting.h \
+ %D%/esp_algorithm.c \
+ %D%/esp_algorithm.h
diff --git a/src/target/espressif/esp.c b/src/target/espressif/esp.c
index 9583d64..600f6d7 100644
--- a/src/target/espressif/esp.c
+++ b/src/target/espressif/esp.c
@@ -14,6 +14,16 @@
#include "target/target.h"
#include "esp.h"
+int esp_common_init(struct esp_common *esp, const struct esp_algorithm_hw *algo_hw)
+{
+ if (!esp)
+ return ERROR_FAIL;
+
+ esp->algo_hw = algo_hw;
+
+ return ERROR_OK;
+}
+
int esp_dbgstubs_table_read(struct target *target, struct esp_dbg_stubs *dbg_stubs)
{
uint32_t table_size, table_start_id, desc_entry_id, gcov_entry_id;
diff --git a/src/target/espressif/esp.h b/src/target/espressif/esp.h
index 3ba2b8b..6e0a2d2 100644
--- a/src/target/espressif/esp.h
+++ b/src/target/espressif/esp.h
@@ -77,9 +77,11 @@ struct esp_dbg_stubs {
};
struct esp_common {
+ const struct esp_algorithm_hw *algo_hw;
struct esp_dbg_stubs dbg_stubs;
};
+int esp_common_init(struct esp_common *esp, const struct esp_algorithm_hw *algo_hw);
int esp_dbgstubs_table_read(struct target *target, struct esp_dbg_stubs *dbg_stubs);
#endif /* OPENOCD_TARGET_ESP_H */
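
The new esp_common_init() and the algo_hw field are tied together later in this patch: chip-level init code registers the arch-specific algorithm ops so generic ESP code can reach them through struct esp_common. A minimal sketch of the call site, mirroring the esp_xtensa.c hunk further down (error handling abbreviated):

    /* inside esp_xtensa_init_arch_info(), after xtensa_init_arch_info() succeeds */
    ret = esp_common_init(&esp_xtensa->esp, &xtensa_algo_hw);
    if (ret != ERROR_OK)
        return ret;
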
diff --git a/src/target/espressif/esp32.c b/src/target/espressif/esp32.c
index b510f28..324aa39 100644
--- a/src/target/espressif/esp32.c
+++ b/src/target/espressif/esp32.c
@@ -484,6 +484,10 @@ struct target_type esp32_target = {
.get_gdb_arch = xtensa_get_gdb_arch,
.get_gdb_reg_list = xtensa_get_gdb_reg_list,
+ .run_algorithm = xtensa_run_algorithm,
+ .start_algorithm = xtensa_start_algorithm,
+ .wait_algorithm = xtensa_wait_algorithm,
+
.add_breakpoint = esp_xtensa_breakpoint_add,
.remove_breakpoint = esp_xtensa_breakpoint_remove,
diff --git a/src/target/espressif/esp32s2.c b/src/target/espressif/esp32s2.c
index dadc130..2abde47 100644
--- a/src/target/espressif/esp32s2.c
+++ b/src/target/espressif/esp32s2.c
@@ -521,6 +521,10 @@ struct target_type esp32s2_target = {
.get_gdb_arch = xtensa_get_gdb_arch,
.get_gdb_reg_list = xtensa_get_gdb_reg_list,
+ .run_algorithm = xtensa_run_algorithm,
+ .start_algorithm = xtensa_start_algorithm,
+ .wait_algorithm = xtensa_wait_algorithm,
+
.add_breakpoint = esp_xtensa_breakpoint_add,
.remove_breakpoint = esp_xtensa_breakpoint_remove,
diff --git a/src/target/espressif/esp32s3.c b/src/target/espressif/esp32s3.c
index 5036956..22e1630 100644
--- a/src/target/espressif/esp32s3.c
+++ b/src/target/espressif/esp32s3.c
@@ -405,6 +405,10 @@ struct target_type esp32s3_target = {
.get_gdb_arch = xtensa_get_gdb_arch,
.get_gdb_reg_list = xtensa_get_gdb_reg_list,
+ .run_algorithm = xtensa_run_algorithm,
+ .start_algorithm = xtensa_start_algorithm,
+ .wait_algorithm = xtensa_wait_algorithm,
+
.add_breakpoint = esp_xtensa_breakpoint_add,
.remove_breakpoint = esp_xtensa_breakpoint_remove,
diff --git a/src/target/espressif/esp_algorithm.c b/src/target/espressif/esp_algorithm.c
new file mode 100644
index 0000000..79f610b
--- /dev/null
+++ b/src/target/espressif/esp_algorithm.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/***************************************************************************
+ * Espressif chips common algorithm API for OpenOCD *
+ * Copyright (C) 2022 Espressif Systems Ltd. *
+ ***************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <helper/align.h>
+#include <target/algorithm.h>
+#include <target/target.h>
+#include "esp_algorithm.h"
+
+#define DEFAULT_ALGORITHM_TIMEOUT_MS 40000 /* ms */
+
+static int esp_algorithm_read_stub_logs(struct target *target, struct esp_algorithm_stub *stub)
+{
+ if (!stub || stub->log_buff_addr == 0 || stub->log_buff_size == 0)
+ return ERROR_FAIL;
+
+ uint32_t len = 0;
+ int retval = target_read_u32(target, stub->log_buff_addr, &len);
+ if (retval != ERROR_OK)
+ return retval;
+
+ /* sanity check. log_buff_size = sizeof(len) + sizeof(log_buff) */
+ if (len == 0 || len > stub->log_buff_size - 4)
+ return ERROR_FAIL;
+
+ uint8_t *log_buff = calloc(1, len);
+ if (!log_buff) {
+ LOG_ERROR("Failed to allocate memory for the stub log!");
+ return ERROR_FAIL;
+ }
+ retval = target_read_memory(target, stub->log_buff_addr + 4, 1, len, log_buff);
+ if (retval == ERROR_OK)
+ LOG_OUTPUT("%*.*s", len, len, log_buff);
+ free(log_buff);
+ return retval;
+}
+
+static int esp_algorithm_run_image(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t num_args,
+ va_list ap)
+{
+ struct working_area **mem_handles = NULL;
+
+ if (!run || !run->hw)
+ return ERROR_FAIL;
+
+ int retval = run->hw->algo_init(target, run, num_args, ap);
+ if (retval != ERROR_OK)
+ return retval;
+
+ /* allocate memory arguments and fill respective reg params */
+ if (run->mem_args.count > 0) {
+ mem_handles = calloc(run->mem_args.count, sizeof(*mem_handles));
+ if (!mem_handles) {
+ LOG_ERROR("Failed to alloc target mem handles!");
+ retval = ERROR_FAIL;
+ goto _cleanup;
+ }
+ /* alloc memory args target buffers */
+ for (uint32_t i = 0; i < run->mem_args.count; i++) {
+ /* small hack: if we need to update some reg param, this field holds the
+ * appropriate user argument number; otherwise it should hold UINT_MAX */
+ uint32_t usr_param_num = run->mem_args.params[i].address;
+ static struct working_area *area;
+ retval = target_alloc_working_area(target, run->mem_args.params[i].size, &area);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to alloc target buffer!");
+ retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ goto _cleanup;
+ }
+ mem_handles[i] = area;
+ run->mem_args.params[i].address = area->address;
+ if (usr_param_num != UINT_MAX) /* if we need update some register param with mem param value */
+ esp_algorithm_user_arg_set_uint(run, usr_param_num, run->mem_args.params[i].address);
+ }
+ }
+
+ if (run->usr_func_init) {
+ retval = run->usr_func_init(target, run, run->usr_func_arg);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to prepare algorithm host side args stub (%d)!", retval);
+ goto _cleanup;
+ }
+ }
+
+ LOG_DEBUG("Algorithm start @ " TARGET_ADDR_FMT ", stack %d bytes @ " TARGET_ADDR_FMT,
+ run->stub.tramp_mapped_addr, run->stack_size, run->stub.stack_addr);
+ retval = target_start_algorithm(target,
+ run->mem_args.count, run->mem_args.params,
+ run->reg_args.count, run->reg_args.params,
+ run->stub.tramp_mapped_addr, 0,
+ run->stub.ainfo);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to start algorithm (%d)!", retval);
+ goto _cleanup;
+ }
+
+ if (run->usr_func) {
+ /* give target algorithm stub time to init itself, then user func can communicate to it safely */
+ alive_sleep(100);
+ retval = run->usr_func(target, run->usr_func_arg);
+ if (retval != ERROR_OK)
+ LOG_ERROR("Failed to exec algorithm user func (%d)!", retval);
+ }
+ uint32_t timeout_ms = 0; /* do not wait if 'usr_func' returned error */
+ if (retval == ERROR_OK)
+ timeout_ms = run->timeout_ms ? run->timeout_ms : DEFAULT_ALGORITHM_TIMEOUT_MS;
+ LOG_DEBUG("Wait algorithm completion");
+ retval = target_wait_algorithm(target,
+ run->mem_args.count, run->mem_args.params,
+ run->reg_args.count, run->reg_args.params,
+ 0, timeout_ms,
+ run->stub.ainfo);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to wait algorithm (%d)!", retval);
+ /* target has been forced to stop in target_wait_algorithm() */
+ }
+ esp_algorithm_read_stub_logs(target, &run->stub);
+
+ if (run->usr_func_done)
+ run->usr_func_done(target, run, run->usr_func_arg);
+
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Algorithm run failed (%d)!", retval);
+ } else {
+ run->ret_code = esp_algorithm_user_arg_get_uint(run, 0);
+ LOG_DEBUG("Got algorithm RC 0x%" PRIx32, run->ret_code);
+ }
+
+_cleanup:
+ /* free memory arguments */
+ if (mem_handles) {
+ for (uint32_t i = 0; i < run->mem_args.count; i++) {
+ if (mem_handles[i])
+ target_free_working_area(target, mem_handles[i]);
+ }
+ free(mem_handles);
+ }
+ run->hw->algo_cleanup(target, run);
+
+ return retval;
+}
+
+static int esp_algorithm_run_debug_stub(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t num_args,
+ va_list ap)
+{
+ if (!run || !run->hw)
+ return ERROR_FAIL;
+
+ int retval = run->hw->algo_init(target, run, num_args, ap);
+ if (retval != ERROR_OK)
+ return retval;
+
+ LOG_DEBUG("Algorithm start @ " TARGET_ADDR_FMT ", stack %d bytes @ " TARGET_ADDR_FMT,
+ run->stub.tramp_mapped_addr, run->stack_size, run->stub.stack_addr);
+ retval = target_start_algorithm(target,
+ run->mem_args.count, run->mem_args.params,
+ run->reg_args.count, run->reg_args.params,
+ run->stub.tramp_mapped_addr, 0,
+ run->stub.ainfo);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to start algorithm (%d)!", retval);
+ goto _cleanup;
+ }
+
+ uint32_t timeout_ms = 0; /* do not wait if 'usr_func' returned error */
+ if (retval == ERROR_OK)
+ timeout_ms = run->timeout_ms ? run->timeout_ms : DEFAULT_ALGORITHM_TIMEOUT_MS;
+ LOG_DEBUG("Wait algorithm completion");
+ retval = target_wait_algorithm(target,
+ run->mem_args.count, run->mem_args.params,
+ run->reg_args.count, run->reg_args.params,
+ 0, timeout_ms,
+ run->stub.ainfo);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to wait algorithm (%d)!", retval);
+ /* target has been forced to stop in target_wait_algorithm() */
+ }
+
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Algorithm run failed (%d)!", retval);
+ } else {
+ run->ret_code = esp_algorithm_user_arg_get_uint(run, 0);
+ LOG_DEBUG("Got algorithm RC 0x%" PRIx32, run->ret_code);
+ }
+
+_cleanup:
+ run->hw->algo_cleanup(target, run);
+
+ return retval;
+}
+
+static void reverse_binary(const uint8_t *src, uint8_t *dest, size_t length)
+{
+ size_t remaining = length % 4;
+ size_t offset = 0;
+ size_t aligned_len = ALIGN_UP(length, 4);
+
+ if (remaining > 0) {
+ /* Put extra bytes to the beginning with padding */
+ memset(dest + remaining, 0xFF, 4 - remaining);
+ for (size_t i = 0; i < remaining; i++)
+ dest[i] = src[length - remaining + i];
+ length -= remaining; /* reverse the others */
+ offset = 4;
+ }
+
+ for (size_t i = offset; i < aligned_len; i += 4) {
+ dest[i + 0] = src[length - i + offset - 4];
+ dest[i + 1] = src[length - i + offset - 3];
+ dest[i + 2] = src[length - i + offset - 2];
+ dest[i + 3] = src[length - i + offset - 1];
+ }
+}
+
+static int load_section_from_image(struct target *target,
+ struct esp_algorithm_run_data *run,
+ int section_num,
+ bool reverse)
+{
+ if (!run)
+ return ERROR_FAIL;
+
+ struct imagesection *section = &run->image.image.sections[section_num];
+ uint32_t sec_wr = 0;
+ uint8_t buf[1024];
+
+ assert(sizeof(buf) % 4 == 0);
+
+ while (sec_wr < section->size) {
+ uint32_t nb = section->size - sec_wr > sizeof(buf) ? sizeof(buf) : section->size - sec_wr;
+ size_t size_read = 0;
+ int retval = image_read_section(&run->image.image, section_num, sec_wr, nb, buf, &size_read);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to read stub section (%d)!", retval);
+ return retval;
+ }
+
+ if (reverse) {
+ size_t aligned_len = ALIGN_UP(size_read, 4);
+ uint8_t reversed_buf[aligned_len];
+
+ /* Send original size to allow padding */
+ reverse_binary(buf, reversed_buf, size_read);
+
+ /*
+ The address range accessed via the instruction bus is in reverse order (word-wise) compared to access
+ via the data bus. That is to say, address
+ 0x3FFE_0000 and 0x400B_FFFC access the same word
+ 0x3FFE_0004 and 0x400B_FFF8 access the same word
+ 0x3FFE_0008 and 0x400B_FFF4 access the same word
+ ...
+ The data bus and instruction bus of the CPU are still both little-endian,
+ so the byte order of individual words is not reversed between address spaces.
+ For example, address
+ 0x3FFE_0000 accesses the least significant byte in the word accessed by 0x400B_FFFC.
+ 0x3FFE_0001 accesses the second least significant byte in the word accessed by 0x400B_FFFC.
+ 0x3FFE_0002 accesses the second most significant byte in the word accessed by 0x400B_FFFC.
+ For more details, please refer to ESP32 TRM, Internal SRAM1 section.
+ */
+ retval = target_write_buffer(target, run->image.dram_org - sec_wr - aligned_len, aligned_len, reversed_buf);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to write stub section!");
+ return retval;
+ }
+ } else {
+ retval = target_write_buffer(target, section->base_address + sec_wr, size_read, buf);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to write stub section!");
+ return retval;
+ }
+ }
+
+ sec_wr += size_read;
+ }
+
+ return ERROR_OK;
+}
+
+/*
+ * Configuration:
+ * ----------------------------
+ * The linker script defines the memory layout for the stub code.
+ * The OpenOCD script specifies the working area address and its size.
+ * Sections defined in the linker script are organized to share the same addresses with the working area.
+ * Code and data sections are located in internal SRAM1, and OpenOCD fills these sections using the data bus.
+ */
+int esp_algorithm_load_func_image(struct target *target, struct esp_algorithm_run_data *run)
+{
+ int retval;
+ size_t tramp_sz = 0;
+ const uint8_t *tramp = NULL;
+ struct duration algo_time;
+ bool alloc_code_working_area = true;
+
+ if (!run || !run->hw)
+ return ERROR_FAIL;
+
+ if (duration_start(&algo_time) != 0) {
+ LOG_ERROR("Failed to start algo time measurement!");
+ return ERROR_FAIL;
+ }
+
+ if (run->hw->stub_tramp_get) {
+ tramp = run->hw->stub_tramp_get(target, &tramp_sz);
+ if (!tramp)
+ return ERROR_FAIL;
+ }
+
+ LOG_DEBUG("stub: base 0x%x, start 0x%" PRIx32 ", %d sections",
+ run->image.image.base_address_set ? (unsigned int)run->image.image.base_address : 0,
+ run->image.image.start_address,
+ run->image.image.num_sections);
+ run->stub.entry = run->image.image.start_address;
+
+ /* [code + trampoline] + <padding> + [data] */
+
+ /* ESP32 has reversed memory region. It will use the last part of DRAM, the others will use the first part.
+ * To avoid complexity for the backup/restore process, we will allocate a workarea for all IRAM region from
+ * the beginning. In that case no need to have a padding area.
+ */
+ if (run->image.reverse) {
+ if (target_alloc_working_area(target, run->image.iram_len, &run->stub.code) != ERROR_OK) {
+ LOG_ERROR("no working area available, can't alloc space for stub code!");
+ retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ goto _on_error;
+ }
+ alloc_code_working_area = false;
+ }
+
+ uint32_t code_size = 0;
+
+ /* Load code section */
+ for (unsigned int i = 0; i < run->image.image.num_sections; i++) {
+ struct imagesection *section = &run->image.image.sections[i];
+
+ if (section->size == 0)
+ continue;
+
+ if (section->flags & ESP_IMAGE_ELF_PHF_EXEC) {
+ LOG_DEBUG("addr " TARGET_ADDR_FMT ", sz %d, flags %" PRIx64,
+ section->base_address, section->size, section->flags);
+
+ if (alloc_code_working_area) {
+ retval = target_alloc_working_area(target, section->size, &run->stub.code);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("no working area available, can't alloc space for stub code!");
+ retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ goto _on_error;
+ }
+ }
+
+ if (section->base_address == 0) {
+ section->base_address = run->stub.code->address;
+ /* sanity check, stub is compiled to be run from working area */
+ } else if (run->stub.code->address != section->base_address) {
+ LOG_ERROR("working area " TARGET_ADDR_FMT " and stub code section " TARGET_ADDR_FMT
+ " address mismatch!",
+ section->base_address,
+ run->stub.code->address);
+ retval = ERROR_FAIL;
+ goto _on_error;
+ }
+
+ retval = load_section_from_image(target, run, i, run->image.reverse);
+ if (retval != ERROR_OK)
+ goto _on_error;
+
+ code_size += ALIGN_UP(section->size, 4);
+ break; /* Stub has one executable text section */
+ }
+ }
+
+ /* If exists, load trampoline to the code area */
+ if (tramp) {
+ if (run->stub.tramp_addr == 0) {
+ if (alloc_code_working_area) {
+ /* alloc trampoline in code working area */
+ if (target_alloc_working_area(target, tramp_sz, &run->stub.tramp) != ERROR_OK) {
+ LOG_ERROR("no working area available, can't alloc space for stub jumper!");
+ retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ goto _on_error;
+ }
+ run->stub.tramp_addr = run->stub.tramp->address;
+ }
+ }
+
+ size_t al_tramp_size = ALIGN_UP(tramp_sz, 4);
+
+ if (run->image.reverse) {
+ target_addr_t reversed_tramp_addr = run->image.dram_org - code_size;
+ uint8_t reversed_tramp[al_tramp_size];
+
+ /* Send original size to allow padding */
+ reverse_binary(tramp, reversed_tramp, tramp_sz);
+ run->stub.tramp_addr = reversed_tramp_addr - al_tramp_size;
+ LOG_DEBUG("Write reversed tramp to addr " TARGET_ADDR_FMT ", sz %zu", run->stub.tramp_addr, al_tramp_size);
+ retval = target_write_buffer(target, run->stub.tramp_addr, al_tramp_size, reversed_tramp);
+ } else {
+ LOG_DEBUG("Write tramp to addr " TARGET_ADDR_FMT ", sz %zu", run->stub.tramp_addr, tramp_sz);
+ retval = target_write_buffer(target, run->stub.tramp_addr, tramp_sz, tramp);
+ }
+
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to write stub jumper!");
+ goto _on_error;
+ }
+
+ run->stub.tramp_mapped_addr = run->image.iram_org + code_size;
+ code_size += al_tramp_size;
+ LOG_DEBUG("Tramp mapped to addr " TARGET_ADDR_FMT, run->stub.tramp_mapped_addr);
+ }
+
+ /* allocate dummy space until the data address */
+ if (alloc_code_working_area) {
+ /* we don't need to restore the padding area. */
+ uint32_t backup_working_area_prev = target->backup_working_area;
+ target->backup_working_area = 0;
+ if (target_alloc_working_area(target, run->image.iram_len - code_size, &run->stub.padding) != ERROR_OK) {
+ LOG_ERROR("no working area available, can't alloc space for stub code!");
+ retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ goto _on_error;
+ }
+ target->backup_working_area = backup_working_area_prev;
+ }
+
+ /* Load the data section */
+ for (unsigned int i = 0; i < run->image.image.num_sections; i++) {
+ struct imagesection *section = &run->image.image.sections[i];
+
+ if (section->size == 0)
+ continue;
+
+ if (!(section->flags & ESP_IMAGE_ELF_PHF_EXEC)) {
+ LOG_DEBUG("addr " TARGET_ADDR_FMT ", sz %d, flags %" PRIx64, section->base_address, section->size,
+ section->flags);
+ /* target_alloc_working_area() aligns the whole working area size to 4-byte boundary.
+ We alloc one area for both DATA and BSS, so align each of them ourselves. */
+ uint32_t data_sec_sz = ALIGN_UP(section->size, 4);
+ LOG_DEBUG("DATA sec size %" PRIu32 " -> %" PRIu32, section->size, data_sec_sz);
+ uint32_t bss_sec_sz = ALIGN_UP(run->image.bss_size, 4);
+ LOG_DEBUG("BSS sec size %" PRIu32 " -> %" PRIu32, run->image.bss_size, bss_sec_sz);
+ if (target_alloc_working_area(target, data_sec_sz + bss_sec_sz, &run->stub.data) != ERROR_OK) {
+ LOG_ERROR("no working area available, can't alloc space for stub data!");
+ retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ goto _on_error;
+ }
+ if (section->base_address == 0) {
+ section->base_address = run->stub.data->address;
+ /* sanity check, stub is compiled to be run from working area */
+ } else if (run->stub.data->address != section->base_address) {
+ LOG_ERROR("working area " TARGET_ADDR_FMT
+ " and stub data section " TARGET_ADDR_FMT
+ " address mismatch!",
+ section->base_address,
+ run->stub.data->address);
+ retval = ERROR_FAIL;
+ goto _on_error;
+ }
+
+ retval = load_section_from_image(target, run, i, false);
+ if (retval != ERROR_OK)
+ goto _on_error;
+ }
+ }
+
+ /* stack */
+ if (run->stub.stack_addr == 0 && run->stack_size > 0) {
+ /* allocate stack in data working area */
+ if (target_alloc_working_area(target, run->stack_size, &run->stub.stack) != ERROR_OK) {
+ LOG_ERROR("no working area available, can't alloc stub stack!");
+ retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ goto _on_error;
+ }
+ run->stub.stack_addr = run->stub.stack->address + run->stack_size;
+ }
+
+ if (duration_measure(&algo_time) != 0) {
+ LOG_ERROR("Failed to stop algo run measurement!");
+ retval = ERROR_FAIL;
+ goto _on_error;
+ }
+ LOG_DEBUG("Stub loaded in %g ms", duration_elapsed(&algo_time) * 1000);
+ return ERROR_OK;
+
+_on_error:
+ esp_algorithm_unload_func_image(target, run);
+ return retval;
+}
+
+int esp_algorithm_unload_func_image(struct target *target, struct esp_algorithm_run_data *run)
+{
+ if (!run)
+ return ERROR_FAIL;
+
+ target_free_all_working_areas(target);
+
+ run->stub.tramp = NULL;
+ run->stub.stack = NULL;
+ run->stub.code = NULL;
+ run->stub.data = NULL;
+ run->stub.padding = NULL;
+
+ return ERROR_OK;
+}
+
+int esp_algorithm_exec_func_image_va(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t num_args,
+ va_list ap)
+{
+ if (!run || !run->image.image.start_address_set || run->image.image.start_address == 0)
+ return ERROR_FAIL;
+
+ return esp_algorithm_run_image(target, run, num_args, ap);
+}
+
+int esp_algorithm_load_onboard_func(struct target *target, target_addr_t func_addr, struct esp_algorithm_run_data *run)
+{
+ int res;
+ const uint8_t *tramp = NULL;
+ size_t tramp_sz = 0;
+ struct duration algo_time;
+
+ if (!run || !run->hw)
+ return ERROR_FAIL;
+
+ if (duration_start(&algo_time) != 0) {
+ LOG_ERROR("Failed to start algo time measurement!");
+ return ERROR_FAIL;
+ }
+
+ if (run->hw->stub_tramp_get) {
+ tramp = run->hw->stub_tramp_get(target, &tramp_sz);
+ if (!tramp)
+ return ERROR_FAIL;
+ }
+
+ if (tramp_sz > run->on_board.code_buf_size) {
+ LOG_ERROR("Stub tramp size %zu bytes exceeds target buf size %d bytes!",
+ tramp_sz, run->on_board.code_buf_size);
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ if (run->stack_size > run->on_board.min_stack_size) {
+ LOG_ERROR("Algorithm stack size not fit into the allocated target stack!");
+ return ERROR_FAIL;
+ }
+
+ run->stub.stack_addr = run->on_board.min_stack_addr + run->stack_size;
+ run->stub.tramp_addr = run->on_board.code_buf_addr;
+ run->stub.tramp_mapped_addr = run->stub.tramp_addr;
+ run->stub.entry = func_addr;
+
+ if (tramp) {
+ res = target_write_buffer(target, run->stub.tramp_addr, tramp_sz, tramp);
+ if (res != ERROR_OK) {
+ LOG_ERROR("Failed to write stub jumper!");
+ esp_algorithm_unload_onboard_func(target, run);
+ return res;
+ }
+ }
+
+ if (duration_measure(&algo_time) != 0) {
+ LOG_ERROR("Failed to stop algo run measurement!");
+ return ERROR_FAIL;
+ }
+ LOG_DEBUG("Stub loaded in %g ms", duration_elapsed(&algo_time) * 1000);
+
+ return ERROR_OK;
+}
+
+int esp_algorithm_unload_onboard_func(struct target *target, struct esp_algorithm_run_data *run)
+{
+ return ERROR_OK;
+}
+
+int esp_algorithm_exec_onboard_func_va(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t num_args,
+ va_list ap)
+{
+ return esp_algorithm_run_debug_stub(target, run, num_args, ap);
+}
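
A worked example of the word-reversed SRAM1 mapping described in the comment inside load_section_from_image(); the constants come from the ESP32 TRM example quoted there, and the helper name is hypothetical:

    #include <stdint.h>

    /* Map an ESP32 SRAM1 data-bus address to the instruction-bus address of the
     * same word: 0x3FFE0000 <-> 0x400BFFFC, 0x3FFE0004 <-> 0x400BFFF8, ...
     * The word order is mirrored while bytes inside each word stay little-endian. */
    static uint32_t sram1_dbus_to_ibus(uint32_t dbus_addr)
    {
        const uint32_t dbus_base = 0x3FFE0000;
        const uint32_t ibus_end = 0x400C0000;   /* one past the last mirrored IRAM word */

        return ibus_end - 4 - (dbus_addr - dbus_base);
    }
    /* sram1_dbus_to_ibus(0x3FFE0008) == 0x400BFFF4, matching the quoted example. */
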
diff --git a/src/target/espressif/esp_algorithm.h b/src/target/espressif/esp_algorithm.h
new file mode 100644
index 0000000..11d2757
--- /dev/null
+++ b/src/target/espressif/esp_algorithm.h
@@ -0,0 +1,420 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/***************************************************************************
+ * Espressif chips common algorithm API for OpenOCD *
+ * Copyright (C) 2022 Espressif Systems Ltd. *
+ ***************************************************************************/
+
+#ifndef OPENOCD_TARGET_ESP_ALGORITHM_H
+#define OPENOCD_TARGET_ESP_ALGORITHM_H
+
+#include "helper/log.h"
+#include "helper/binarybuffer.h"
+#include <helper/time_support.h>
+#include <target/algorithm.h>
+#include <target/image.h>
+
+/**
+ * API defined below allows executing pieces of code on target without breaking the execution of the running program.
+ * This functionality can be useful for various debugging and maintenance procedures.
+ * @note ESP flashing code to load flasher stub on target and write/read/erase flash.
+ * Also ESP GCOV command uses some of these functions to run onboard routines to dump coverage info.
+ * Stub entry function can take up to 5 arguments and should be of the following form:
+ *
+ * int stub_entry([uint32_t a1 [, uint32_t a2 [, uint32_t a3 [, uint32_t a4 [, uint32_t a5]]]]]);
+ *
+ * The general scheme of stub code execution is shown below.
+ *
+ * ------- ----------- (initial frame) ----
+ * | | -------(registers, stub entry, stub args)------> |trampoline | ---(stub args)---> | |
+ * | | | | | |
+ * |OpenOCD| <----------(stub-specific communications)---------------------------------------> |stub|
+ * | | | | | |
+ * | | <---------(target halted event, ret code)------- |tramp break| <---(ret code)---- | |
+ * ------- ----------- ----
+ *
+ * Procedure of executing stub on target includes:
+ * 1) User prepares struct esp_algorithm_run_data and calls one of algorithm_run_xxx() functions.
+ * 2) Routine allocates all necessary stub code and data sections.
+ * 3) If a user specifies an initializer func esp_algorithm_usr_func_init_t it is called just before the stub starts.
+ * 4) If user specifies stub communication func esp_algorithm_usr_func_t (@see esp_flash_write/read in ESP flash driver)
+ * it is called just after the stub starts. When communication with stub is finished this function must return.
+ * 5) OpenOCD waits for the stub to finish (hit exit breakpoint).
+ * 6) If the user specified arguments cleanup func esp_algorithm_usr_func_done_t,
+ * it is called just after the stub finishes.
+ *
+ * There are two options to run code on target under OpenOCD control:
+ * - Run externally compiled stub code.
+ * - Run onboard pre-compiled code. @note For ESP chips debug stubs must be enabled in target code @see ESP IDF docs.
+ * The main difference between the execution of external stub code and target built-in functions is that
+ * in the latter case working areas cannot be used to allocate target memory for code and data, because they can
+ * overlap with code and data involved in on-board function execution. For example, if memory allocated in the
+ * working area for the stub stack overlaps with some on-board data used by the stub, the stack will get overwritten.
+ * The same stands for allocations in target code space.
+ *
+ * External Code Execution
+ * -----------------------
+ * To run external code on the target, the user should use esp_algorithm_run_func_image().
+ * In this case all necessary memory (code/data) is allocated in working areas that have fixed configuration
+ * defined in target TCL file. Stub code is actually a standalone program, so all its segments must have known
+ * addresses due to position-dependent code nature. So stub must be linked in such a way that its code segment
+ * starts at the beginning of the working area for code space defined in TCL. The same restriction must be applied
+ * to stub's data segment and base addresses of working area for data space. @see ESP stub flasher LD scripts.
+ * Also in order to simplify memory allocation BSS section must follow the DATA section in the stub image.
+ * The size of the BSS section must be specified in the bss_size field of struct algorithm_image.
+ * Sample stub memory map is shown below.
+ * ___________________________________________
+ * | data space working area start |
+ * | |
+ * | <stub .data segment> |
+ * |___________________________________________|
+ * | stub .bss start |
+ * | |
+ * | <stub .bss segment of size 'bss_size'> |
+ * |___________________________________________|
+ * | stub stack base |
+ * | |
+ * | <stub stack> |
+ * |___________________________________________|
+ * | |
+ * | <stub mem arg1> |
+ * |___________________________________________|
+ * | |
+ * | <stub mem arg2> |
+ * |___________________________________________|
+ * ___________________________________________
+ * | code space working area start |
+ * | |
+ * | <stub .text segment> |
+ * |___________________________________________|
+ * | |
+ * | <stub trampoline with exit breakpoint> |
+ * |___________________________________________|
+ *
+ * For example on how to execute external code with memory arguments @see esp_algo_flash_blank_check in
+ * ESP flash driver.
+ *
+ * On-Board Code Execution
+ * -----------------------
+ * To run on-board code on the target, the user should use esp_algorithm_run_onboard_func().
+ * The on-board code execution process does not need to allocate target memory for stub code and data,
+ * because the stub is pre-compiled into the code running on the target.
+ * However, it still needs memory for the stub trampoline, stack, and memory arguments.
+ * Working areas cannot be used due to possible memory layout conflicts with on-board stub code and data.
+ * Debug stubs functionality provided by ESP IDF allows OpenOCD to overcome the above problem.
+ * It provides a special descriptor with the info necessary to safely allocate memory on the target.
+ * @see struct esp_dbg_stubs_desc.
+ * That info is also used to locate memory for stub trampoline code.
+ * User can execute target function at any address, but @see ESP IDF debug stubs also provide a way to pass to the host
+ * an entry address of pre-defined registered stub functions.
+ * For example of an on-board code execution @see esp32_cmd_gcov() in ESP32 apptrace module.
+*/
+
+/**
+ * Algorithm image data.
+ * Helper struct to work with algorithms consisting of code and data segments.
+ */
+struct esp_algorithm_image {
+ /** Image. */
+ struct image image;
+ /** BSS section size. */
+ uint32_t bss_size;
+ /** IRAM start address in the linker script */
+ uint32_t iram_org;
+ /** Total reserved IRAM size */
+ uint32_t iram_len;
+ /** DRAM start address in the linker script */
+ uint32_t dram_org;
+ /** Total reserved DRAM size */
+ uint32_t dram_len;
+ /** IRAM DRAM address range reversed or not */
+ bool reverse;
+};
+
+#define ESP_IMAGE_ELF_PHF_EXEC 0x1
+
+/**
+ * Algorithm stub data.
+ */
+struct esp_algorithm_stub {
+ /** Entry addr. */
+ target_addr_t entry;
+ /** Working area for code segment. */
+ struct working_area *code;
+ /** Working area for data segment. */
+ struct working_area *data;
+ /** Working area for trampoline. */
+ struct working_area *tramp;
+ /** Working area for padding between code and data area. */
+ struct working_area *padding;
+ /** Address of the target buffer for stub trampoline. If zero tramp->address will be used. */
+ target_addr_t tramp_addr;
+ /** The trampoline code area is filled via the data bus (dbus).
+ * We need its instruction-bus (ibus) mapping to be able to initialize the PC register and start algorithm execution.
+ */
+ target_addr_t tramp_mapped_addr;
+ /** Working area for stack. */
+ struct working_area *stack;
+ /** Address of the target buffer for stack. If zero, stack->address will be used. */
+ target_addr_t stack_addr;
+ /** Address of the log buffer */
+ target_addr_t log_buff_addr;
+ /** Size of the log buffer */
+ uint32_t log_buff_size;
+ /** Algorithm's arch-specific info. */
+ void *ainfo;
+};
+
+/**
+ * Algorithm stub in-memory arguments.
+ */
+struct esp_algorithm_mem_args {
+ /** Memory params. */
+ struct mem_param *params;
+ /** Number of memory params. */
+ uint32_t count;
+};
+
+/**
+ * Algorithm stub register arguments.
+ */
+struct esp_algorithm_reg_args {
+ /** Algorithm register params. User args start from user_first_reg_param */
+ struct reg_param *params;
+ /** Number of register params. */
+ uint32_t count;
+ /** The first several reg_params can be used by stub itself (e.g. for trampoline).
+ * This is the index of the first reg_param available for user to pass args to algorithm stub. */
+ uint32_t first_user_param;
+};
+
+struct esp_algorithm_run_data;
+
+/**
+ * @brief Algorithm run function.
+ *
+ * @param target Pointer to target.
+ * @param run Pointer to algo run data.
+ * @param arg Function specific argument.
+ *
+ * @return ERROR_OK on success, otherwise ERROR_XXX.
+ */
+typedef int (*esp_algorithm_func_t)(struct target *target, struct esp_algorithm_run_data *run, void *arg);
+
+/**
+ * @brief Host part of algorithm.
+ * This function will be called while stub is running on target.
+ * It can be used for communication with stub.
+ *
+ * @param target Pointer to target.
+ * @param usr_arg Function specific argument.
+ *
+ * @return ERROR_OK on success, otherwise ERROR_XXX.
+ */
+typedef int (*esp_algorithm_usr_func_t)(struct target *target, void *usr_arg);
+
+/**
+ * @brief Algorithm's arguments setup function.
+ * This function will be called just before stub start.
+ * It must return when all operations with running stub are completed.
+ * It can be used to prepare stub memory parameters.
+ *
+ * @param target Pointer to target.
+ * @param run Pointer to algo run data.
+ * @param usr_arg Function specific argument. The same as for esp_algorithm_usr_func_t.
+ *
+ * @return ERROR_OK on success, otherwise ERROR_XXX.
+ */
+typedef int (*esp_algorithm_usr_func_init_t)(struct target *target,
+ struct esp_algorithm_run_data *run,
+ void *usr_arg);
+
+/**
+ * @brief Algorithm's arguments cleanup function.
+ * This function will be called just after stub exit.
+ * It can be used to cleanup stub memory parameters.
+ *
+ * @param target Pointer to target.
+ * @param run Pointer to algo run data.
+ * @param usr_arg Function specific argument. The same as for esp_algorithm_usr_func_t.
+ *
+ * @return ERROR_OK on success, otherwise ERROR_XXX.
+ */
+typedef void (*esp_algorithm_usr_func_done_t)(struct target *target,
+ struct esp_algorithm_run_data *run,
+ void *usr_arg);
+
+struct esp_algorithm_hw {
+ int (*algo_init)(struct target *target, struct esp_algorithm_run_data *run, uint32_t num_args, va_list ap);
+ int (*algo_cleanup)(struct target *target, struct esp_algorithm_run_data *run);
+ const uint8_t *(*stub_tramp_get)(struct target *target, size_t *size);
+};
+
+/**
+ * Algorithm run data.
+ */
+struct esp_algorithm_run_data {
+ /** Algorithm completion timeout in ms. If 0, default value will be used */
+ uint32_t timeout_ms;
+ /** Algorithm stack size. */
+ uint32_t stack_size;
+ /** Algorithm register arguments. */
+ struct esp_algorithm_reg_args reg_args;
+ /** Algorithm memory arguments. */
+ struct esp_algorithm_mem_args mem_args;
+ /** Algorithm arch-specific info. For Xtensa this should point to struct xtensa_algorithm. */
+ void *arch_info;
+ /** Algorithm return code. */
+ int32_t ret_code;
+ /** Stub. */
+ struct esp_algorithm_stub stub;
+ union {
+ struct {
+ /** Size of the pre-allocated on-board buffer for stub's code. */
+ uint32_t code_buf_size;
+ /** Address of pre-compiled target buffer for stub trampoline. */
+ target_addr_t code_buf_addr;
+ /** Size of the pre-allocated on-board buffer for stub's stack. */
+ uint32_t min_stack_size;
+ /** Pre-compiled target buffer's addr for stack. */
+ target_addr_t min_stack_addr;
+ } on_board;
+ struct esp_algorithm_image image;
+ };
+ /** Host side algorithm function argument. */
+ void *usr_func_arg;
+ /** Host side algorithm function. */
+ esp_algorithm_usr_func_t usr_func;
+ /** Host side algorithm function setup routine. */
+ esp_algorithm_usr_func_init_t usr_func_init;
+ /** Host side algorithm function cleanup routine. */
+ esp_algorithm_usr_func_done_t usr_func_done;
+ /** Algorithm run function: see algorithm_run_xxx for example. */
+ esp_algorithm_func_t algo_func;
+ /** HW specific API */
+ const struct esp_algorithm_hw *hw;
+};
+
+int esp_algorithm_load_func_image(struct target *target, struct esp_algorithm_run_data *run);
+int esp_algorithm_unload_func_image(struct target *target, struct esp_algorithm_run_data *run);
+
+int esp_algorithm_exec_func_image_va(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t num_args,
+ va_list ap);
+
+/**
+ * @brief Loads and runs stub from specified image.
+ * This function should be used to run external stub code on target.
+ *
+ * @param target Pointer to target.
+ * @param run Pointer to algo run data.
+ * @param num_args Number of stub arguments that follow.
+ *
+ * @return ERROR_OK on success, otherwise ERROR_XXX. Stub return code is in run->ret_code.
+ */
+static inline int esp_algorithm_run_func_image_va(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t num_args,
+ va_list ap)
+{
+ int ret = esp_algorithm_load_func_image(target, run);
+ if (ret != ERROR_OK)
+ return ret;
+ ret = esp_algorithm_exec_func_image_va(target, run, num_args, ap);
+ int rc = esp_algorithm_unload_func_image(target, run);
+ return ret != ERROR_OK ? ret : rc;
+}
+
+static inline int esp_algorithm_run_func_image(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t num_args,
+ ...)
+{
+ va_list ap;
+ va_start(ap, num_args);
+ int retval = esp_algorithm_run_func_image_va(target, run, num_args, ap);
+ va_end(ap);
+ return retval;
+}
+
+int esp_algorithm_load_onboard_func(struct target *target,
+ target_addr_t func_addr,
+ struct esp_algorithm_run_data *run);
+int esp_algorithm_unload_onboard_func(struct target *target, struct esp_algorithm_run_data *run);
+int esp_algorithm_exec_onboard_func_va(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t num_args,
+ va_list ap);
+
+/**
+ * @brief Runs pre-compiled on-board function.
+ * This function should be used to run on-board stub code.
+ *
+ * @param target Pointer to target.
+ * @param run Pointer to algo run data.
+ * @param func_entry Address of the function to run.
+ * @param num_args Number of function arguments that follow.
+ *
+ * @return ERROR_OK on success, otherwise ERROR_XXX. Stub return code is in run->ret_code.
+ */
+static inline int esp_algorithm_run_onboard_func_va(struct target *target,
+ struct esp_algorithm_run_data *run,
+ target_addr_t func_addr,
+ uint32_t num_args,
+ va_list ap)
+{
+ int ret = esp_algorithm_load_onboard_func(target, func_addr, run);
+ if (ret != ERROR_OK)
+ return ret;
+ ret = esp_algorithm_exec_onboard_func_va(target, run, num_args, ap);
+ if (ret != ERROR_OK)
+ return ret;
+ return esp_algorithm_unload_onboard_func(target, run);
+}
+
+static inline int esp_algorithm_run_onboard_func(struct target *target,
+ struct esp_algorithm_run_data *run,
+ target_addr_t func_addr,
+ uint32_t num_args,
+ ...)
+{
+ va_list ap;
+ va_start(ap, num_args);
+ int retval = esp_algorithm_run_onboard_func_va(target, run, func_addr, num_args, ap);
+ va_end(ap);
+ return retval;
+}
+
+/**
+ * @brief Set the value of an argument passed via registers to the stub main function.
+ */
+static inline void esp_algorithm_user_arg_set_uint(struct esp_algorithm_run_data *run,
+ int arg_num,
+ uint64_t val)
+{
+ struct reg_param *param = &run->reg_args.params[run->reg_args.first_user_param + arg_num];
+
+ assert(param->size <= 64);
+
+ if (param->size <= 32)
+ buf_set_u32(param->value, 0, param->size, val);
+ else
+ buf_set_u64(param->value, 0, param->size, val);
+}
+
+/**
+ * @brief Get the value of an argument passed via registers from the stub main function.
+ */
+static inline uint64_t esp_algorithm_user_arg_get_uint(struct esp_algorithm_run_data *run, int arg_num)
+{
+ struct reg_param *param = &run->reg_args.params[run->reg_args.first_user_param + arg_num];
+
+ assert(param->size <= 64);
+
+ if (param->size <= 32)
+ return buf_get_u32(param->value, 0, param->size);
+ return buf_get_u64(param->value, 0, param->size);
+}
+
+#endif /* OPENOCD_TARGET_ESP_ALGORITHM_H */
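
A minimal sketch of how a caller might drive the external-stub path declared above; the image path, memory layout values, and stub argument are placeholders, and a real driver would normally take the hw ops from esp_common rather than hard-coding xtensa_algo_hw:

    #include "esp_algorithm.h"
    #include "esp_xtensa_algorithm.h"   /* xtensa_algo_hw */

    static int run_example_stub(struct target *target)
    {
        struct esp_algorithm_run_data run = {
            .hw = &xtensa_algo_hw,      /* arch-specific algo_init/algo_cleanup/tramp ops */
            .stack_size = 2048,         /* stack is allocated in the data working area */
            .timeout_ms = 0,            /* 0 -> DEFAULT_ALGORITHM_TIMEOUT_MS */
        };

        /* describe the externally built stub image (placeholder layout values) */
        if (image_open(&run.image.image, "example_stub.elf", "elf") != ERROR_OK)
            return ERROR_FAIL;
        run.image.bss_size = 0x100;     /* BSS must follow DATA in the stub image */
        run.image.iram_org = 0x40090000;
        run.image.iram_len = 0x4000;
        run.image.dram_org = 0x3fffc000;
        run.image.dram_len = 0x4000;
        run.image.reverse = false;

        /* load, run stub_entry(0x1234), unload; the stub return code lands in run.ret_code */
        int ret = esp_algorithm_run_func_image(target, &run, 1, 0x1234);
        image_close(&run.image.image);
        if (ret != ERROR_OK)
            return ret;
        return run.ret_code == 0 ? ERROR_OK : ERROR_FAIL;
    }
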
diff --git a/src/target/espressif/esp_xtensa.c b/src/target/espressif/esp_xtensa.c
index 0bd2cdd..11895d2 100644
--- a/src/target/espressif/esp_xtensa.c
+++ b/src/target/espressif/esp_xtensa.c
@@ -12,10 +12,12 @@
#include <stdbool.h>
#include <stdint.h>
#include <target/smp.h>
-#include "esp_xtensa_apptrace.h"
#include <target/register.h>
+#include "esp.h"
#include "esp_xtensa.h"
+#include "esp_xtensa_apptrace.h"
#include "esp_semihosting.h"
+#include "esp_xtensa_algorithm.h"
#define ESP_XTENSA_DBGSTUBS_UPDATE_DATA_ENTRY(_e_) \
do { \
@@ -68,6 +70,10 @@ int esp_xtensa_init_arch_info(struct target *target,
int ret = xtensa_init_arch_info(target, &esp_xtensa->xtensa, dm_cfg);
if (ret != ERROR_OK)
return ret;
+ ret = esp_common_init(&esp_xtensa->esp, &xtensa_algo_hw);
+ if (ret != ERROR_OK)
+ return ret;
+
esp_xtensa->semihost.ops = (struct esp_semihost_ops *)semihost_ops;
esp_xtensa->apptrace.hw = &esp_xtensa_apptrace_hw;
return ERROR_OK;
diff --git a/src/target/espressif/esp_xtensa_algorithm.c b/src/target/espressif/esp_xtensa_algorithm.c
new file mode 100644
index 0000000..68005cb
--- /dev/null
+++ b/src/target/espressif/esp_xtensa_algorithm.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/***************************************************************************
+ * Module to run arbitrary code on Xtensa using OpenOCD *
+ * Copyright (C) 2019 Espressif Systems Ltd. *
+ ***************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <target/xtensa/xtensa.h>
+#include "esp_xtensa_algorithm.h"
+
+static int esp_xtensa_algo_init(struct target *target, struct esp_algorithm_run_data *run,
+ uint32_t num_args, va_list ap);
+static int esp_xtensa_algo_cleanup(struct target *target, struct esp_algorithm_run_data *run);
+static const uint8_t *esp_xtensa_stub_tramp_get(struct target *target, size_t *size);
+
+const struct esp_algorithm_hw xtensa_algo_hw = {
+ .algo_init = esp_xtensa_algo_init,
+ .algo_cleanup = esp_xtensa_algo_cleanup,
+ .stub_tramp_get = esp_xtensa_stub_tramp_get,
+};
+
+/* Generated from contrib/loaders/trampoline/espressif/xtensa/esp_xtensa_stub_tramp_win.S */
+static const uint8_t esp_xtensa_stub_tramp_win[] = {
+#include "../../../contrib/loaders/trampoline/espressif/xtensa/esp_xtensa_stub_tramp_win.inc"
+};
+
+static const uint8_t *esp_xtensa_stub_tramp_get(struct target *target, size_t *size)
+{
+ struct xtensa *xtensa = target_to_xtensa(target);
+
+ if (!xtensa->core_config->windowed) {
+ LOG_ERROR("Running stubs is not supported for cores without windowed registers option!");
+ return NULL;
+ }
+ *size = sizeof(esp_xtensa_stub_tramp_win);
+ return esp_xtensa_stub_tramp_win;
+}
+
+static int esp_xtensa_algo_regs_init_start(struct target *target, struct esp_algorithm_run_data *run)
+{
+ uint32_t stack_addr = run->stub.stack_addr;
+
+ LOG_TARGET_DEBUG(target, "Check stack addr 0x%x", stack_addr);
+ if (stack_addr & 0xFUL) {
+ stack_addr &= ~0xFUL;
+ LOG_TARGET_DEBUG(target, "Adjust stack addr to 0x%x", stack_addr);
+ }
+ stack_addr -= 16;
+ struct reg_param *params = run->reg_args.params;
+ init_reg_param(&params[0], "a0", 32, PARAM_OUT); /*TODO: move to tramp */
+ init_reg_param(&params[1], "a1", 32, PARAM_OUT);
+ init_reg_param(&params[2], "a8", 32, PARAM_OUT);
+ init_reg_param(&params[3], "windowbase", 32, PARAM_OUT); /*TODO: move to tramp */
+ init_reg_param(&params[4], "windowstart", 32, PARAM_OUT); /*TODO: move to tramp */
+ init_reg_param(&params[5], "ps", 32, PARAM_OUT);
+ buf_set_u32(params[0].value, 0, 32, 0); /* a0 TODO: move to tramp */
+ buf_set_u32(params[1].value, 0, 32, stack_addr); /* a1 */
+ buf_set_u32(params[2].value, 0, 32, run->stub.entry); /* a8 */
+ buf_set_u32(params[3].value, 0, 32, 0x0); /* initial window base TODO: move to tramp */
+ buf_set_u32(params[4].value, 0, 32, 0x1); /* initial window start TODO: move to tramp */
+ buf_set_u32(params[5].value, 0, 32, 0x60025); /* enable WOE, UM and debug interrupts level (6) */
+ return ERROR_OK;
+}
+
+static int esp_xtensa_algo_init(struct target *target, struct esp_algorithm_run_data *run,
+ uint32_t num_args, va_list ap)
+{
+ enum xtensa_mode core_mode = XT_MODE_ANY;
+ static const char *const arg_regs[] = { "a2", "a3", "a4", "a5", "a6" };
+
+ if (!run)
+ return ERROR_FAIL;
+
+ if (num_args > ARRAY_SIZE(arg_regs)) {
+ LOG_ERROR("Too many algo user args %u! Max %zu args are supported.", num_args, ARRAY_SIZE(arg_regs));
+ return ERROR_FAIL;
+ }
+
+ struct xtensa_algorithm *ainfo = calloc(1, sizeof(struct xtensa_algorithm));
+ if (!ainfo) {
+ LOG_ERROR("Unable to allocate memory");
+ return ERROR_FAIL;
+ }
+
+ if (run->arch_info) {
+ struct xtensa_algorithm *xtensa_algo = run->arch_info;
+ core_mode = xtensa_algo->core_mode;
+ }
+
+ run->reg_args.first_user_param = ESP_XTENSA_STUB_ARGS_FUNC_START;
+ run->reg_args.count = run->reg_args.first_user_param + num_args;
+ if (num_args == 0)
+ run->reg_args.count++; /* a2 reg is used as the 1st arg and return code */
+ LOG_DEBUG("reg params count %d (%d/%d).",
+ run->reg_args.count,
+ run->reg_args.first_user_param,
+ num_args);
+ run->reg_args.params = calloc(run->reg_args.count, sizeof(struct reg_param));
+ if (!run->reg_args.params) {
+ free(ainfo);
+ LOG_ERROR("Unable to allocate memory");
+ return ERROR_FAIL;
+ }
+
+ esp_xtensa_algo_regs_init_start(target, run);
+
+ init_reg_param(&run->reg_args.params[run->reg_args.first_user_param + 0], "a2", 32, PARAM_IN_OUT);
+
+ if (num_args > 0) {
+ uint32_t arg = va_arg(ap, uint32_t);
+ esp_algorithm_user_arg_set_uint(run, 0, arg);
+ LOG_DEBUG("Set arg[0] = %" PRIu32 " (%s)", arg, run->reg_args.params[run->reg_args.first_user_param + 0].reg_name);
+ } else {
+ esp_algorithm_user_arg_set_uint(run, 0, 0);
+ }
+
+ for (unsigned int i = 1; i < num_args; i++) {
+ uint32_t arg = va_arg(ap, uint32_t);
+ init_reg_param(&run->reg_args.params[run->reg_args.first_user_param + i], (char *)arg_regs[i], 32, PARAM_OUT);
+ esp_algorithm_user_arg_set_uint(run, i, arg);
+ LOG_DEBUG("Set arg[%u] = %" PRIu32 " (%s)", i, arg, run->reg_args.params[run->reg_args.first_user_param + i].reg_name);
+ }
+
+ ainfo->core_mode = core_mode;
+ run->stub.ainfo = ainfo;
+ return ERROR_OK;
+}
+
+static int esp_xtensa_algo_cleanup(struct target *target, struct esp_algorithm_run_data *run)
+{
+ free(run->stub.ainfo);
+ for (uint32_t i = 0; i < run->reg_args.count; i++)
+ destroy_reg_param(&run->reg_args.params[i]);
+ free(run->reg_args.params);
+ return ERROR_OK;
+}
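For orientation, here is a minimal caller-side sketch of how these hardware ops get exercised: a chip driver fills an esp_algorithm_run_data descriptor, points arch_info at a struct xtensa_algorithm to choose the core mode, and passes the user arguments through the SMP wrapper added later in this patch. The exact esp_algorithm_run_data layout lives in esp_algorithm.h (not shown here) and the stub image/stack setup is chip-specific, so the field wiring below is illustrative only.

/* Illustrative sketch; assumes the usual esp_xtensa_smp.h / esp_xtensa_algorithm.h
 * includes and omits the chip-specific stub image, stack and hw-ops setup. */
static int example_run_stub(struct target *target, uint32_t arg0, uint32_t arg1)
{
    struct esp_algorithm_run_data run = { 0 };
    struct xtensa_algorithm ainfo = { .core_mode = XT_MODE_ANY };

    run.arch_info = &ainfo; /* consumed by esp_xtensa_algo_init() above */
    /* ...fill run.stub (entry, stack_addr, code/data areas) and the chip's algo hw ops here... */

    /* arg0/arg1 end up in a2/a3 via esp_xtensa_algo_init() */
    return esp_xtensa_smp_run_func_image(target, &run, 2, arg0, arg1);
}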
diff --git a/src/target/espressif/esp_xtensa_algorithm.h b/src/target/espressif/esp_xtensa_algorithm.h
new file mode 100644
index 0000000..36fa1a3
--- /dev/null
+++ b/src/target/espressif/esp_xtensa_algorithm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/***************************************************************************
+ * Module to run arbitrary code on Xtensa using OpenOCD *
+ * Copyright (C) 2019 Espressif Systems Ltd. *
+ ***************************************************************************/
+
+#ifndef OPENOCD_TARGET_ESP_XTENSA_ALGO_H
+#define OPENOCD_TARGET_ESP_XTENSA_ALGO_H
+
+#include <target/xtensa/xtensa.h>
+#include <target/espressif/esp_algorithm.h>
+
+/** Index of the first user-defined algo arg. @see algorithm_stub */
+#define ESP_XTENSA_STUB_ARGS_FUNC_START 6
+
+extern const struct esp_algorithm_hw xtensa_algo_hw;
+
+#endif /* OPENOCD_TARGET_ESP_XTENSA_ALGO_H */
diff --git a/src/target/espressif/esp_xtensa_smp.c b/src/target/espressif/esp_xtensa_smp.c
index 1d70be9..f883b1c 100644
--- a/src/target/espressif/esp_xtensa_smp.c
+++ b/src/target/espressif/esp_xtensa_smp.c
@@ -16,6 +16,7 @@
#include <target/semihosting_common.h>
#include "esp_xtensa_smp.h"
#include "esp_xtensa_semihosting.h"
+#include "esp_algorithm.h"
/*
Multiprocessor stuff common:
@@ -495,6 +496,83 @@ int esp_xtensa_smp_watchpoint_remove(struct target *target, struct watchpoint *w
return ERROR_OK;
}
+int esp_xtensa_smp_run_func_image(struct target *target, struct esp_algorithm_run_data *run, uint32_t num_args, ...)
+{
+ struct target *run_target = target;
+ struct target_list *head;
+ va_list ap;
+ uint32_t smp_break = 0;
+ int res;
+
+ if (target->smp) {
+ /* find first HALTED and examined core */
+ run_target = NULL;
+ foreach_smp_target(head, target->smp_targets) {
+ if (target_was_examined(head->target) && head->target->state == TARGET_HALTED) {
+ run_target = head->target;
+ break;
+ }
+ }
+ if (!run_target) {
+ LOG_ERROR("Failed to find a HALTED and examined core!");
+ return ERROR_FAIL;
+ }
+
+ res = esp_xtensa_smp_smpbreak_disable(run_target, &smp_break);
+ if (res != ERROR_OK)
+ return res;
+ }
+
+ va_start(ap, num_args);
+ int algo_res = esp_algorithm_run_func_image_va(run_target, run, num_args, ap);
+ va_end(ap);
+
+ if (target->smp) {
+ res = esp_xtensa_smp_smpbreak_restore(run_target, smp_break);
+ if (res != ERROR_OK)
+ return res;
+ }
+ return algo_res;
+}
+
+int esp_xtensa_smp_run_onboard_func(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t func_addr,
+ uint32_t num_args,
+ ...)
+{
+ struct target *run_target = target;
+ struct target_list *head;
+ va_list ap;
+ uint32_t smp_break = 0;
+ int res;
+
+ if (target->smp) {
+ /* find first HALTED and examined core */
+ run_target = NULL;
+ foreach_smp_target(head, target->smp_targets) {
+ if (target_was_examined(head->target) && head->target->state == TARGET_HALTED) {
+ run_target = head->target;
+ break;
+ }
+ }
+ if (!run_target) {
+ LOG_ERROR("Failed to find a HALTED and examined core!");
+ return ERROR_FAIL;
+ }
+ res = esp_xtensa_smp_smpbreak_disable(run_target, &smp_break);
+ if (res != ERROR_OK)
+ return res;
+ }
+
+ va_start(ap, num_args);
+ int algo_res = esp_algorithm_run_onboard_func_va(run_target, run, func_addr, num_args, ap);
+ va_end(ap);
+
+ if (target->smp) {
+ res = esp_xtensa_smp_smpbreak_restore(run_target, smp_break);
+ if (res != ERROR_OK)
+ return res;
+ }
+ return algo_res;
+}
+
int esp_xtensa_smp_init_arch_info(struct target *target,
struct esp_xtensa_smp_common *esp_xtensa_smp,
struct xtensa_debug_module_config *dm_cfg,
@@ -746,7 +824,7 @@ COMMAND_HANDLER(esp_xtensa_smp_cmd_perfmon_dump)
struct target *curr;
foreach_smp_target(head, target->smp_targets) {
curr = head->target;
- LOG_INFO("CPU%d:", curr->coreid);
+ LOG_TARGET_INFO(curr, ":");
int ret = CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
target_to_xtensa(curr));
if (ret != ERROR_OK)
diff --git a/src/target/espressif/esp_xtensa_smp.h b/src/target/espressif/esp_xtensa_smp.h
index 4e4f3b3..39afd8a 100644
--- a/src/target/espressif/esp_xtensa_smp.h
+++ b/src/target/espressif/esp_xtensa_smp.h
@@ -9,6 +9,7 @@
#define OPENOCD_TARGET_XTENSA_ESP_SMP_H
#include "esp_xtensa.h"
+#include "esp_algorithm.h"
struct esp_xtensa_smp_chip_ops {
int (*poll)(struct target *target);
@@ -47,7 +48,12 @@ int esp_xtensa_smp_init_arch_info(struct target *target,
struct xtensa_debug_module_config *dm_cfg,
const struct esp_xtensa_smp_chip_ops *chip_ops,
const struct esp_semihost_ops *semihost_ops);
-
+int esp_xtensa_smp_run_func_image(struct target *target, struct esp_algorithm_run_data *run, uint32_t num_args, ...);
+int esp_xtensa_smp_run_onboard_func(struct target *target,
+ struct esp_algorithm_run_data *run,
+ uint32_t func_addr,
+ uint32_t num_args,
+ ...);
extern const struct command_registration esp_xtensa_smp_command_handlers[];
extern const struct command_registration esp_xtensa_smp_xtensa_command_handlers[];
extern const struct command_registration esp_xtensa_smp_esp_command_handlers[];
diff --git a/src/target/mips32.h b/src/target/mips32.h
index d072eb9..fc89624 100644
--- a/src/target/mips32.h
+++ b/src/target/mips32.h
@@ -380,7 +380,8 @@ struct mips32_algorithm {
#define MIPS32_OP_XORI 0x0Eu
#define MIPS32_OP_XOR 0x26u
#define MIPS32_OP_SLTU 0x2Bu
-#define MIPS32_OP_SRL 0x03u
+#define MIPS32_OP_SRL 0x02u
+#define MIPS32_OP_SRA 0x03u
#define MIPS32_OP_SYNCI 0x1Fu
#define MIPS32_OP_SLL 0x00u
#define MIPS32_OP_SLTI 0x0Au
@@ -439,7 +440,8 @@ struct mips32_algorithm {
#define MIPS32_ISA_SLL(dst, src, sa) MIPS32_R_INST(MIPS32_OP_SPECIAL, 0, src, dst, sa, MIPS32_OP_SLL)
#define MIPS32_ISA_SLTI(tar, src, val) MIPS32_I_INST(MIPS32_OP_SLTI, src, tar, val)
#define MIPS32_ISA_SLTU(dst, src, tar) MIPS32_R_INST(MIPS32_OP_SPECIAL, src, tar, dst, 0, MIPS32_OP_SLTU)
-#define MIPS32_ISA_SRL(reg, src, off) MIPS32_R_INST(0, 0, src, reg, off, MIPS32_OP_SRL)
+#define MIPS32_ISA_SRA(reg, src, off) MIPS32_R_INST(MIPS32_OP_SPECIAL, 0, src, reg, off, MIPS32_OP_SRA)
+#define MIPS32_ISA_SRL(reg, src, off) MIPS32_R_INST(MIPS32_OP_SPECIAL, 0, src, reg, off, MIPS32_OP_SRL)
#define MIPS32_ISA_SYNC 0xFu
#define MIPS32_ISA_SYNCI(off, base) MIPS32_I_INST(MIPS32_OP_REGIMM, base, MIPS32_OP_SYNCI, off)
diff --git a/src/target/mips32_pracc.c b/src/target/mips32_pracc.c
index 9f0d87c..db50ef9 100644
--- a/src/target/mips32_pracc.c
+++ b/src/target/mips32_pracc.c
@@ -842,12 +842,12 @@ int mips32_pracc_write_regs(struct mips32_common *mips32)
};
uint32_t cp0_write_data[] = {
+ /* status */
+ c0rs[0],
/* lo */
gprs[32],
/* hi */
gprs[33],
- /* status */
- c0rs[0],
/* badvaddr */
c0rs[1],
/* cause */
@@ -856,6 +856,9 @@ int mips32_pracc_write_regs(struct mips32_common *mips32)
c0rs[3],
};
+ /* Write CP0 Status Register first, changes on EXL or ERL bits
+ * may lead to different behaviour on writing to other CP0 registers.
+ */
for (size_t i = 0; i < ARRAY_SIZE(cp0_write_code); i++) {
/* load CP0 value in $1 */
pracc_add_li32(&ctx, 1, cp0_write_data[i], 0);
diff --git a/src/target/mips_m4k.c b/src/target/mips_m4k.c
index 0a06bb1..ad98089 100644
--- a/src/target/mips_m4k.c
+++ b/src/target/mips_m4k.c
@@ -142,7 +142,7 @@ static int mips_m4k_halt_smp(struct target *target)
ret = mips_m4k_halt(curr);
if (ret != ERROR_OK) {
- LOG_ERROR("halt failed target->coreid: %" PRId32, curr->coreid);
+ LOG_TARGET_ERROR(curr, "halt failed.");
retval = ret;
}
}
@@ -412,8 +412,8 @@ static int mips_m4k_restore_smp(struct target *target, uint32_t address, int han
handle_breakpoints, 0);
if (ret != ERROR_OK) {
- LOG_ERROR("target->coreid :%" PRId32 " failed to resume at address :0x%" PRIx32,
- curr->coreid, address);
+ LOG_TARGET_ERROR(curr, "failed to resume at address: 0x%" PRIx32,
+ address);
retval = ret;
}
}
diff --git a/src/target/smp.c b/src/target/smp.c
index effc63f..50b19d0 100644
--- a/src/target/smp.c
+++ b/src/target/smp.c
@@ -132,6 +132,9 @@ COMMAND_HANDLER(handle_smp_gdb_command)
{
struct target *target = get_current_target(CMD_CTX);
int retval = ERROR_OK;
+
+ LOG_WARNING(DEPRECATED_MSG);
+
if (!list_empty(target->smp_targets)) {
if (CMD_ARGC == 1) {
int coreid = 0;
diff --git a/src/target/target.c b/src/target/target.c
index cfd0641..216dcb2 100644
--- a/src/target/target.c
+++ b/src/target/target.c
@@ -299,23 +299,6 @@ const char *target_reset_mode_name(enum target_reset_mode reset_mode)
return cp;
}
-/* determine the number of the new target */
-static int new_target_number(void)
-{
- struct target *t;
- int x;
-
- /* number is 0 based */
- x = -1;
- t = all_targets;
- while (t) {
- if (x < t->target_number)
- x = t->target_number;
- t = t->next;
- }
- return x + 1;
-}
-
static void append_to_list_all_targets(struct target *target)
{
struct target **t = &all_targets;
@@ -451,7 +434,7 @@ void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_
target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}
-/* return a pointer to a configured target; id is name or number */
+/* return a pointer to a configured target; id is name or index in all_targets */
struct target *get_target(const char *id)
{
struct target *target;
@@ -464,36 +447,17 @@ struct target *get_target(const char *id)
return target;
}
- /* It's OK to remove this fallback sometime after August 2010 or so */
-
- /* no match, try as number */
- unsigned num;
- if (parse_uint(id, &num) != ERROR_OK)
+ /* try as index */
+ unsigned int index, counter;
+ if (parse_uint(id, &index) != ERROR_OK)
return NULL;
- for (target = all_targets; target; target = target->next) {
- if (target->target_number == (int)num) {
- LOG_WARNING("use '%s' as target identifier, not '%u'",
- target_name(target), num);
- return target;
- }
- }
-
- return NULL;
-}
+ for (target = all_targets, counter = index;
+ target && counter;
+ target = target->next, --counter)
+ ;
-/* returns a pointer to the n-th configured target */
-struct target *get_target_by_num(int num)
-{
- struct target *target = all_targets;
-
- while (target) {
- if (target->target_number == num)
- return target;
- target = target->next;
- }
-
- return NULL;
+ return target;
}
struct target *get_current_target(struct command_context *cmd_ctx)
@@ -712,10 +676,14 @@ static int default_check_reset(struct target *target)
* Keep in sync */
int target_examine_one(struct target *target)
{
+ LOG_TARGET_DEBUG(target, "Examination started");
+
target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
int retval = target->type->examine(target);
if (retval != ERROR_OK) {
+ LOG_TARGET_ERROR(target, "Examination failed");
+ LOG_TARGET_DEBUG(target, "examine() returned error code %d", retval);
target_reset_examined(target);
target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
return retval;
@@ -725,6 +693,7 @@ int target_examine_one(struct target *target)
target_set_examined(target);
target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
+ LOG_TARGET_INFO(target, "Examination succeeded");
return ERROR_OK;
}
@@ -2222,6 +2191,9 @@ uint32_t target_get_working_area_avail(struct target *target)
static void target_destroy(struct target *target)
{
+ breakpoint_remove_all(target);
+ watchpoint_remove_all(target);
+
if (target->type->deinit_target)
target->type->deinit_target(target);
@@ -2850,10 +2822,10 @@ COMMAND_HANDLER(handle_targets_command)
}
}
- struct target *target = all_targets;
+ unsigned int index = 0;
command_print(CMD, " TargetName Type Endian TapName State ");
command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
- while (target) {
+ for (struct target *target = all_targets; target; target = target->next, ++index) {
const char *state;
char marker = ' ';
@@ -2868,7 +2840,7 @@ COMMAND_HANDLER(handle_targets_command)
/* keep columns lined up to match the headers above */
command_print(CMD,
"%2d%c %-18s %-10s %-6s %-18s %s",
- target->target_number,
+ index,
marker,
target_name(target),
target_type_name(target),
@@ -2876,7 +2848,6 @@ COMMAND_HANDLER(handle_targets_command)
target->endianness)->name,
target->tap->dotted_name,
state);
- target = target->next;
}
return retval;
@@ -3973,7 +3944,7 @@ static int handle_bp_command_set(struct command_invocation *cmd,
} else if (addr == 0) {
if (!target->type->add_context_breakpoint) {
- LOG_ERROR("Context breakpoint not available");
+ LOG_TARGET_ERROR(target, "Context breakpoint not available");
return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
}
retval = context_breakpoint_add(target, asid, length, hw);
@@ -3983,7 +3954,7 @@ static int handle_bp_command_set(struct command_invocation *cmd,
} else {
if (!target->type->add_hybrid_breakpoint) {
- LOG_ERROR("Hybrid breakpoint not available");
+ LOG_TARGET_ERROR(target, "Hybrid breakpoint not available");
return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
}
retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
@@ -4120,7 +4091,7 @@ COMMAND_HANDLER(handle_wp_command)
type = WPT_ACCESS;
break;
default:
- LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
+ LOG_TARGET_ERROR(target, "invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
return ERROR_COMMAND_SYNTAX_ERROR;
}
/* fall through */
@@ -4136,7 +4107,7 @@ COMMAND_HANDLER(handle_wp_command)
int retval = watchpoint_add(target, addr, length, type,
data_value, data_mask);
if (retval != ERROR_OK)
- LOG_ERROR("Failure setting watchpoints");
+ LOG_TARGET_ERROR(target, "Failure setting watchpoints");
return retval;
}
@@ -4329,7 +4300,7 @@ COMMAND_HANDLER(handle_profile_command)
if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
return ERROR_COMMAND_SYNTAX_ERROR;
- const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
+ const uint32_t MAX_PROFILE_SAMPLE_NUM = 1000000;
uint32_t offset;
uint32_t num_of_samples;
int retval = ERROR_OK;
@@ -5062,8 +5033,7 @@ void target_handle_event(struct target *target, enum target_event e)
for (teap = target->event_action; teap; teap = teap->next) {
if (teap->event == e) {
- LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
- target->target_number,
+ LOG_DEBUG("target: %s (%s) event: %d (%s) action: %s",
target_name(target),
target_type_name(target),
e,
@@ -5482,13 +5452,13 @@ no_params:
e = jim_getopt_wide(goi, &w);
if (e != JIM_OK)
return e;
- /* make this exactly 1 or 0 */
- target->backup_working_area = (!!w);
+ /* make this boolean */
+ target->backup_working_area = (w != 0);
} else {
if (goi->argc != 0)
goto no_params;
}
- Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
+ Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area ? 1 : 0));
/* loop for more e*/
break;
@@ -5848,8 +5818,7 @@ COMMAND_HANDLER(handle_target_event_list)
struct target *target = get_current_target(CMD_CTX);
struct target_event_action *teap = target->event_action;
- command_print(CMD, "Event actions for target (%d) %s\n",
- target->target_number,
+ command_print(CMD, "Event actions for target %s\n",
target_name(target));
command_print(CMD, "%-25s | Body", "Event");
command_print(CMD, "------------------------- | "
@@ -5883,7 +5852,17 @@ COMMAND_HANDLER(handle_target_debug_reason)
struct target *target = get_current_target(CMD_CTX);
- command_print(CMD, "%s", debug_reason_name(target));
+
+ const char *debug_reason = nvp_value2name(nvp_target_debug_reason,
+ target->debug_reason)->name;
+
+ if (!debug_reason) {
+ command_print(CMD, "bug: invalid debug reason (%d)",
+ target->debug_reason);
+ return ERROR_FAIL;
+ }
+
+ command_print(CMD, "%s", debug_reason);
return ERROR_OK;
}
@@ -6188,9 +6167,6 @@ static int target_create(struct jim_getopt_info *goi)
/* set empty smp cluster */
target->smp_targets = &empty_smp_targets;
- /* set target number */
- target->target_number = new_target_number();
-
/* allocate memory for each unique target type */
target->type = malloc(sizeof(struct target_type));
if (!target->type) {
@@ -6207,7 +6183,7 @@ static int target_create(struct jim_getopt_info *goi)
target->working_area = 0x0;
target->working_area_size = 0x0;
target->working_areas = NULL;
- target->backup_working_area = 0;
+ target->backup_working_area = false;
target->state = TARGET_UNKNOWN;
target->debug_reason = DBG_REASON_UNDEFINED;
@@ -7095,7 +7071,7 @@ static const struct command_registration target_exec_command_handlers[] = {
.handler = handle_wp_command,
.mode = COMMAND_EXEC,
.help = "list (no params) or create watchpoints",
- .usage = "[address length [('r'|'w'|'a') value [mask]]]",
+ .usage = "[address length [('r'|'w'|'a') [value [mask]]]]",
},
{
.name = "rwp",
diff --git a/src/target/target.h b/src/target/target.h
index b8f3b01..8b2e362 100644
--- a/src/target/target.h
+++ b/src/target/target.h
@@ -118,7 +118,6 @@ enum target_register_class {
struct target {
struct target_type *type; /* target type definition (name, access functions) */
char *cmd_name; /* tcl Name of target */
- int target_number; /* DO NOT USE! field to be removed in 2010 */
struct jtag_tap *tap; /* where on the jtag chain is this */
int32_t coreid; /* which device on the TAP? */
@@ -152,7 +151,7 @@ struct target {
bool working_area_phys_spec; /* physical address specified? */
target_addr_t working_area_phys; /* physical address */
uint32_t working_area_size; /* size in bytes */
- uint32_t backup_working_area; /* whether the content of the working area has to be preserved */
+ bool backup_working_area; /* whether the content of the working area has to be preserved */
struct working_area *working_areas;/* list of allocated working areas */
enum target_debug_reason debug_reason;/* reason why the target entered debug state */
enum target_endianness endianness; /* target endianness */
@@ -418,7 +417,6 @@ int target_call_timer_callbacks_now(void);
*/
int64_t target_timer_next_event(void);
-struct target *get_target_by_num(int num);
struct target *get_current_target(struct command_context *cmd_ctx);
struct target *get_current_target_or_null(struct command_context *cmd_ctx);
struct target *get_target(const char *id);
diff --git a/src/target/xtensa/xtensa.c b/src/target/xtensa/xtensa.c
index c575b53..d2ca32c 100644
--- a/src/target/xtensa/xtensa.c
+++ b/src/target/xtensa/xtensa.c
@@ -16,6 +16,7 @@
#include <helper/time_support.h>
#include <helper/align.h>
#include <target/register.h>
+#include <target/algorithm.h>
#include "xtensa_chip.h"
#include "xtensa.h"
@@ -822,7 +823,7 @@ int xtensa_examine(struct target *target)
struct xtensa *xtensa = target_to_xtensa(target);
unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
- LOG_DEBUG("coreid = %d", target->coreid);
+ LOG_TARGET_DEBUG(target, "");
if (xtensa->core_config->core_type == XT_UNDEF) {
LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
@@ -1096,7 +1097,7 @@ int xtensa_assert_reset(struct target *target)
{
struct xtensa *xtensa = target_to_xtensa(target);
- LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
+ LOG_TARGET_DEBUG(target, "begin");
xtensa_queue_pwr_reg_write(xtensa,
XDMREG_PWRCTL,
PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
@@ -2635,6 +2636,214 @@ int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoin
return ERROR_OK;
}
+int xtensa_start_algorithm(struct target *target,
+ int num_mem_params, struct mem_param *mem_params,
+ int num_reg_params, struct reg_param *reg_params,
+ target_addr_t entry_point, target_addr_t exit_point,
+ void *arch_info)
+{
+ struct xtensa *xtensa = target_to_xtensa(target);
+ struct xtensa_algorithm *algorithm_info = arch_info;
+ int retval = ERROR_OK;
+ bool usr_ps = false;
+
+ /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
+ * at the exit point */
+
+ if (target->state != TARGET_HALTED) {
+ LOG_WARNING("Target not halted!");
+ return ERROR_TARGET_NOT_HALTED;
+ }
+
+ for (unsigned int i = 0; i < xtensa->core_cache->num_regs; i++) {
+ struct reg *reg = &xtensa->core_cache->reg_list[i];
+ buf_cpy(reg->value, xtensa->algo_context_backup[i], reg->size);
+ }
+ /* save debug reason, it will be changed */
+ algorithm_info->ctx_debug_reason = target->debug_reason;
+ /* write mem params */
+ for (int i = 0; i < num_mem_params; i++) {
+ if (mem_params[i].direction != PARAM_IN) {
+ retval = target_write_buffer(target, mem_params[i].address,
+ mem_params[i].size,
+ mem_params[i].value);
+ if (retval != ERROR_OK)
+ return retval;
+ }
+ }
+ /* write reg params */
+ for (int i = 0; i < num_reg_params; i++) {
+ if (reg_params[i].size > 32) {
+ LOG_ERROR("BUG: unsupported register size (%" PRIu32 ")", reg_params[i].size);
+ return ERROR_FAIL;
+ }
+ struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
+ if (!reg) {
+ LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
+ return ERROR_FAIL;
+ }
+ if (reg->size != reg_params[i].size) {
+ LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
+ return ERROR_FAIL;
+ }
+ if (!memcmp(reg_params[i].reg_name, "ps", 3)) {
+ /* while halted, PS is shadowed by EPS[debug level]; write the value there instead */
+ usr_ps = true;
+ unsigned int reg_id = xtensa->eps_dbglevel_idx;
+ assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
+ reg = &xtensa->core_cache->reg_list[reg_id];
+ }
+ xtensa_reg_set_value(reg, buf_get_u32(reg_params[i].value, 0, reg->size));
+ reg->valid = 1;
+ }
+ /* ignore custom core mode if custom PS value is specified */
+ if (!usr_ps) {
+ unsigned int eps_reg_idx = xtensa->eps_dbglevel_idx;
+ xtensa_reg_val_t ps = xtensa_reg_get(target, eps_reg_idx);
+ enum xtensa_mode core_mode = XT_PS_RING_GET(ps);
+ if (algorithm_info->core_mode != XT_MODE_ANY && algorithm_info->core_mode != core_mode) {
+ LOG_DEBUG("setting core_mode: 0x%x", algorithm_info->core_mode);
+ xtensa_reg_val_t new_ps = (ps & ~XT_PS_RING_MSK) | XT_PS_RING(algorithm_info->core_mode);
+ /* save previous core mode */
+ /* TODO: core_mode is not restored for now. Can be added to the end of wait_algorithm */
+ algorithm_info->core_mode = core_mode;
+ xtensa_reg_set(target, eps_reg_idx, new_ps);
+ xtensa->core_cache->reg_list[eps_reg_idx].valid = 1;
+ }
+ }
+
+ return xtensa_resume(target, 0, entry_point, 1, 1);
+}
+
+/** Wait for an algorithm started with xtensa_start_algorithm() to complete, then restore the saved register context. */
+int xtensa_wait_algorithm(struct target *target,
+ int num_mem_params, struct mem_param *mem_params,
+ int num_reg_params, struct reg_param *reg_params,
+ target_addr_t exit_point, unsigned int timeout_ms,
+ void *arch_info)
+{
+ struct xtensa *xtensa = target_to_xtensa(target);
+ struct xtensa_algorithm *algorithm_info = arch_info;
+ int retval = ERROR_OK;
+ xtensa_reg_val_t pc;
+
+ /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
+ * at the exit point */
+
+ retval = target_wait_state(target, TARGET_HALTED, timeout_ms);
+ /* If the target fails to halt due to the breakpoint, force a halt */
+ if (retval != ERROR_OK || target->state != TARGET_HALTED) {
+ retval = target_halt(target);
+ if (retval != ERROR_OK)
+ return retval;
+ retval = target_wait_state(target, TARGET_HALTED, 500);
+ if (retval != ERROR_OK)
+ return retval;
+ LOG_TARGET_ERROR(target, "not halted %d, pc 0x%" PRIx32 ", ps 0x%" PRIx32, retval,
+ xtensa_reg_get(target, XT_REG_IDX_PC),
+ xtensa_reg_get(target, xtensa->eps_dbglevel_idx));
+ return ERROR_TARGET_TIMEOUT;
+ }
+ pc = xtensa_reg_get(target, XT_REG_IDX_PC);
+ if (exit_point && pc != exit_point) {
+ LOG_ERROR("algorithm halted at 0x%" PRIx32 ", expected exit point " TARGET_ADDR_FMT, pc, exit_point);
+ return ERROR_TARGET_TIMEOUT;
+ }
+ /* Copy core register values to reg_params[] */
+ for (int i = 0; i < num_reg_params; i++) {
+ if (reg_params[i].direction != PARAM_OUT) {
+ struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
+ if (!reg) {
+ LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
+ return ERROR_FAIL;
+ }
+ if (reg->size != reg_params[i].size) {
+ LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
+ return ERROR_FAIL;
+ }
+ buf_set_u32(reg_params[i].value, 0, 32, xtensa_reg_get_value(reg));
+ }
+ }
+ /* Read memory values to mem_params */
+ LOG_DEBUG("Read mem params");
+ for (int i = 0; i < num_mem_params; i++) {
+ LOG_DEBUG("Check mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
+ if (mem_params[i].direction != PARAM_OUT) {
+ LOG_DEBUG("Read mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
+ retval = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value);
+ if (retval != ERROR_OK)
+ return retval;
+ }
+ }
+
+ /* avoid gdb keep_alive warning */
+ keep_alive();
+
+ for (int i = xtensa->core_cache->num_regs - 1; i >= 0; i--) {
+ struct reg *reg = &xtensa->core_cache->reg_list[i];
+ if (i == XT_REG_IDX_PS) {
+ continue; /* skip PS; the register it actually maps to depends on NDEBUGLEVEL */
+ } else if (i == XT_REG_IDX_DEBUGCAUSE) {
+ /*FIXME: restoring DEBUGCAUSE causes exception when executing corresponding
+ * instruction in DIR */
+ LOG_DEBUG("Skip restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
+ xtensa->core_cache->reg_list[i].name,
+ buf_get_u32(reg->value, 0, 32),
+ buf_get_u32(xtensa->algo_context_backup[i], 0, 32));
+ buf_cpy(xtensa->algo_context_backup[i], reg->value, reg->size);
+ xtensa->core_cache->reg_list[i].dirty = 0;
+ xtensa->core_cache->reg_list[i].valid = 0;
+ } else if (memcmp(xtensa->algo_context_backup[i], reg->value, reg->size / 8)) {
+ if (reg->size <= 32) {
+ LOG_DEBUG("restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
+ xtensa->core_cache->reg_list[i].name,
+ buf_get_u32(reg->value, 0, reg->size),
+ buf_get_u32(xtensa->algo_context_backup[i], 0, reg->size));
+ } else if (reg->size <= 64) {
+ LOG_DEBUG("restoring register %s: 0x%8.8" PRIx64 " -> 0x%8.8" PRIx64,
+ xtensa->core_cache->reg_list[i].name,
+ buf_get_u64(reg->value, 0, reg->size),
+ buf_get_u64(xtensa->algo_context_backup[i], 0, reg->size));
+ } else {
+ LOG_DEBUG("restoring register %s %u-bits", xtensa->core_cache->reg_list[i].name, reg->size);
+ }
+ buf_cpy(xtensa->algo_context_backup[i], reg->value, reg->size);
+ xtensa->core_cache->reg_list[i].dirty = 1;
+ xtensa->core_cache->reg_list[i].valid = 1;
+ }
+ }
+ target->debug_reason = algorithm_info->ctx_debug_reason;
+
+ retval = xtensa_write_dirty_registers(target);
+ if (retval != ERROR_OK)
+ LOG_ERROR("Failed to write dirty regs (%d)!", retval);
+
+ return retval;
+}
+
+int xtensa_run_algorithm(struct target *target,
+ int num_mem_params, struct mem_param *mem_params,
+ int num_reg_params, struct reg_param *reg_params,
+ target_addr_t entry_point, target_addr_t exit_point,
+ unsigned int timeout_ms, void *arch_info)
+{
+ int retval = xtensa_start_algorithm(target,
+ num_mem_params, mem_params,
+ num_reg_params, reg_params,
+ entry_point, exit_point,
+ arch_info);
+
+ if (retval == ERROR_OK) {
+ retval = xtensa_wait_algorithm(target,
+ num_mem_params, mem_params,
+ num_reg_params, reg_params,
+ exit_point, timeout_ms,
+ arch_info);
+ }
+
+ return retval;
+}
+
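These hooks follow OpenOCD's usual start/wait split, so generic code can drive them through target_run_algorithm() once the Xtensa target type wires up its .run_algorithm callback. A minimal sketch, assuming entry_point/exit_point refer to an already-loaded code blob that ends in a software breakpoint and that only a2 is used as an in/out parameter:

    struct reg_param reg_params[1];
    struct xtensa_algorithm ainfo = { .core_mode = XT_MODE_ANY };

    init_reg_param(&reg_params[0], "a2", 32, PARAM_IN_OUT);
    buf_set_u32(reg_params[0].value, 0, 32, 42); /* input value in a2 */

    int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
            entry_point, exit_point, 1000 /* ms */, &ainfo);
    if (retval == ERROR_OK)
        LOG_DEBUG("stub returned 0x%" PRIx32, buf_get_u32(reg_params[0].value, 0, 32));
    destroy_reg_param(&reg_params[0]);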
static int xtensa_build_reg_cache(struct target *target)
{
struct xtensa *xtensa = target_to_xtensa(target);
@@ -3978,6 +4187,38 @@ COMMAND_HANDLER(xtensa_cmd_smpbreak)
get_current_target(CMD_CTX));
}
+COMMAND_HELPER(xtensa_cmd_dm_rw_do, struct xtensa *xtensa)
+{
+ if (CMD_ARGC == 1) {
+ // read: xtensa dm addr
+ uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
+ uint32_t val;
+ int res = xtensa_dm_read(&xtensa->dbg_mod, addr, &val);
+ if (res == ERROR_OK)
+ command_print(CMD, "xtensa DM(0x%08" PRIx32 ") -> 0x%08" PRIx32, addr, val);
+ else
+ command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : read ERROR %d", addr, res);
+ return res;
+ } else if (CMD_ARGC == 2) {
+ // write: xtensa dm addr value
+ uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
+ uint32_t val = strtoul(CMD_ARGV[1], NULL, 0);
+ int res = xtensa_dm_write(&xtensa->dbg_mod, addr, val);
+ if (res == ERROR_OK)
+ command_print(CMD, "xtensa DM(0x%08" PRIx32 ") <- 0x%08" PRIx32, addr, val);
+ else
+ command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : write ERROR %d", addr, res);
+ return res;
+ }
+ return ERROR_COMMAND_SYNTAX_ERROR;
+}
+
+COMMAND_HANDLER(xtensa_cmd_dm_rw)
+{
+ return CALL_COMMAND_HANDLER(xtensa_cmd_dm_rw_do,
+ target_to_xtensa(get_current_target(CMD_CTX)));
+}
+
COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
{
struct xtensa_trace_status trace_status;
@@ -4235,6 +4476,13 @@ static const struct command_registration xtensa_any_command_handlers[] = {
.usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
},
{
+ .name = "dm",
+ .handler = xtensa_cmd_dm_rw,
+ .mode = COMMAND_ANY,
+ .help = "read or write an Xtensa Debug Module register by address",
+ .usage = "addr [value]"
+ },
+ {
.name = "perfmon_enable",
.handler = xtensa_cmd_perfmon_enable,
.mode = COMMAND_EXEC,
diff --git a/src/target/xtensa/xtensa.h b/src/target/xtensa/xtensa.h
index 4216ae2..3b37122 100644
--- a/src/target/xtensa/xtensa.h
+++ b/src/target/xtensa/xtensa.h
@@ -222,6 +222,16 @@ struct xtensa_sw_breakpoint {
uint8_t insn_sz; /* 2 or 3 bytes */
};
+/**
+ * Xtensa algorithm data.
+ */
+struct xtensa_algorithm {
+ /** User can set this to specify the core mode in which the algorithm should run. */
+ enum xtensa_mode core_mode;
+ /** Used internally to back up and restore debug_reason. */
+ enum target_debug_reason ctx_debug_reason;
+};
+
#define XTENSA_COMMON_MAGIC 0x54E4E555U
/**
@@ -395,6 +405,21 @@ int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint);
int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint);
int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint);
int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint);
+int xtensa_start_algorithm(struct target *target,
+ int num_mem_params, struct mem_param *mem_params,
+ int num_reg_params, struct reg_param *reg_params,
+ target_addr_t entry_point, target_addr_t exit_point,
+ void *arch_info);
+int xtensa_wait_algorithm(struct target *target,
+ int num_mem_params, struct mem_param *mem_params,
+ int num_reg_params, struct reg_param *reg_params,
+ target_addr_t exit_point, unsigned int timeout_ms,
+ void *arch_info);
+int xtensa_run_algorithm(struct target *target,
+ int num_mem_params, struct mem_param *mem_params,
+ int num_reg_params, struct reg_param *reg_params,
+ target_addr_t entry_point, target_addr_t exit_point,
+ unsigned int timeout_ms, void *arch_info);
void xtensa_set_permissive_mode(struct target *target, bool state);
const char *xtensa_get_gdb_arch(struct target *target);
int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p);
diff --git a/src/target/xtensa/xtensa_debug_module.c b/src/target/xtensa/xtensa_debug_module.c
index 31d7a94..8045779 100644
--- a/src/target/xtensa/xtensa_debug_module.c
+++ b/src/target/xtensa/xtensa_debug_module.c
@@ -34,6 +34,16 @@ static const struct xtensa_dm_pwr_reg_offsets xdm_pwr_regs[XDMREG_PWRNUM] =
static const struct xtensa_dm_reg_offsets xdm_regs[XDMREG_NUM] =
XTENSA_DM_REG_OFFSETS;
+static enum xtensa_dm_reg xtensa_dm_regaddr_to_id(uint32_t addr)
+{
+ enum xtensa_dm_reg id;
+ uint32_t addr_masked = (addr & (XTENSA_DM_APB_ALIGN - 1));
+ for (id = XDMREG_TRAXID; id < XDMREG_NUM; id++)
+ if (xdm_regs[id].apb == addr_masked)
+ break;
+ return id;
+}
+
static void xtensa_dm_add_set_ir(struct xtensa_debug_module *dm, uint8_t value)
{
struct scan_field field;
@@ -285,6 +295,34 @@ int xtensa_dm_core_status_clear(struct xtensa_debug_module *dm, xtensa_dsr_t bit
return xtensa_dm_queue_execute(dm);
}
+int xtensa_dm_read(struct xtensa_debug_module *dm, uint32_t addr, uint32_t *val)
+{
+ enum xtensa_dm_reg reg = xtensa_dm_regaddr_to_id(addr);
+ uint8_t buf[sizeof(uint32_t)];
+ if (reg < XDMREG_NUM) {
+ xtensa_dm_queue_enable(dm);
+ dm->dbg_ops->queue_reg_read(dm, reg, buf);
+ xtensa_dm_queue_tdi_idle(dm);
+ int res = xtensa_dm_queue_execute(dm);
+ if (res == ERROR_OK && val)
+ *val = buf_get_u32(buf, 0, 32);
+ return res;
+ }
+ return ERROR_FAIL;
+}
+
+int xtensa_dm_write(struct xtensa_debug_module *dm, uint32_t addr, uint32_t val)
+{
+ enum xtensa_dm_reg reg = xtensa_dm_regaddr_to_id(addr);
+ if (reg < XDMREG_NUM) {
+ xtensa_dm_queue_enable(dm);
+ dm->dbg_ops->queue_reg_write(dm, reg, val);
+ xtensa_dm_queue_tdi_idle(dm);
+ return xtensa_dm_queue_execute(dm);
+ }
+ return ERROR_FAIL;
+}
+
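Besides backing the new `xtensa dm` command, these accessors can be called directly from target code. A small sketch, reading CONFIGID0 through its APB offset from the table above (xtensa is assumed to be a valid struct xtensa pointer; error handling trimmed):

    uint32_t cfgid0;
    if (xtensa_dm_read(&xtensa->dbg_mod, 0x0078 /* XDMREG_CONFIGID0 APB offset */, &cfgid0) == ERROR_OK)
        LOG_DEBUG("CONFIGID0: 0x%08" PRIx32, cfgid0);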
int xtensa_dm_trace_start(struct xtensa_debug_module *dm, struct xtensa_trace_start_config *cfg)
{
/*Turn off trace unit so we can start a new trace. */
diff --git a/src/target/xtensa/xtensa_debug_module.h b/src/target/xtensa/xtensa_debug_module.h
index 46b2935..495da2a 100644
--- a/src/target/xtensa/xtensa_debug_module.h
+++ b/src/target/xtensa/xtensa_debug_module.h
@@ -75,6 +75,22 @@ enum xtensa_dm_reg {
XDMREG_DELAYCNT,
XDMREG_MEMADDRSTART,
XDMREG_MEMADDREND,
+ XDMREG_EXTTIMELO,
+ XDMREG_EXTTIMEHI,
+ XDMREG_TRAXRSVD48,
+ XDMREG_TRAXRSVD4C,
+ XDMREG_TRAXRSVD50,
+ XDMREG_TRAXRSVD54,
+ XDMREG_TRAXRSVD58,
+ XDMREG_TRAXRSVD5C,
+ XDMREG_TRAXRSVD60,
+ XDMREG_TRAXRSVD64,
+ XDMREG_TRAXRSVD68,
+ XDMREG_TRAXRSVD6C,
+ XDMREG_TRAXRSVD70,
+ XDMREG_TRAXRSVD74,
+ XDMREG_CONFIGID0,
+ XDMREG_CONFIGID1,
/* Performance Monitor Registers */
XDMREG_PMG,
@@ -168,6 +184,22 @@ struct xtensa_dm_reg_offsets {
{ .nar = 0x07, .apb = 0x001c }, /* XDMREG_DELAYCNT */ \
{ .nar = 0x08, .apb = 0x0020 }, /* XDMREG_MEMADDRSTART */ \
{ .nar = 0x09, .apb = 0x0024 }, /* XDMREG_MEMADDREND */ \
+ { .nar = 0x10, .apb = 0x0040 }, /* XDMREG_EXTTIMELO */ \
+ { .nar = 0x11, .apb = 0x0044 }, /* XDMREG_EXTTIMEHI */ \
+ { .nar = 0x12, .apb = 0x0048 }, /* XDMREG_TRAXRSVD48 */ \
+ { .nar = 0x13, .apb = 0x004c }, /* XDMREG_TRAXRSVD4C */ \
+ { .nar = 0x14, .apb = 0x0050 }, /* XDMREG_TRAXRSVD50 */ \
+ { .nar = 0x15, .apb = 0x0054 }, /* XDMREG_TRAXRSVD54 */ \
+ { .nar = 0x16, .apb = 0x0058 }, /* XDMREG_TRAXRSVD58 */ \
+ { .nar = 0x17, .apb = 0x005c }, /* XDMREG_TRAXRSVD5C */ \
+ { .nar = 0x18, .apb = 0x0060 }, /* XDMREG_TRAXRSVD60 */ \
+ { .nar = 0x19, .apb = 0x0064 }, /* XDMREG_TRAXRSVD64 */ \
+ { .nar = 0x1a, .apb = 0x0068 }, /* XDMREG_TRAXRSVD68 */ \
+ { .nar = 0x1b, .apb = 0x006c }, /* XDMREG_TRAXRSVD6C */ \
+ { .nar = 0x1c, .apb = 0x0070 }, /* XDMREG_TRAXRSVD70 */ \
+ { .nar = 0x1d, .apb = 0x0074 }, /* XDMREG_TRAXRSVD74 */ \
+ { .nar = 0x1e, .apb = 0x0078 }, /* XDMREG_CONFIGID0 */ \
+ { .nar = 0x1f, .apb = 0x007c }, /* XDMREG_CONFIGID1 */ \
\
/* Performance Monitor Registers */ \
{ .nar = 0x20, .apb = 0x1000 }, /* XDMREG_PMG */ \
@@ -297,6 +329,11 @@ struct xtensa_dm_reg_offsets {
#define DEBUGCAUSE_DI BIT(5) /* Debug Interrupt */
#define DEBUGCAUSE_VALID BIT(31) /* Pseudo-value to trigger reread (NX only) */
+/* TRAXID */
+#define TRAXID_PRODNO_TRAX 0 /* TRAXID.PRODNO value for TRAX module */
+#define TRAXID_PRODNO_SHIFT 28
+#define TRAXID_PRODNO_MASK 0xf
+
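To show how these fields fit together, a hypothetical helper (the function name is illustrative) that checks whether a TRAXID value read from the debug module identifies a TRAX trace unit:

static inline bool xtensa_dm_id_is_trax(uint32_t traxid)
{
    /* PRODNO sits in the top nibble of TRAXID */
    return ((traxid >> TRAXID_PRODNO_SHIFT) & TRAXID_PRODNO_MASK) == TRAXID_PRODNO_TRAX;
}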
#define TRAXCTRL_TREN BIT(0) /* Trace enable. Tracing starts on 0->1 */
#define TRAXCTRL_TRSTP BIT(1) /* Trace Stop. Make 1 to stop trace. */
#define TRAXCTRL_PCMEN BIT(2) /* PC match enable */
@@ -512,6 +549,9 @@ static inline xtensa_dsr_t xtensa_dm_core_status_get(struct xtensa_debug_module
return dm->core_status.dsr;
}
+int xtensa_dm_read(struct xtensa_debug_module *dm, uint32_t addr, uint32_t *val);
+int xtensa_dm_write(struct xtensa_debug_module *dm, uint32_t addr, uint32_t val);
+
int xtensa_dm_device_id_read(struct xtensa_debug_module *dm);
static inline xtensa_ocdid_t xtensa_dm_device_id_get(struct xtensa_debug_module *dm)
{