Diffstat (limited to 'contrib/plugins')
-rw-r--r-- | contrib/plugins/Makefile      |  69
-rw-r--r-- | contrib/plugins/bbv.c         | 158
-rw-r--r-- | contrib/plugins/cache.c       |  34
-rw-r--r-- | contrib/plugins/cflow.c       | 393
-rw-r--r-- | contrib/plugins/execlog.c     |   6
-rw-r--r-- | contrib/plugins/hotblocks.c   |  33
-rw-r--r-- | contrib/plugins/hotpages.c    |  10
-rw-r--r-- | contrib/plugins/howvec.c      |  11
-rw-r--r-- | contrib/plugins/hwprofile.c   |  35
-rw-r--r-- | contrib/plugins/ips.c         |  55
-rw-r--r-- | contrib/plugins/lockstep.c    |  48
-rw-r--r-- | contrib/plugins/meson.build   |  30
-rw-r--r-- | contrib/plugins/stoptrigger.c | 157
13 files changed, 902 insertions, 137 deletions
diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile deleted file mode 100644 index 449ead1..0000000 --- a/contrib/plugins/Makefile +++ /dev/null @@ -1,69 +0,0 @@ -# -*- Mode: makefile -*- -# -# This Makefile example is fairly independent from the main makefile -# so users can take and adapt it for their build. We only really -# include config-host.mak so we don't have to repeat probing for -# programs that the main configure has already done for us. -# - -include config-host.mak - -TOP_SRC_PATH = $(SRC_PATH)/../.. - -VPATH += $(SRC_PATH) - -NAMES := -NAMES += execlog -NAMES += hotblocks -NAMES += hotpages -NAMES += howvec - -# The lockstep example communicates using unix sockets, -# and can't be easily made to work on windows. -ifneq ($(CONFIG_WIN32),y) -NAMES += lockstep -endif - -NAMES += hwprofile -NAMES += cache -NAMES += drcov -NAMES += ips - -ifeq ($(CONFIG_WIN32),y) -SO_SUFFIX := .dll -LDLIBS += $(shell $(PKG_CONFIG) --libs glib-2.0) -else -SO_SUFFIX := .so -endif - -SONAMES := $(addsuffix $(SO_SUFFIX),$(addprefix lib,$(NAMES))) - -# The main QEMU uses Glib extensively so it's perfectly fine to use it -# in plugins (which many example do). -PLUGIN_CFLAGS := $(shell $(PKG_CONFIG) --cflags glib-2.0) -PLUGIN_CFLAGS += -fPIC -Wall -PLUGIN_CFLAGS += -I$(TOP_SRC_PATH)/include/qemu - -all: $(SONAMES) - -%.o: %.c - $(CC) $(CFLAGS) $(PLUGIN_CFLAGS) -c -o $@ $< - -ifeq ($(CONFIG_WIN32),y) -lib%$(SO_SUFFIX): %.o win32_linker.o ../../plugins/libqemu_plugin_api.a - $(CC) -shared -o $@ $^ $(LDLIBS) -else ifeq ($(CONFIG_DARWIN),y) -lib%$(SO_SUFFIX): %.o - $(CC) -bundle -Wl,-undefined,dynamic_lookup -o $@ $^ $(LDLIBS) -else -lib%$(SO_SUFFIX): %.o - $(CC) -shared -o $@ $^ $(LDLIBS) -endif - - -clean: - rm -f *.o *$(SO_SUFFIX) *.d - rm -Rf .libs - -.PHONY: all clean -.SECONDARY: diff --git a/contrib/plugins/bbv.c b/contrib/plugins/bbv.c new file mode 100644 index 0000000..b9da6f8 --- /dev/null +++ b/contrib/plugins/bbv.c @@ -0,0 +1,158 @@ +/* + * Generate basic block vectors for use with the SimPoint analysis tool. 
+ * SimPoint: https://cseweb.ucsd.edu/~calder/simpoint/ + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <stdio.h> +#include <glib.h> + +#include <qemu-plugin.h> + +typedef struct Bb { + uint64_t vaddr; + struct qemu_plugin_scoreboard *count; + unsigned int index; +} Bb; + +typedef struct Vcpu { + uint64_t count; + FILE *file; +} Vcpu; + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; +static GHashTable *bbs; +static GRWLock bbs_lock; +static char *filename; +static struct qemu_plugin_scoreboard *vcpus; +static uint64_t interval = 100000000; + +static void plugin_exit(qemu_plugin_id_t id, void *p) +{ + for (int i = 0; i < qemu_plugin_num_vcpus(); i++) { + fclose(((Vcpu *)qemu_plugin_scoreboard_find(vcpus, i))->file); + } + + g_hash_table_unref(bbs); + g_free(filename); + qemu_plugin_scoreboard_free(vcpus); +} + +static void free_bb(void *data) +{ + qemu_plugin_scoreboard_free(((Bb *)data)->count); + g_free(data); +} + +static qemu_plugin_u64 count_u64(void) +{ + return qemu_plugin_scoreboard_u64_in_struct(vcpus, Vcpu, count); +} + +static qemu_plugin_u64 bb_count_u64(Bb *bb) +{ + return qemu_plugin_scoreboard_u64(bb->count); +} + +static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index) +{ + g_autofree gchar *vcpu_filename = NULL; + Vcpu *vcpu = qemu_plugin_scoreboard_find(vcpus, vcpu_index); + + vcpu_filename = g_strdup_printf("%s.%u.bb", filename, vcpu_index); + vcpu->file = fopen(vcpu_filename, "w"); +} + +static void vcpu_interval_exec(unsigned int vcpu_index, void *udata) +{ + Vcpu *vcpu = qemu_plugin_scoreboard_find(vcpus, vcpu_index); + GHashTableIter iter; + void *value; + + if (!vcpu->file) { + return; + } + + vcpu->count -= interval; + + fputc('T', vcpu->file); + + g_rw_lock_reader_lock(&bbs_lock); + g_hash_table_iter_init(&iter, bbs); + + while (g_hash_table_iter_next(&iter, NULL, &value)) { + Bb *bb = value; + uint64_t bb_count = qemu_plugin_u64_get(bb_count_u64(bb), vcpu_index); + + if (!bb_count) { + continue; + } + + fprintf(vcpu->file, ":%u:%" PRIu64 " ", bb->index, bb_count); + qemu_plugin_u64_set(bb_count_u64(bb), vcpu_index, 0); + } + + g_rw_lock_reader_unlock(&bbs_lock); + fputc('\n', vcpu->file); +} + +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) +{ + uint64_t n_insns = qemu_plugin_tb_n_insns(tb); + uint64_t vaddr = qemu_plugin_tb_vaddr(tb); + Bb *bb; + + g_rw_lock_writer_lock(&bbs_lock); + bb = g_hash_table_lookup(bbs, &vaddr); + if (!bb) { + bb = g_new(Bb, 1); + bb->vaddr = vaddr; + bb->count = qemu_plugin_scoreboard_new(sizeof(uint64_t)); + bb->index = g_hash_table_size(bbs) + 1; + g_hash_table_replace(bbs, &bb->vaddr, bb); + } + g_rw_lock_writer_unlock(&bbs_lock); + + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_ADD_U64, count_u64(), n_insns); + + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_ADD_U64, bb_count_u64(bb), n_insns); + + qemu_plugin_register_vcpu_tb_exec_cond_cb( + tb, vcpu_interval_exec, QEMU_PLUGIN_CB_NO_REGS, + QEMU_PLUGIN_COND_GE, count_u64(), interval, NULL); +} + +QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, + const qemu_info_t *info, + int argc, char **argv) +{ + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "interval") == 0) { + interval = g_ascii_strtoull(tokens[1], NULL, 10); + } else if (g_strcmp0(tokens[0], "outfile") == 0) { + filename = tokens[1]; + tokens[1] = NULL; + } else { + fprintf(stderr, 
"option parsing failed: %s\n", opt); + return -1; + } + } + + if (!filename) { + fputs("outfile unspecified\n", stderr); + return -1; + } + + bbs = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, free_bb); + vcpus = qemu_plugin_scoreboard_new(sizeof(Vcpu)); + qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); + qemu_plugin_register_vcpu_init_cb(id, vcpu_init); + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + + return 0; +} diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c index c5c8ac7..5650858 100644 --- a/contrib/plugins/cache.c +++ b/contrib/plugins/cache.c @@ -208,7 +208,7 @@ static int fifo_get_first_block(Cache *cache, int set) static void fifo_update_on_miss(Cache *cache, int set, int blk_idx) { GQueue *q = cache->sets[set].fifo_queue; - g_queue_push_head(q, GINT_TO_POINTER(blk_idx)); + g_queue_push_head(q, (gpointer)(intptr_t) blk_idx); } static void fifo_destroy(Cache *cache) @@ -471,13 +471,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) n_insns = qemu_plugin_tb_n_insns(tb); for (i = 0; i < n_insns; i++) { struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); - uint64_t effective_addr; - - if (sys) { - effective_addr = (uint64_t) qemu_plugin_insn_haddr(insn); - } else { - effective_addr = (uint64_t) qemu_plugin_insn_vaddr(insn); - } + uint64_t effective_addr = sys ? (uintptr_t) qemu_plugin_insn_haddr(insn) : + qemu_plugin_insn_vaddr(insn); /* * Instructions might get translated multiple times, we do not create @@ -485,14 +480,13 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) * entry from the hash table and register it for the callback again. */ g_mutex_lock(&hashtable_lock); - data = g_hash_table_lookup(miss_ht, GUINT_TO_POINTER(effective_addr)); + data = g_hash_table_lookup(miss_ht, &effective_addr); if (data == NULL) { data = g_new0(InsnData, 1); data->disas_str = qemu_plugin_insn_disas(insn); data->symbol = qemu_plugin_insn_symbol(insn); data->addr = effective_addr; - g_hash_table_insert(miss_ht, GUINT_TO_POINTER(effective_addr), - (gpointer) data); + g_hash_table_insert(miss_ht, &data->addr, data); } g_mutex_unlock(&hashtable_lock); @@ -558,7 +552,7 @@ static void append_stats_line(GString *line, " %-12" PRIu64 " %-11" PRIu64 " %10.4lf%%", l2_access, l2_misses, - l2_access ? l2_miss_rate : 0.0); + l2_miss_rate); } g_string_append(line, "\n"); @@ -582,7 +576,7 @@ static void sum_stats(void) } } -static int dcmp(gconstpointer a, gconstpointer b) +static int dcmp(gconstpointer a, gconstpointer b, gpointer d) { InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; @@ -590,7 +584,7 @@ static int dcmp(gconstpointer a, gconstpointer b) return insn_a->l1_dmisses < insn_b->l1_dmisses ? 1 : -1; } -static int icmp(gconstpointer a, gconstpointer b) +static int icmp(gconstpointer a, gconstpointer b, gpointer d) { InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; @@ -598,7 +592,7 @@ static int icmp(gconstpointer a, gconstpointer b) return insn_a->l1_imisses < insn_b->l1_imisses ? 
1 : -1; } -static int l2_cmp(gconstpointer a, gconstpointer b) +static int l2_cmp(gconstpointer a, gconstpointer b, gpointer d) { InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; @@ -609,7 +603,7 @@ static int l2_cmp(gconstpointer a, gconstpointer b) static void log_stats(void) { int i; - Cache *icache, *dcache, *l2_cache; + Cache *icache, *dcache, *l2_cache = NULL; g_autoptr(GString) rep = g_string_new("core #, data accesses, data misses," " dmiss rate, insn accesses," @@ -651,7 +645,7 @@ static void log_top_insns(void) InsnData *insn; miss_insns = g_hash_table_get_values(miss_ht); - miss_insns = g_list_sort(miss_insns, dcmp); + miss_insns = g_list_sort_with_data(miss_insns, dcmp, NULL); g_autoptr(GString) rep = g_string_new(""); g_string_append_printf(rep, "%s", "address, data misses, instruction\n"); @@ -665,7 +659,7 @@ static void log_top_insns(void) insn->l1_dmisses, insn->disas_str); } - miss_insns = g_list_sort(miss_insns, icmp); + miss_insns = g_list_sort_with_data(miss_insns, icmp, NULL); g_string_append_printf(rep, "%s", "\naddress, fetch misses, instruction\n"); for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) { @@ -682,7 +676,7 @@ static void log_top_insns(void) goto finish; } - miss_insns = g_list_sort(miss_insns, l2_cmp); + miss_insns = g_list_sort_with_data(miss_insns, l2_cmp, NULL); g_string_append_printf(rep, "%s", "\naddress, L2 misses, instruction\n"); for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) { @@ -853,7 +847,7 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); - miss_ht = g_hash_table_new_full(NULL, g_direct_equal, NULL, insn_free); + miss_ht = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, insn_free); return 0; } diff --git a/contrib/plugins/cflow.c b/contrib/plugins/cflow.c new file mode 100644 index 0000000..b5e33f2 --- /dev/null +++ b/contrib/plugins/cflow.c @@ -0,0 +1,393 @@ +/* + * Control Flow plugin + * + * This plugin will track changes to control flow and detect where + * instructions fault. + * + * Copyright (c) 2024 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include <glib.h> +#include <inttypes.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#include <qemu-plugin.h> + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +typedef enum { + SORT_HOTTEST, /* hottest branch insn */ + SORT_EXCEPTION, /* most early exits */ + SORT_POPDEST, /* most destinations (usually ret's) */ +} ReportType; + +ReportType report = SORT_HOTTEST; +int topn = 10; + +typedef struct { + uint64_t daddr; + uint64_t dcount; +} DestData; + +/* A node is an address where we can go to multiple places */ +typedef struct { + GMutex lock; + /* address of the branch point */ + uint64_t addr; + /* array of DestData */ + GArray *dests; + /* early exit/fault count */ + uint64_t early_exit; + /* jump destination count */ + uint64_t dest_count; + /* instruction data */ + char *insn_disas; + /* symbol? */ + const char *symbol; + /* times translated as last in block? */ + int last_count; + /* times translated in the middle of block? 
*/ + int mid_count; +} NodeData; + +typedef enum { + /* last insn in block, expected flow control */ + LAST_INSN = (1 << 0), + /* mid-block insn, can only be an exception */ + EXCP_INSN = (1 << 1), + /* multiple disassembly, may have changed */ + MULT_INSN = (1 << 2), +} InsnTypes; + +typedef struct { + /* address of the branch point */ + uint64_t addr; + /* disassembly */ + char *insn_disas; + /* symbol? */ + const char *symbol; + /* types */ + InsnTypes type_flag; +} InsnData; + +/* We use this to track the current execution state */ +typedef struct { + /* address of current translated block */ + uint64_t tb_pc; + /* address of end of block */ + uint64_t end_block; + /* next pc after end of block */ + uint64_t pc_after_block; + /* address of last executed PC */ + uint64_t last_pc; +} VCPUScoreBoard; + +/* descriptors for accessing the above scoreboard */ +static qemu_plugin_u64 tb_pc; +static qemu_plugin_u64 end_block; +static qemu_plugin_u64 pc_after_block; +static qemu_plugin_u64 last_pc; + + +static GMutex node_lock; +static GHashTable *nodes; +struct qemu_plugin_scoreboard *state; + +/* SORT_HOTTEST */ +static gint hottest(gconstpointer a, gconstpointer b, gpointer d) +{ + NodeData *na = (NodeData *) a; + NodeData *nb = (NodeData *) b; + + return na->dest_count > nb->dest_count ? -1 : + na->dest_count == nb->dest_count ? 0 : 1; +} + +static gint exception(gconstpointer a, gconstpointer b, gpointer d) +{ + NodeData *na = (NodeData *) a; + NodeData *nb = (NodeData *) b; + + return na->early_exit > nb->early_exit ? -1 : + na->early_exit == nb->early_exit ? 0 : 1; +} + +static gint popular(gconstpointer a, gconstpointer b, gpointer d) +{ + NodeData *na = (NodeData *) a; + NodeData *nb = (NodeData *) b; + + return na->dests->len > nb->dests->len ? -1 : + na->dests->len == nb->dests->len ? 0 : 1; +} + +/* Filter out non-branches - returns true to remove entry */ +static gboolean filter_non_branches(gpointer key, gpointer value, + gpointer user_data) +{ + NodeData *node = (NodeData *) value; + + return node->dest_count == 0; +} + +static void plugin_exit(qemu_plugin_id_t id, void *p) +{ + g_autoptr(GString) result = g_string_new("collected "); + GList *data; + GCompareDataFunc sort = &hottest; + int i = 0; + + g_mutex_lock(&node_lock); + g_string_append_printf(result, "%d control flow nodes in the hash table\n", + g_hash_table_size(nodes)); + + /* remove all nodes that didn't branch */ + g_hash_table_foreach_remove(nodes, filter_non_branches, NULL); + + data = g_hash_table_get_values(nodes); + + switch (report) { + case SORT_HOTTEST: + sort = &hottest; + break; + case SORT_EXCEPTION: + sort = &exception; + break; + case SORT_POPDEST: + sort = &popular; + break; + } + + data = g_list_sort_with_data(data, sort, NULL); + + for (GList *l = data; + l != NULL && i < topn; + l = l->next, i++) { + NodeData *n = l->data; + const char *type = n->mid_count ? 
"sync fault" : "branch"; + g_string_append_printf(result, " addr: 0x%"PRIx64 " %s: %s (%s)\n", + n->addr, n->symbol, n->insn_disas, type); + if (n->early_exit) { + g_string_append_printf(result, " early exits %"PRId64"\n", + n->early_exit); + } + g_string_append_printf(result, " branches %"PRId64"\n", + n->dest_count); + for (int j = 0; j < n->dests->len; j++) { + DestData *dd = &g_array_index(n->dests, DestData, j); + g_string_append_printf(result, " to 0x%"PRIx64" (%"PRId64")\n", + dd->daddr, dd->dcount); + } + } + + qemu_plugin_outs(result->str); + + g_mutex_unlock(&node_lock); +} + +static void plugin_init(void) +{ + g_mutex_init(&node_lock); + nodes = g_hash_table_new(g_int64_hash, g_int64_equal); + state = qemu_plugin_scoreboard_new(sizeof(VCPUScoreBoard)); + + /* score board declarations */ + tb_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, tb_pc); + end_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, + end_block); + pc_after_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, + pc_after_block); + last_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, + last_pc); +} + +static NodeData *create_node(uint64_t addr) +{ + NodeData *node = g_new0(NodeData, 1); + g_mutex_init(&node->lock); + node->addr = addr; + node->dests = g_array_new(true, true, sizeof(DestData)); + return node; +} + +static NodeData *fetch_node(uint64_t addr, bool create_if_not_found) +{ + NodeData *node = NULL; + + g_mutex_lock(&node_lock); + node = (NodeData *) g_hash_table_lookup(nodes, &addr); + if (!node && create_if_not_found) { + node = create_node(addr); + g_hash_table_insert(nodes, &node->addr, node); + } + g_mutex_unlock(&node_lock); + return node; +} + +/* + * Called when we detect a non-linear execution (pc != + * pc_after_block). This could be due to a fault causing some sort of + * exit exception (if last_pc != block_end) or just a taken branch. 
+ */ +static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata) +{ + uint64_t lpc = qemu_plugin_u64_get(last_pc, cpu_index); + uint64_t ebpc = qemu_plugin_u64_get(end_block, cpu_index); + uint64_t npc = qemu_plugin_u64_get(pc_after_block, cpu_index); + uint64_t pc = qemu_plugin_u64_get(tb_pc, cpu_index); + + /* return early for address 0 */ + if (!lpc) { + return; + } + + NodeData *node = fetch_node(lpc, true); + DestData *data = NULL; + bool early_exit = (lpc != ebpc); + GArray *dests; + + /* the condition should never hit */ + g_assert(pc != npc); + + g_mutex_lock(&node->lock); + + if (early_exit) { + fprintf(stderr, "%s: pc=%"PRIx64", epbc=%"PRIx64 + " npc=%"PRIx64", lpc=%"PRIx64"\n", + __func__, pc, ebpc, npc, lpc); + node->early_exit++; + if (!node->mid_count) { + /* count now as we've only just allocated */ + node->mid_count++; + } + } + + dests = node->dests; + for (int i = 0; i < dests->len; i++) { + if (g_array_index(dests, DestData, i).daddr == pc) { + data = &g_array_index(dests, DestData, i); + } + } + + /* we've never seen this before, allocate a new entry */ + if (!data) { + DestData new_entry = { .daddr = pc }; + g_array_append_val(dests, new_entry); + data = &g_array_index(dests, DestData, dests->len - 1); + g_assert(data->daddr == pc); + } + + data->dcount++; + node->dest_count++; + + g_mutex_unlock(&node->lock); +} + +/* + * At the start of each block we need to resolve two things: + * + * - is last_pc == block_end, if not we had an early exit + * - is start of block last_pc + insn width, if not we jumped + * + * Once those are dealt with we can instrument the rest of the + * instructions for their execution. + * + */ +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) +{ + uint64_t pc = qemu_plugin_tb_vaddr(tb); + size_t insns = qemu_plugin_tb_n_insns(tb); + struct qemu_plugin_insn *first_insn = qemu_plugin_tb_get_insn(tb, 0); + struct qemu_plugin_insn *last_insn = qemu_plugin_tb_get_insn(tb, insns - 1); + + /* + * check if we are executing linearly after the last block. We can + * handle both early block exits and normal branches in the + * callback if we hit it. + */ + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_STORE_U64, tb_pc, pc); + qemu_plugin_register_vcpu_tb_exec_cond_cb( + tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS, + QEMU_PLUGIN_COND_NE, pc_after_block, pc, NULL); + + /* + * Now we can set start/end for this block so the next block can + * check where we are at. Do this on the first instruction and not + * the TB so we don't get mixed up with above. + */ + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn, + QEMU_PLUGIN_INLINE_STORE_U64, + end_block, qemu_plugin_insn_vaddr(last_insn)); + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn, + QEMU_PLUGIN_INLINE_STORE_U64, + pc_after_block, + qemu_plugin_insn_vaddr(last_insn) + + qemu_plugin_insn_size(last_insn)); + + for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) { + struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx); + uint64_t ipc = qemu_plugin_insn_vaddr(insn); + /* + * If this is a potential branch point check if we could grab + * the disassembly for it. If it is the last instruction + * always create an entry. 
+ */ + NodeData *node = fetch_node(ipc, last_insn); + if (node) { + g_mutex_lock(&node->lock); + if (!node->insn_disas) { + node->insn_disas = qemu_plugin_insn_disas(insn); + } + if (!node->symbol) { + node->symbol = qemu_plugin_insn_symbol(insn); + } + if (last_insn == insn) { + node->last_count++; + } else { + node->mid_count++; + } + g_mutex_unlock(&node->lock); + } + + /* Store the PC of what we are about to execute */ + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(insn, + QEMU_PLUGIN_INLINE_STORE_U64, + last_pc, ipc); + } +} + +QEMU_PLUGIN_EXPORT +int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, + int argc, char **argv) +{ + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "sort") == 0) { + if (g_strcmp0(tokens[1], "hottest") == 0) { + report = SORT_HOTTEST; + } else if (g_strcmp0(tokens[1], "early") == 0) { + report = SORT_EXCEPTION; + } else if (g_strcmp0(tokens[1], "exceptions") == 0) { + report = SORT_POPDEST; + } else { + fprintf(stderr, "failed to parse: %s\n", tokens[1]); + return -1; + } + } else { + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; + } + } + + plugin_init(); + + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); + return 0; +} diff --git a/contrib/plugins/execlog.c b/contrib/plugins/execlog.c index 371db97..d67d010 100644 --- a/contrib/plugins/execlog.c +++ b/contrib/plugins/execlog.c @@ -101,7 +101,7 @@ static void insn_check_regs(CPU *cpu) GByteArray *temp = reg->last; g_string_append_printf(cpu->last_exec, ", %s -> 0x", reg->name); /* TODO: handle BE properly */ - for (int i = sz; i >= 0; i--) { + for (int i = sz - 1; i >= 0; i--) { g_string_append_printf(cpu->last_exec, "%02x", reg->new->data[i]); } @@ -181,8 +181,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) bool check_regs_this = rmatches; bool check_regs_next = false; - size_t n = qemu_plugin_tb_n_insns(tb); - for (size_t i = 0; i < n; i++) { + size_t n_insns = qemu_plugin_tb_n_insns(tb); + for (size_t i = 0; i < n_insns; i++) { char *insn_disas; uint64_t insn_vaddr; diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c index 02bc507..98404b6 100644 --- a/contrib/plugins/hotblocks.c +++ b/contrib/plugins/hotblocks.c @@ -29,7 +29,7 @@ static guint64 limit = 20; * * The internals of the TCG are not exposed to plugins so we can only * get the starting PC for each block. We cheat this slightly by - * xor'ing the number of instructions to the hash to help + * checking the number of instructions as well to help * differentiate. */ typedef struct { @@ -39,7 +39,7 @@ typedef struct { unsigned long insns; } ExecCount; -static gint cmp_exec_count(gconstpointer a, gconstpointer b) +static gint cmp_exec_count(gconstpointer a, gconstpointer b, gpointer d) { ExecCount *ea = (ExecCount *) a; ExecCount *eb = (ExecCount *) b; @@ -50,6 +50,20 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b) return count_a > count_b ? 
-1 : 1; } +static guint exec_count_hash(gconstpointer v) +{ + const ExecCount *e = v; + return e->start_addr ^ e->insns; +} + +static gboolean exec_count_equal(gconstpointer v1, gconstpointer v2) +{ + const ExecCount *ea = v1; + const ExecCount *eb = v2; + return (ea->start_addr == eb->start_addr) && + (ea->insns == eb->insns); +} + static void exec_count_free(gpointer key, gpointer value, gpointer user_data) { ExecCount *cnt = value; @@ -65,7 +79,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) g_string_append_printf(report, "%d entries in the hash table\n", g_hash_table_size(hotblocks)); counts = g_hash_table_get_values(hotblocks); - it = g_list_sort(counts, cmp_exec_count); + it = g_list_sort_with_data(counts, cmp_exec_count, NULL); if (it) { g_string_append_printf(report, "pc, tcount, icount, ecount\n"); @@ -91,7 +105,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) static void plugin_init(void) { - hotblocks = g_hash_table_new(NULL, g_direct_equal); + hotblocks = g_hash_table_new(exec_count_hash, exec_count_equal); } static void vcpu_tb_exec(unsigned int cpu_index, void *udata) @@ -111,10 +125,15 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) ExecCount *cnt; uint64_t pc = qemu_plugin_tb_vaddr(tb); size_t insns = qemu_plugin_tb_n_insns(tb); - uint64_t hash = pc ^ insns; g_mutex_lock(&lock); - cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash); + { + ExecCount e; + e.start_addr = pc; + e.insns = insns; + cnt = (ExecCount *) g_hash_table_lookup(hotblocks, &e); + } + if (cnt) { cnt->trans_count++; } else { @@ -123,7 +142,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) cnt->trans_count = 1; cnt->insns = insns; cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t)); - g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt); + g_hash_table_insert(hotblocks, cnt, cnt); } g_mutex_unlock(&lock); diff --git a/contrib/plugins/hotpages.c b/contrib/plugins/hotpages.c index 8316ae5..9d48ac9 100644 --- a/contrib/plugins/hotpages.c +++ b/contrib/plugins/hotpages.c @@ -48,7 +48,7 @@ typedef struct { static GMutex lock; static GHashTable *pages; -static gint cmp_access_count(gconstpointer a, gconstpointer b) +static gint cmp_access_count(gconstpointer a, gconstpointer b, gpointer d) { PageCounters *ea = (PageCounters *) a; PageCounters *eb = (PageCounters *) b; @@ -83,7 +83,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) if (counts && g_list_next(counts)) { GList *it; - it = g_list_sort(counts, cmp_access_count); + it = g_list_sort_with_data(counts, cmp_access_count, NULL); for (i = 0; i < limit && it->next; i++, it = it->next) { PageCounters *rec = (PageCounters *) it->data; @@ -103,7 +103,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) static void plugin_init(void) { page_mask = (page_size - 1); - pages = g_hash_table_new(NULL, g_direct_equal); + pages = g_hash_table_new(g_int64_hash, g_int64_equal); } static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, @@ -130,12 +130,12 @@ static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, page &= ~page_mask; g_mutex_lock(&lock); - count = (PageCounters *) g_hash_table_lookup(pages, GUINT_TO_POINTER(page)); + count = (PageCounters *) g_hash_table_lookup(pages, &page); if (!count) { count = g_new0(PageCounters, 1); count->page_address = page; - g_hash_table_insert(pages, GUINT_TO_POINTER(page), (gpointer) count); + g_hash_table_insert(pages, &count->page_address, count); } if 
(qemu_plugin_mem_is_store(meminfo)) { count->writes++; diff --git a/contrib/plugins/howvec.c b/contrib/plugins/howvec.c index 9be67f7..42bddb6 100644 --- a/contrib/plugins/howvec.c +++ b/contrib/plugins/howvec.c @@ -155,7 +155,7 @@ static ClassSelector class_tables[] = { static InsnClassExecCount *class_table; static int class_table_sz; -static gint cmp_exec_count(gconstpointer a, gconstpointer b) +static gint cmp_exec_count(gconstpointer a, gconstpointer b, gpointer d) { InsnExecCount *ea = (InsnExecCount *) a; InsnExecCount *eb = (InsnExecCount *) b; @@ -208,7 +208,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) counts = g_hash_table_get_values(insns); if (counts && g_list_next(counts)) { g_string_append_printf(report, "Individual Instructions:\n"); - counts = g_list_sort(counts, cmp_exec_count); + counts = g_list_sort_with_data(counts, cmp_exec_count, NULL); for (i = 0; i < limit && g_list_next(counts); i++, counts = g_list_next(counts)) { @@ -253,6 +253,8 @@ static struct qemu_plugin_scoreboard *find_counter( int i; uint64_t *cnt = NULL; uint32_t opcode = 0; + /* if opcode is greater than 32 bits, we should refactor insn hash table. */ + G_STATIC_ASSERT(sizeof(opcode) == sizeof(uint32_t)); InsnClassExecCount *class = NULL; /* @@ -284,7 +286,7 @@ static struct qemu_plugin_scoreboard *find_counter( g_mutex_lock(&lock); icount = (InsnExecCount *) g_hash_table_lookup(insns, - GUINT_TO_POINTER(opcode)); + (gpointer)(intptr_t) opcode); if (!icount) { icount = g_new0(InsnExecCount, 1); @@ -295,8 +297,7 @@ static struct qemu_plugin_scoreboard *find_counter( qemu_plugin_scoreboard_new(sizeof(uint64_t)); icount->count = qemu_plugin_scoreboard_u64(score); - g_hash_table_insert(insns, GUINT_TO_POINTER(opcode), - (gpointer) icount); + g_hash_table_insert(insns, (gpointer)(intptr_t) opcode, icount); } g_mutex_unlock(&lock); diff --git a/contrib/plugins/hwprofile.c b/contrib/plugins/hwprofile.c index 739ac0c..a9838cc 100644 --- a/contrib/plugins/hwprofile.c +++ b/contrib/plugins/hwprofile.c @@ -43,6 +43,8 @@ typedef struct { static GMutex lock; static GHashTable *devices; +static struct qemu_plugin_scoreboard *source_pc_scoreboard; +static qemu_plugin_u64 source_pc; /* track the access pattern to a piece of HW */ static bool pattern; @@ -69,7 +71,7 @@ static void plugin_init(void) devices = g_hash_table_new(NULL, NULL); } -static gint sort_cmp(gconstpointer a, gconstpointer b) +static gint sort_cmp(gconstpointer a, gconstpointer b, gpointer d) { DeviceCounts *ea = (DeviceCounts *) a; DeviceCounts *eb = (DeviceCounts *) b; @@ -77,7 +79,7 @@ static gint sort_cmp(gconstpointer a, gconstpointer b) eb->totals.reads + eb->totals.writes ? -1 : 1; } -static gint sort_loc(gconstpointer a, gconstpointer b) +static gint sort_loc(gconstpointer a, gconstpointer b, gpointer d) { IOLocationCounts *ea = (IOLocationCounts *) a; IOLocationCounts *eb = (IOLocationCounts *) b; @@ -124,13 +126,13 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) if (counts && g_list_next(counts)) { GList *it; - it = g_list_sort(counts, sort_cmp); + it = g_list_sort_with_data(counts, sort_cmp, NULL); while (it) { DeviceCounts *rec = (DeviceCounts *) it->data; if (rec->detail) { GList *accesses = g_hash_table_get_values(rec->detail); - GList *io_it = g_list_sort(accesses, sort_loc); + GList *io_it = g_list_sort_with_data(accesses, sort_loc, NULL); const char *prefix = pattern ? 
"off" : "pc"; g_string_append_printf(report, "%s @ 0x%"PRIx64"\n", rec->name, rec->base); @@ -159,7 +161,7 @@ static DeviceCounts *new_count(const char *name, uint64_t base) count->name = name; count->base = base; if (pattern || source) { - count->detail = g_hash_table_new(NULL, NULL); + count->detail = g_hash_table_new(g_int64_hash, g_int64_equal); } g_hash_table_insert(devices, (gpointer) name, count); return count; @@ -169,7 +171,7 @@ static IOLocationCounts *new_location(GHashTable *table, uint64_t off_or_pc) { IOLocationCounts *loc = g_new0(IOLocationCounts, 1); loc->off_or_pc = off_or_pc; - g_hash_table_insert(table, (gpointer) off_or_pc, loc); + g_hash_table_insert(table, &loc->off_or_pc, loc); return loc; } @@ -224,12 +226,12 @@ static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, /* either track offsets or source of access */ if (source) { - off = (uint64_t) udata; + off = qemu_plugin_u64_get(source_pc, cpu_index); } if (pattern || source) { IOLocationCounts *io_count = g_hash_table_lookup(counts->detail, - (gpointer) off); + &off); if (!io_count) { io_count = new_location(counts->detail, off); } @@ -247,10 +249,14 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) for (i = 0; i < n; i++) { struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); - gpointer udata = (gpointer) (source ? qemu_plugin_insn_vaddr(insn) : 0); + if (source) { + uint64_t pc = qemu_plugin_insn_vaddr(insn); + qemu_plugin_register_vcpu_mem_inline_per_vcpu( + insn, rw, QEMU_PLUGIN_INLINE_STORE_U64, + source_pc, pc); + } qemu_plugin_register_vcpu_mem_cb(insn, vcpu_haddr, - QEMU_PLUGIN_CB_NO_REGS, - rw, udata); + QEMU_PLUGIN_CB_NO_REGS, rw, NULL); } } @@ -306,10 +312,9 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, return -1; } - /* Just warn about overflow */ - if (info->system.smp_vcpus > 64 || - info->system.max_vcpus > 64) { - fprintf(stderr, "hwprofile: can only track up to 64 CPUs\n"); + if (source) { + source_pc_scoreboard = qemu_plugin_scoreboard_new(sizeof(uint64_t)); + source_pc = qemu_plugin_scoreboard_u64(source_pc_scoreboard); } plugin_init(); diff --git a/contrib/plugins/ips.c b/contrib/plugins/ips.c index 29fa556..f110c56 100644 --- a/contrib/plugins/ips.c +++ b/contrib/plugins/ips.c @@ -129,20 +129,62 @@ static void plugin_exit(qemu_plugin_id_t id, void *udata) qemu_plugin_scoreboard_free(vcpus); } +typedef struct { + const char *suffix; + unsigned long multipler; +} ScaleEntry; + +/* a bit like units.h but not binary */ +static const ScaleEntry scales[] = { + { "k", 1000 }, + { "m", 1000 * 1000 }, + { "g", 1000 * 1000 * 1000 }, +}; + QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { + bool ipq_set = false; + for (int i = 0; i < argc; i++) { char *opt = argv[i]; g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); if (g_strcmp0(tokens[0], "ips") == 0) { - max_insn_per_second = g_ascii_strtoull(tokens[1], NULL, 10); + char *endptr = NULL; + max_insn_per_second = g_ascii_strtoull(tokens[1], &endptr, 10); if (!max_insn_per_second && errno) { fprintf(stderr, "%s: couldn't parse %s (%s)\n", __func__, tokens[1], g_strerror(errno)); return -1; } + + if (endptr && *endptr != 0) { + g_autofree gchar *lower = g_utf8_strdown(endptr, -1); + unsigned long scale = 0; + + for (int j = 0; j < G_N_ELEMENTS(scales); j++) { + if (g_strcmp0(lower, scales[j].suffix) == 0) { + scale = scales[j].multipler; + break; + } + } + + if (scale) { + max_insn_per_second *= scale; + } 
else { + fprintf(stderr, "bad suffix: %s\n", endptr); + return -1; + } + } + } else if (g_strcmp0(tokens[0], "ipq") == 0) { + max_insn_per_quantum = g_ascii_strtoull(tokens[1], NULL, 10); + + if (!max_insn_per_quantum) { + fprintf(stderr, "bad ipq value: %s\n", tokens[0]); + return -1; + } + ipq_set = true; } else { fprintf(stderr, "option parsing failed: %s\n", opt); return -1; @@ -150,7 +192,16 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, } vcpus = qemu_plugin_scoreboard_new(sizeof(vCPUTime)); - max_insn_per_quantum = max_insn_per_second / NUM_TIME_UPDATE_PER_SEC; + + if (!ipq_set) { + max_insn_per_quantum = max_insn_per_second / NUM_TIME_UPDATE_PER_SEC; + } + + if (max_insn_per_quantum == 0) { + fprintf(stderr, "minimum of %d instructions per second needed\n", + NUM_TIME_UPDATE_PER_SEC); + return -1; + } time_handle = qemu_plugin_request_time_control(); g_assert(time_handle); diff --git a/contrib/plugins/lockstep.c b/contrib/plugins/lockstep.c index 237543b..62981d4 100644 --- a/contrib/plugins/lockstep.c +++ b/contrib/plugins/lockstep.c @@ -14,7 +14,8 @@ * particular run may execute the exact same sequence of blocks. An * asynchronous event (for example X11 graphics update) may cause a * block to end early and a new partial block to start. This means - * serial only test cases are a better bet. -d nochain may also help. + * serial only test cases are a better bet. -d nochain may also help + * as well as -accel tcg,one-insn-per-tb=on * * This code is not thread safe! * @@ -57,7 +58,7 @@ typedef struct { /* The execution state we compare */ typedef struct { uint64_t pc; - unsigned long insn_count; + uint64_t insn_count; } ExecState; typedef struct { @@ -100,6 +101,31 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) plugin_cleanup(id); } +/* + * g_memdup has been deprecated in Glib since 2.68 and + * will complain about it if you try to use it. However until + * glib_req_ver for QEMU is bumped we make a copy of the glib-compat + * handler. + */ +static inline gpointer g_memdup2_qemu(gconstpointer mem, gsize byte_size) +{ +#if GLIB_CHECK_VERSION(2, 68, 0) + return g_memdup2(mem, byte_size); +#else + gpointer new_mem; + + if (mem && byte_size != 0) { + new_mem = g_malloc(byte_size); + memcpy(new_mem, mem, byte_size); + } else { + new_mem = NULL; + } + + return new_mem; +#endif +} +#define g_memdup2(m, s) g_memdup2_qemu(m, s) + static void report_divergance(ExecState *us, ExecState *them) { DivergeState divrec = { log, 0 }; @@ -134,10 +160,13 @@ static void report_divergance(ExecState *us, ExecState *them) /* Output short log entry of going out of sync... 
*/ if (verbose || divrec.distance == 1 || diverged) { - g_string_printf(out, - "@ 0x%016" PRIx64 " vs 0x%016" PRIx64 + g_string_printf(out, "@ " + "0x%016" PRIx64 " (%" PRId64 ") vs " + "0x%016" PRIx64 " (%" PRId64 ")" " (%d/%d since last)\n", - us->pc, them->pc, g_slist_length(divergence_log), + us->pc, us->insn_count, + them->pc, them->insn_count, + g_slist_length(divergence_log), divrec.distance); qemu_plugin_outs(out->str); } @@ -146,10 +175,7 @@ static void report_divergance(ExecState *us, ExecState *them) int i; GSList *entry; - g_string_printf(out, - "Δ insn_count @ 0x%016" PRIx64 - " (%ld) vs 0x%016" PRIx64 " (%ld)\n", - us->pc, us->insn_count, them->pc, them->insn_count); + g_string_printf(out, "Δ too high, we have diverged, previous insns\n"); for (entry = log, i = 0; g_slist_next(entry) && i < 5; @@ -162,7 +188,7 @@ static void report_divergance(ExecState *us, ExecState *them) prev->insn_count); } qemu_plugin_outs(out->str); - qemu_plugin_outs("too much divergence... giving up."); + qemu_plugin_outs("giving up\n"); qemu_plugin_uninstall(our_id, plugin_cleanup); } } @@ -347,7 +373,7 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, return -1; } } else if (g_strcmp0(tokens[0], "sockpath") == 0) { - sock_path = tokens[1]; + sock_path = g_strdup(tokens[1]); } else { fprintf(stderr, "option parsing failed: %s\n", p); return -1; diff --git a/contrib/plugins/meson.build b/contrib/plugins/meson.build new file mode 100644 index 0000000..1876bc7 --- /dev/null +++ b/contrib/plugins/meson.build @@ -0,0 +1,30 @@ +contrib_plugins = ['bbv', 'cache', 'cflow', 'drcov', 'execlog', 'hotblocks', + 'hotpages', 'howvec', 'hwprofile', 'ips', 'stoptrigger'] +if host_os != 'windows' + # lockstep uses socket.h + contrib_plugins += 'lockstep' +endif + +t = [] +if get_option('plugins') + foreach i : contrib_plugins + if host_os == 'windows' + t += shared_module(i, files(i + '.c') + 'win32_linker.c', + include_directories: '../../include/qemu', + link_depends: [win32_qemu_plugin_api_lib], + link_args: win32_qemu_plugin_api_link_flags, + dependencies: glib) + else + t += shared_module(i, files(i + '.c'), + include_directories: '../../include/qemu', + dependencies: glib) + endif + endforeach +endif +if t.length() > 0 + alias_target('contrib-plugins', t) +else + run_target('contrib-plugins', command: [python, '-c', '']) +endif + +plugin_modules += t diff --git a/contrib/plugins/stoptrigger.c b/contrib/plugins/stoptrigger.c new file mode 100644 index 0000000..b3a6ed6 --- /dev/null +++ b/contrib/plugins/stoptrigger.c @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2024, Simon Hamelin <simon.hamelin@grenoble-inp.org> + * + * Stop execution once a given address is reached or if the + * count of executed instructions reached a specified limit + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include <assert.h> +#include <glib.h> +#include <inttypes.h> +#include <stdio.h> +#include <stdlib.h> + +#include <qemu-plugin.h> + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +/* Scoreboard to track executed instructions count */ +typedef struct { + uint64_t insn_count; + uint64_t current_pc; +} InstructionsCount; +static struct qemu_plugin_scoreboard *insn_count_sb; +static qemu_plugin_u64 insn_count; +static qemu_plugin_u64 current_pc; + +static uint64_t icount; +static int icount_exit_code; + +static bool exit_on_icount; +static bool exit_on_address; + +/* Map trigger addresses to exit code */ +static GHashTable *addrs_ht; + +typedef struct { + uint64_t exit_addr; + int exit_code; +} ExitInfo; + +static void exit_emulation(int return_code, char *message) +{ + qemu_plugin_outs(message); + g_free(message); + exit(return_code); +} + +static void exit_icount_reached(unsigned int cpu_index, void *udata) +{ + uint64_t insn_vaddr = qemu_plugin_u64_get(current_pc, cpu_index); + char *msg = g_strdup_printf("icount reached at 0x%" PRIx64 ", exiting\n", + insn_vaddr); + exit_emulation(icount_exit_code, msg); +} + +static void exit_address_reached(unsigned int cpu_index, void *udata) +{ + ExitInfo *ei = udata; + g_assert(ei); + char *msg = g_strdup_printf("0x%" PRIx64 " reached, exiting\n", ei->exit_addr); + exit_emulation(ei->exit_code, msg); +} + +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) +{ + size_t tb_n = qemu_plugin_tb_n_insns(tb); + for (size_t i = 0; i < tb_n; i++) { + struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); + uint64_t insn_vaddr = qemu_plugin_insn_vaddr(insn); + + if (exit_on_icount) { + /* Increment and check scoreboard for each instruction */ + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( + insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1); + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( + insn, QEMU_PLUGIN_INLINE_STORE_U64, current_pc, insn_vaddr); + qemu_plugin_register_vcpu_insn_exec_cond_cb( + insn, exit_icount_reached, QEMU_PLUGIN_CB_NO_REGS, + QEMU_PLUGIN_COND_EQ, insn_count, icount + 1, NULL); + } + + if (exit_on_address) { + ExitInfo *ei = g_hash_table_lookup(addrs_ht, &insn_vaddr); + if (ei) { + /* Exit triggered by address */ + qemu_plugin_register_vcpu_insn_exec_cb( + insn, exit_address_reached, QEMU_PLUGIN_CB_NO_REGS, ei); + } + } + } +} + +static void plugin_exit(qemu_plugin_id_t id, void *p) +{ + g_hash_table_destroy(addrs_ht); + qemu_plugin_scoreboard_free(insn_count_sb); +} + +QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, + const qemu_info_t *info, int argc, + char **argv) +{ + addrs_ht = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, g_free); + + insn_count_sb = qemu_plugin_scoreboard_new(sizeof(InstructionsCount)); + insn_count = qemu_plugin_scoreboard_u64_in_struct( + insn_count_sb, InstructionsCount, insn_count); + current_pc = qemu_plugin_scoreboard_u64_in_struct( + insn_count_sb, InstructionsCount, current_pc); + + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "icount") == 0) { + g_auto(GStrv) icount_tokens = g_strsplit(tokens[1], ":", 2); + icount = g_ascii_strtoull(icount_tokens[0], NULL, 0); + if (icount < 1 || g_strrstr(icount_tokens[0], "-") != NULL) { + fprintf(stderr, + "icount parsing failed: '%s' must be a positive " + "integer\n", + icount_tokens[0]); + return -1; + } + if (icount_tokens[1]) { + icount_exit_code = 
g_ascii_strtoull(icount_tokens[1], NULL, 0); + } + exit_on_icount = true; + } else if (g_strcmp0(tokens[0], "addr") == 0) { + g_auto(GStrv) addr_tokens = g_strsplit(tokens[1], ":", 2); + ExitInfo *ei = g_malloc(sizeof(ExitInfo)); + ei->exit_addr = g_ascii_strtoull(addr_tokens[0], NULL, 0); + ei->exit_code = 0; + if (addr_tokens[1]) { + ei->exit_code = g_ascii_strtoull(addr_tokens[1], NULL, 0); + } + g_hash_table_insert(addrs_ht, &ei->exit_addr, ei); + exit_on_address = true; + } else { + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; + } + } + + if (!exit_on_icount && !exit_on_address) { + fprintf(stderr, "'icount' or 'addr' argument missing\n"); + return -1; + } + + /* Register translation block and exit callbacks */ + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); + + return 0; +} |
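Note (not part of the commit): the new plugins above (bbv.c, ips.c, stoptrigger.c) all share the same scoreboard-plus-conditional-callback pattern: a cheap inline counter is bumped on every translated block, and a full callback fires only when the per-vCPU counter crosses a threshold. The sketch below is a minimal, self-contained illustration of that pattern assembled from the API calls visible in the diff; the file name, the default interval value and the log message are invented for illustration and are not part of QEMU.

/*
 * interval.c - minimal sketch of the scoreboard + conditional callback
 * pattern used by bbv.c, ips.c and stoptrigger.c. Prints a line roughly
 * every `interval` executed instructions per vCPU. Illustration only.
 */
#include <stdio.h>
#include <inttypes.h>
#include <glib.h>

#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

typedef struct {
    uint64_t insn_count;            /* per-vCPU executed instruction count */
} Counts;

static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 insn_count;
static uint64_t interval = 1000000; /* arbitrary default for the sketch */

/* Fires only when insn_count >= interval, i.e. at most once per window */
static void interval_reached(unsigned int vcpu_index, void *udata)
{
    g_autofree char *msg =
        g_strdup_printf("vcpu %u: %" PRIu64 " insns executed\n", vcpu_index,
                        qemu_plugin_u64_get(insn_count, vcpu_index));
    qemu_plugin_outs(msg);
    /* restart the window for this vCPU */
    qemu_plugin_u64_set(insn_count, vcpu_index, 0);
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    /* cheap inline increment on every execution of this block... */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, insn_count,
        qemu_plugin_tb_n_insns(tb));
    /* ...and a full callback only once the threshold is crossed */
    qemu_plugin_register_vcpu_tb_exec_cond_cb(
        tb, interval_reached, QEMU_PLUGIN_CB_NO_REGS,
        QEMU_PLUGIN_COND_GE, insn_count, interval, NULL);
}

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    qemu_plugin_scoreboard_free(counts);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    counts = qemu_plugin_scoreboard_new(sizeof(Counts));
    insn_count = qemu_plugin_scoreboard_u64_in_struct(counts, Counts,
                                                      insn_count);
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}

The scoreboard keeps one Counts slot per vCPU, so the inline add needs no locking, and the conditional callback avoids paying for a C call on every block the way a plain exec callback would.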