Diffstat (limited to 'accel/tcg/translate-all.c')
-rw-r--r--  accel/tcg/translate-all.c | 80
 1 file changed, 44 insertions(+), 36 deletions(-)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index fb9ebfa..8f17a91 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1991,7 +1991,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu_loop_exit_noexc(cpu);
}
-static void print_qht_statistics(struct qht_stats hst)
+static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
uint32_t hgram_opts;
size_t hgram_bins;
@@ -2000,9 +2000,11 @@ static void print_qht_statistics(struct qht_stats hst)
if (!hst.head_buckets) {
return;
}
- qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
- hst.used_head_buckets, hst.head_buckets,
- (double)hst.used_head_buckets / hst.head_buckets * 100);
+ g_string_append_printf(buf, "TB hash buckets %zu/%zu "
+ "(%0.2f%% head buckets used)\n",
+ hst.used_head_buckets, hst.head_buckets,
+ (double)hst.used_head_buckets /
+ hst.head_buckets * 100);
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
@@ -2010,8 +2012,9 @@ static void print_qht_statistics(struct qht_stats hst)
hgram_opts |= QDIST_PR_NODECIMAL;
}
hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
- qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
- qdist_avg(&hst.occupancy) * 100, hgram);
+ g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
+ "Histogram: %s\n",
+ qdist_avg(&hst.occupancy) * 100, hgram);
g_free(hgram);
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
@@ -2023,8 +2026,9 @@ static void print_qht_statistics(struct qht_stats hst)
hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
}
hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
- qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
- qdist_avg(&hst.chain), hgram);
+ g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
+ "Histogram: %s\n",
+ qdist_avg(&hst.chain), hgram);
g_free(hgram);
}
@@ -2061,7 +2065,7 @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
return false;
}
-void dump_exec_info(void)
+void dump_exec_info(GString *buf)
{
struct tb_tree_stats tst = {};
struct qht_stats hst;
@@ -2070,44 +2074,48 @@ void dump_exec_info(void)
tcg_tb_foreach(tb_tree_stats_iter, &tst);
nb_tbs = tst.nb_tbs;
/* XXX: avoid using doubles ? */
- qemu_printf("Translation buffer state:\n");
+ g_string_append_printf(buf, "Translation buffer state:\n");
/*
* Report total code size including the padding and TB structs;
* otherwise users might think "-accel tcg,tb-size" is not honoured.
* For avg host size we use the precise numbers from tb_tree_stats though.
*/
- qemu_printf("gen code size %zu/%zu\n",
- tcg_code_size(), tcg_code_capacity());
- qemu_printf("TB count %zu\n", nb_tbs);
- qemu_printf("TB avg target size %zu max=%zu bytes\n",
- nb_tbs ? tst.target_size / nb_tbs : 0,
- tst.max_target_size);
- qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
- nb_tbs ? tst.host_size / nb_tbs : 0,
- tst.target_size ? (double)tst.host_size / tst.target_size : 0);
- qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
- nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
- qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
- tst.direct_jmp_count,
- nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
- tst.direct_jmp2_count,
- nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
+ g_string_append_printf(buf, "gen code size %zu/%zu\n",
+ tcg_code_size(), tcg_code_capacity());
+ g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
+ g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
+ nb_tbs ? tst.target_size / nb_tbs : 0,
+ tst.max_target_size);
+ g_string_append_printf(buf, "TB avg host size %zu bytes "
+ "(expansion ratio: %0.1f)\n",
+ nb_tbs ? tst.host_size / nb_tbs : 0,
+ tst.target_size ?
+ (double)tst.host_size / tst.target_size : 0);
+ g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
+ tst.cross_page,
+ nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
+ g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
+ "(2 jumps=%zu %zu%%)\n",
+ tst.direct_jmp_count,
+ nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
+ tst.direct_jmp2_count,
+ nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
qht_statistics_init(&tb_ctx.htable, &hst);
- print_qht_statistics(hst);
+ print_qht_statistics(hst, buf);
qht_statistics_destroy(&hst);
- qemu_printf("\nStatistics:\n");
- qemu_printf("TB flush count %u\n",
- qatomic_read(&tb_ctx.tb_flush_count));
- qemu_printf("TB invalidate count %u\n",
- qatomic_read(&tb_ctx.tb_phys_invalidate_count));
+ g_string_append_printf(buf, "\nStatistics:\n");
+ g_string_append_printf(buf, "TB flush count %u\n",
+ qatomic_read(&tb_ctx.tb_flush_count));
+ g_string_append_printf(buf, "TB invalidate count %u\n",
+ qatomic_read(&tb_ctx.tb_phys_invalidate_count));
tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
- qemu_printf("TLB full flushes %zu\n", flush_full);
- qemu_printf("TLB partial flushes %zu\n", flush_part);
- qemu_printf("TLB elided flushes %zu\n", flush_elide);
- tcg_dump_info();
+ g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
+ g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
+ g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
+ tcg_dump_info(buf);
}
void dump_opcount_info(void)
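
Note: after this patch dump_exec_info() no longer prints anything itself; it only appends text, and the caller decides how to emit the accumulated buffer. A minimal caller sketch, assuming only the standard GLib GString API and QEMU's qemu_printf() helper (the example_show_jit_stats() wrapper is hypothetical and not part of this diff):

/* Hypothetical caller sketch: collect the JIT statistics into a
 * caller-owned GString, then print the whole buffer at once. */
#include <glib.h>
#include "qemu/qemu-print.h"        /* qemu_printf() */

void dump_exec_info(GString *buf);  /* prototype as changed by this patch */

void example_show_jit_stats(void)
{
    g_autoptr(GString) buf = g_string_new("");

    dump_exec_info(buf);            /* appends TB/TLB statistics to buf */
    qemu_printf("%s", buf->str);    /* emit the accumulated text */
}

Accumulating into a GString rather than printing directly lets the same statistics text be routed to different consumers (e.g. a monitor command) without each helper needing to know where the output goes.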