| author    | Richard Henderson <richard.henderson@linaro.org> | 2021-03-10 12:34:27 -0600 |
|-----------|---------------------------------------------------|---------------------------|
| committer | Richard Henderson <richard.henderson@linaro.org> | 2021-06-13 17:42:03 -0700 |
| commit    | 032a4b1ba09ab15bb9331a75d49db186e782c00c           |                           |
| tree      | bc8f15a896f0093c74babca6f4ec0a6dfdf0a968 /tcg/region.c |                       |
| parent    | 47d590df34b22595f1a6f9e8aafe5531cd2e4b13           |                           |
tcg: Allocate code_gen_buffer into struct tcg_region_state
Do not mess around with setting values within tcg_init_ctx.
Put the values into 'region' directly, which is where they
will live for the lifetime of the program.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
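
For orientation, the sketch below lists the fields of the global `region` state that this patch starts populating (`start_aligned`, `total_size`, and, in `tcg_region_init()`, `n`, `size`, `stride`, `after_prologue`, `lock`). It is an illustrative reconstruction inferred from the hunks below, not the upstream definition of `struct tcg_region_state`; the types, comments and field order are assumptions.

```c
/*
 * Illustrative sketch only: the tcg_region_state fields touched by this
 * patch, inferred from the diff below.  Types, comments and ordering are
 * assumptions, not the exact upstream definition.
 */
#include <stddef.h>

struct tcg_region_state {
    /* QemuMutex lock;  -- omitted here, as it needs qemu/thread.h */
    void *start_aligned;   /* page-aligned base of code_gen_buffer */
    void *after_prologue;  /* where the first region's code begins */
    size_t n;              /* number of regions */
    size_t size;           /* usable bytes per region (guard page excluded) */
    size_t stride;         /* distance between consecutive region starts */
    size_t total_size;     /* usable bytes of the whole buffer */
};

static struct tcg_region_state region;
```

With the buffer description living here, in_code_gen_buffer() and the allocators no longer need to reach into tcg_init_ctx or tcg_ctx at all.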
Diffstat (limited to 'tcg/region.c')
-rw-r--r-- | tcg/region.c | 64
1 file changed, 27 insertions(+), 37 deletions(-)
diff --git a/tcg/region.c b/tcg/region.c
index 5beba41..afa11ec 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -70,13 +70,12 @@ static size_t tree_size;
 
 bool in_code_gen_buffer(const void *p)
 {
-    const TCGContext *s = &tcg_init_ctx;
     /*
      * Much like it is valid to have a pointer to the byte past the
      * end of an array (so long as you don't dereference it), allow
      * a pointer to the byte past the end of the code gen buffer.
      */
-    return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
+    return (size_t)(p - region.start_aligned) <= region.total_size;
 }
 
 #ifdef CONFIG_DEBUG_TCG
@@ -562,8 +561,8 @@ static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
     }
     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
 
-    tcg_ctx->code_gen_buffer = buf;
-    tcg_ctx->code_gen_buffer_size = size;
+    region.start_aligned = buf;
+    region.total_size = size;
     return true;
 }
 #elif defined(_WIN32)
@@ -584,8 +583,8 @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
         return false;
     }
 
-    tcg_ctx->code_gen_buffer = buf;
-    tcg_ctx->code_gen_buffer_size = size;
+    region.start_aligned = buf;
+    region.total_size = size;
     return true;
 }
 #else
@@ -637,8 +636,8 @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
     /* Request large pages for the buffer. */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
 
-    tcg_ctx->code_gen_buffer = buf;
-    tcg_ctx->code_gen_buffer_size = size;
+    region.start_aligned = buf;
+    region.total_size = size;
     return true;
 }
 
@@ -659,8 +658,8 @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
         return false;
     }
     /* The size of the mapping may have been adjusted. */
-    size = tcg_ctx->code_gen_buffer_size;
-    buf_rx = tcg_ctx->code_gen_buffer;
+    buf_rx = region.start_aligned;
+    size = region.total_size;
 #endif
 
     buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
@@ -682,8 +681,8 @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
 #endif
 
     close(fd);
-    tcg_ctx->code_gen_buffer = buf_rw;
-    tcg_ctx->code_gen_buffer_size = size;
+    region.start_aligned = buf_rw;
+    region.total_size = size;
     tcg_splitwx_diff = buf_rx - buf_rw;
 
     /* Request large pages for the buffer and the splitwx. */
@@ -734,7 +733,7 @@ static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
         return false;
     }
 
-    buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
+    buf_rw = (mach_vm_address_t)region.start_aligned;
     buf_rx = 0;
     ret = mach_vm_remap(mach_task_self(),
                         &buf_rx,
@@ -846,11 +845,8 @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
  */
 void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
 {
-    void *buf, *aligned, *end;
-    size_t total_size;
     size_t page_size;
     size_t region_size;
-    size_t n_regions;
     size_t i;
     bool ok;
 
@@ -858,39 +854,33 @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
                                splitwx, &error_fatal);
     assert(ok);
 
-    buf = tcg_init_ctx.code_gen_buffer;
-    total_size = tcg_init_ctx.code_gen_buffer_size;
-    page_size = qemu_real_host_page_size;
-    n_regions = tcg_n_regions(total_size, max_cpus);
-
-    /* The first region will be 'aligned - buf' bytes larger than the others */
-    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
-    g_assert(aligned < tcg_init_ctx.code_gen_buffer + total_size);
-
     /*
      * Make region_size a multiple of page_size, using aligned as the start.
      * As a result of this we might end up with a few extra pages at the end of
      * the buffer; we will assign those to the last region.
      */
-    region_size = (total_size - (aligned - buf)) / n_regions;
+    region.n = tcg_n_regions(region.total_size, max_cpus);
+    page_size = qemu_real_host_page_size;
+    region_size = region.total_size / region.n;
     region_size = QEMU_ALIGN_DOWN(region_size, page_size);
 
     /* A region must have at least 2 pages; one code, one guard */
     g_assert(region_size >= 2 * page_size);
+    region.stride = region_size;
+
+    /* Reserve space for guard pages. */
+    region.size = region_size - page_size;
+    region.total_size -= page_size;
+
+    /*
+     * The first region will be smaller than the others, via the prologue,
+     * which has yet to be allocated. For now, the first region begins at
+     * the page boundary.
+     */
+    region.after_prologue = region.start_aligned;
 
     /* init the region struct */
     qemu_mutex_init(&region.lock);
-    region.n = n_regions;
-    region.size = region_size - page_size;
-    region.stride = region_size;
-    region.after_prologue = buf;
-    region.start_aligned = aligned;
-    /* page-align the end, since its last page will be a guard page */
-    end = QEMU_ALIGN_PTR_DOWN(buf + total_size, page_size);
-    /* account for that last guard page */
-    end -= page_size;
-    total_size = end - aligned;
-    region.total_size = total_size;
 
     /*
      * Set guard pages in the rw buffer, as that's the one into which
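
As a rough aid to following the `tcg_region_init()` hunk above, this standalone program mirrors the same bookkeeping with made-up numbers: split the buffer into `n` page-aligned regions, keep the last page of each region as a guard page, and deduct the trailing guard page from the usable total. The buffer size, page size and region count below are placeholders standing in for `region.total_size`, `qemu_real_host_page_size` and the result of `tcg_n_regions()`; `ALIGN_DOWN` is a local stand-in for `QEMU_ALIGN_DOWN`. This is a sketch, not QEMU code.

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Local stand-in for QEMU_ALIGN_DOWN: round x down to a multiple of a. */
#define ALIGN_DOWN(x, a)  ((x) / (a) * (a))

int main(void)
{
    size_t total_size = (size_t)32 << 20; /* pretend the buffer is 32 MiB */
    size_t page_size  = 4096;             /* pretend host page size */
    size_t n          = 8;                /* pretend tcg_n_regions() said 8 */

    /* Divide the buffer into n regions, each a whole number of pages. */
    size_t region_size = ALIGN_DOWN(total_size / n, page_size);
    assert(region_size >= 2 * page_size); /* one code page + one guard page */

    size_t stride = region_size;              /* distance between region starts */
    size_t usable = region_size - page_size;  /* last page of each region is a guard */
    total_size -= page_size;                  /* the final guard page is not usable */

    printf("stride=%zu, usable per region=%zu, usable total=%zu\n",
           stride, usable, total_size);
    return 0;
}
```

The in_code_gen_buffer() change relies on the trick its comment describes: because `p - region.start_aligned` is cast to `size_t`, a pointer below the buffer wraps to a huge unsigned value, so a single comparison against `region.total_size` rejects both out-of-range directions while still accepting the one-past-the-end pointer.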