@@ -606,15 +606,13 @@ static inline void *alloc_code_gen_buffer(void)
 {
     void *buf = static_code_gen_buffer;
     void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
-    size_t full_size, size;
+    size_t size;
 
     /* page-align the beginning and end of the buffer */
     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
 
-    /* Reserve a guard page. */
-    full_size = end - buf;
-    size = full_size - qemu_real_host_page_size;
+    size = end - buf;
 
     /* Honor a command-line option limiting the size of the buffer. */
     if (size > tcg_ctx->code_gen_buffer_size) {
@@ -633,9 +631,6 @@ static inline void *alloc_code_gen_buffer(void)
     if (qemu_mprotect_rwx(buf, size)) {
         abort();
     }
-    if (qemu_mprotect_none(buf + size, qemu_real_host_page_size)) {
-        abort();
-    }
     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
 
     return buf;
@@ -644,22 +639,16 @@ static inline void *alloc_code_gen_buffer(void)
 static inline void *alloc_code_gen_buffer(void)
 {
     size_t size = tcg_ctx->code_gen_buffer_size;
-    void *buf1, *buf2;
-
-    /* Perform the allocation in two steps, so that the guard page
-       is reserved but uncommitted. */
-    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
-                        MEM_RESERVE, PAGE_NOACCESS);
-    if (buf1 != NULL) {
-        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
-        assert(buf1 == buf2);
-    }
+    void *buf;
 
-    return buf1;
+    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
+                       PAGE_EXECUTE_READWRITE);
+    return buf;
 }
 #else
 static inline void *alloc_code_gen_buffer(void)
 {
+    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
     uintptr_t start = 0;
     size_t size = tcg_ctx->code_gen_buffer_size;
@@ -693,8 +682,7 @@ static inline void *alloc_code_gen_buffer(void)
 # endif
 # endif
 
-    buf = mmap((void *)start, size + qemu_real_host_page_size,
-               PROT_NONE, flags, -1, 0);
+    buf = mmap((void *)start, size, prot, flags, -1, 0);
     if (buf == MAP_FAILED) {
         return NULL;
     }
@@ -704,24 +692,23 @@ static inline void *alloc_code_gen_buffer(void)
         /* Try again, with the original still mapped, to avoid re-acquiring
            that 256mb crossing. This time don't specify an address. */
         size_t size2;
-        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
-                          PROT_NONE, flags, -1, 0);
+        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
         switch ((int)(buf2 != MAP_FAILED)) {
         case 1:
             if (!cross_256mb(buf2, size)) {
                 /* Success! Use the new buffer. */
-                munmap(buf, size + qemu_real_host_page_size);
+                munmap(buf, size);
                 break;
             }
             /* Failure. Work with what we had. */
-            munmap(buf2, size + qemu_real_host_page_size);
+            munmap(buf2, size);
             /* fallthru */
         default:
             /* Split the original buffer. Free the smaller half. */
             buf2 = split_cross_256mb(buf, size);
             size2 = tcg_ctx->code_gen_buffer_size;
             if (buf == buf2) {
-                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
+                munmap(buf + size2, size - size2);
             } else {
                 munmap(buf, size - size2);
             }
@@ -732,10 +719,6 @@ static inline void *alloc_code_gen_buffer(void)
     }
 #endif
 
-    /* Make the final buffer accessible. The guard page at the end
-       will remain inaccessible with PROT_NONE. */
-    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
-
     /* Request large pages for the buffer. */
     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
 
@@ -916,13 +899,8 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
         size_t host_size = 0;
 
         g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
-        printf("qemu: flush code_size=%td nb_tbs=%zu avg_tb_size=%zu\n",
-               tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer, nb_tbs,
-               nb_tbs > 0 ? host_size / nb_tbs : 0);
-    }
-    if ((unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer)
-        > tcg_ctx->code_gen_buffer_size) {
-        cpu_abort(cpu, "Internal error: code buffer overflow\n");
+        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
+               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
     }
 
     CPU_FOREACH(cpu) {
@@ -936,7 +914,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
     page_flush_tb();
 
-    tcg_ctx->code_gen_ptr = tcg_ctx->code_gen_buffer;
+    tcg_region_reset_all();
     /* XXX: flush processor icache at this point if cache flush is
        expensive */
     atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
@@ -1274,9 +1252,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 
     phys_pc = get_page_addr_code(env, pc);
 
+ buffer_overflow:
     tb = tb_alloc(pc);
     if (unlikely(!tb)) {
- buffer_overflow:
         /* flush must be done */
         tb_flush(cpu);
         mmap_unlock();
@@ -1380,9 +1358,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     }
 #endif
 
-    tcg_ctx->code_gen_ptr = (void *)
+    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
-                 CODE_GEN_ALIGN);
+                 CODE_GEN_ALIGN));
 
     /* init jump list */
     assert(((uintptr_t)tb & 3) == 0);
@@ -1908,9 +1886,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
      * otherwise users might think "-tb-size" is not honoured.
      * For avg host size we use the precise numbers from tb_tree_stats though.
      */
-    cpu_fprintf(f, "gen code size       %td/%zd\n",
-                tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer,
-                tcg_ctx->code_gen_highwater - tcg_ctx->code_gen_buffer);
+    cpu_fprintf(f, "gen code size       %zu/%zu\n",
+                tcg_code_size(), tcg_code_capacity());
     cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
     cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
                 nb_tbs ? tst.target_size / nb_tbs : 0,