@@ -1041,12 +1041,15 @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
     __attribute__((aligned(CODE_GEN_ALIGN)));
 
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool no_rwx_pages)
 {
     void *buf = static_code_gen_buffer;
     void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
     size_t size;
 
+    /* not applicable */
+    assert(!no_rwx_pages);
+
     /* page-align the beginning and end of the buffer */
     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
@@ -1075,24 +1078,32 @@ static inline void *alloc_code_gen_buffer(void)
     return buf;
 }
 #elif defined(_WIN32)
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool no_rwx_pages)
 {
     size_t size = tcg_ctx->code_gen_buffer_size;
+    assert(!no_rwx_pages); /* not applicable */
     return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_EXECUTE_READWRITE);
 }
 #else
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool no_rwx_pages)
 {
-#if defined(CONFIG_IOS_JIT)
     int prot = PROT_READ | PROT_EXEC;
-#else
-    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
-#endif
     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
     size_t size = tcg_ctx->code_gen_buffer_size;
     void *buf;
 
+#if defined(CONFIG_DARWIN) /* applicable to both iOS and macOS (Apple Silicon) */
+    if (!no_rwx_pages) {
+        prot |= PROT_WRITE;
+        flags |= MAP_JIT;
+    }
+#else
+    /* not applicable */
+    assert(!no_rwx_pages);
+    prot |= PROT_WRITE;
+#endif
+
     buf = mmap(NULL, size, prot, flags, -1, 0);
     if (buf == MAP_FAILED) {
         return NULL;
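
A note on the Darwin branch above: on Apple Silicon, a MAP_JIT mapping can be requested RWX, but actual access is gated by a thread-local write-protection switch. The sketch below shows the usual write/execute dance around such a region; it is not part of this patch, and pthread_jit_write_protect_np() is a macOS 11+ API that is unavailable on iOS, which is why the no_rwx_pages path exists at all.

#include <pthread.h>
#include <string.h>
#include <sys/mman.h>

/* Minimal sketch: emit `len` bytes of code into a buffer that was
 * mmap()ed with MAP_JIT, as in the Darwin hunk above. */
static void jit_emit(void *buf, const void *code, size_t len)
{
    pthread_jit_write_protect_np(0);   /* this thread: writable, not executable */
    memcpy(buf, code, len);
    pthread_jit_write_protect_np(1);   /* this thread: executable, not writable */
    __builtin___clear_cache((char *)buf, (char *)buf + len);
}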
@@ -1172,10 +1183,10 @@ static inline void *alloc_jit_rw_mirror(void *base, size_t size)
 }
 #endif /* CONFIG_IOS_JIT */
 
-static inline void code_gen_alloc(size_t tb_size)
+static inline void code_gen_alloc(size_t tb_size, bool mirror_rwx)
 {
     tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
-    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
+    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer(mirror_rwx);
     if (tcg_ctx->code_gen_buffer == NULL) {
         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
         exit(1);
@@ -1183,13 +1194,18 @@ static inline void code_gen_alloc(size_t tb_size)
 #if defined(CONFIG_IOS_JIT)
     void *mirror;
 
-    /* For iOS JIT we need a mirror mapping for code execution */
-    mirror = alloc_jit_rw_mirror(tcg_ctx->code_gen_buffer,
-                                 tcg_ctx->code_gen_buffer_size
-                                );
-    if (mirror == NULL) {
-        fprintf(stderr, "Could not remap code buffer mirror\n");
-        exit(1);
+    if (mirror_rwx) {
+        /* For iOS JIT we need a mirror mapping for code execution */
+        mirror = alloc_jit_rw_mirror(tcg_ctx->code_gen_buffer,
+                                     tcg_ctx->code_gen_buffer_size
+                                    );
+        if (mirror == NULL) {
+            fprintf(stderr, "Could not remap code buffer mirror\n");
+            exit(1);
+        }
+    } else {
+        /* with JIT entitlements the buffer is directly writable */
+        mirror = tcg_ctx->code_gen_buffer;
     }
     tcg_ctx->code_rw_mirror_diff = mirror - tcg_ctx->code_gen_buffer;
 #endif /* CONFIG_IOS_JIT */
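
For context, code_rw_mirror_diff records the distance between the executable (RX) mapping and its writable (RW) alias; when mirror_rwx is false the two coincide and the diff is zero. A hypothetical helper (not from the patch) showing how such a diff is consumed:

#include <stddef.h>
#include <string.h>

extern ptrdiff_t code_rw_mirror_diff; /* stands in for tcg_ctx->code_rw_mirror_diff */

/* Write translated code through the RW alias, then flush the icache
 * for the RX address that will actually be executed. */
static void write_code(void *rx_ptr, const void *code, size_t len)
{
    char *rw_ptr = (char *)rx_ptr + code_rw_mirror_diff;
    memcpy(rw_ptr, code, len);
    __builtin___clear_cache((char *)rx_ptr, (char *)rx_ptr + len);
}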
@@ -1218,14 +1234,14 @@ static void tb_htable_init(void)
 
 /* Must be called before using the QEMU cpus. 'tb_size' is the size
    (in bytes) allocated to the translation buffer. Zero means default
-   size. */
-void tcg_exec_init(unsigned long tb_size)
+   size. mirror_rwx is only applicable on iOS. */
+void tcg_exec_init(unsigned long tb_size, bool mirror_rwx)
 {
     tcg_allowed = true;
     cpu_gen_init();
     page_init();
     tb_htable_init();
-    code_gen_alloc(tb_size);
+    code_gen_alloc(tb_size, mirror_rwx);
 #if defined(CONFIG_SOFTMMU)
     /* There's no guest base to take into account, so go ahead and
        initialize the prologue now. */
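
Finally, a hypothetical caller (not part of this patch) to show how the new tcg_exec_init() signature is meant to be driven; os_has_jit_entitlement() is a made-up probe for whether the process may create RWX/MAP_JIT pages:

#include <stdbool.h>

extern bool os_has_jit_entitlement(void); /* hypothetical entitlement probe */

void init_tcg(void)
{
    bool need_mirror = false;
#if defined(CONFIG_IOS_JIT)
    /* Without a JIT entitlement, RWX pages are unavailable on iOS, so
       request the RW mirror path. */
    need_mirror = !os_has_jit_entitlement();
#endif
    tcg_exec_init(0, need_mirror); /* 0 selects the default buffer size */
}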