@@ -58,6 +58,7 @@
 #include "sysemu/tcg.h"
 #include "qapi/error.h"
 #include "hw/core/tcg-cpu-ops.h"
+#include "tb-jmp-cache.h"
 #include "tb-hash.h"
 #include "tb-context.h"
 #include "internal.h"
@@ -967,7 +968,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     }
 
     CPU_FOREACH(cpu) {
-        cpu_tb_jmp_cache_clear(cpu);
+        tcg_flush_jmp_cache(cpu);
     }
 
     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
@@ -1187,8 +1188,9 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
     CPU_FOREACH(cpu) {
-        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
-            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
+        CPUJumpCache *jc = cpu->tb_jmp_cache;
+        if (qatomic_read(&jc->array[h].tb) == tb) {
+            qatomic_set(&jc->array[h].tb, NULL);
         }
     }
 
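(For context: the jc->array[h].tb accesses above depend on the per-CPU
cache type this patch introduces. A minimal sketch of the assumed
declarations follows; the real definitions live in the new
accel/tcg/tb-jmp-cache.h header, which is not shown in this diff, and
the cache size given is an assumption based on the historic
TB_JMP_CACHE_BITS value.)

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

struct CPUJumpCache {
    struct {
        TranslationBlock *tb;   /* read/written only via qatomic_*() */
    } array[TB_JMP_CACHE_SIZE];
};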
@@ -2443,6 +2445,26 @@ int page_unprotect(target_ulong address, uintptr_t pc)
 }
 #endif /* CONFIG_USER_ONLY */
 
+/*
+ * Called by generic code at e.g. cpu reset after cpu creation,
+ * therefore we must be prepared to allocate the jump cache.
+ */
+void tcg_flush_jmp_cache(CPUState *cpu)
+{
+    CPUJumpCache *jc = cpu->tb_jmp_cache;
+
+    if (likely(jc)) {
+        for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+            qatomic_set(&jc->array[i].tb, NULL);
+        }
+    } else {
+        /* This should happen once during realize, and thus never race. */
+        jc = g_new0(CPUJumpCache, 1);
+        jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
+        assert(jc == NULL);
+    }
+}
+
 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
 void tcg_flush_softmmu_tlb(CPUState *cs)
 {
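(The allocate-on-first-flush logic above means readers can rely on a
non-NULL cpu->tb_jmp_cache once the CPU is realized: cpu reset triggers
the first tcg_flush_jmp_cache() before the vCPU thread runs, which is
why the qatomic_xchg()/assert() pairing can insist the swap never races.
Below is an illustrative consumer-side sketch, modelled loosely on
QEMU's tb_lookup() fast path rather than copied from it; the real lookup
also matches cs_base, flags and cflags, and the helper name
jmp_cache_probe is hypothetical.)

static inline TranslationBlock *jmp_cache_probe(CPUState *cpu,
                                                target_ulong pc)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;   /* non-NULL after realize */
    uint32_t h = tb_jmp_cache_hash_func(pc);
    TranslationBlock *tb = qatomic_read(&jc->array[h].tb);

    if (tb == NULL || tb->pc != pc) {
        /* Miss: caller falls back to tb_htable_lookup() and refills. */
        return NULL;
    }
    return tb;
}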