
accel/tcg: Add cpu_in_serial_context

Like cpu_in_exclusive_context, but also true if
there is no other cpu against which we could race.

Use it in tb_flush as a direct replacement.
Use it in cpu_loop_exit_atomic to ensure that there
is no loop against cpu_exec_step_atomic.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson, 2 years ago
commit 9877ea05de
3 changed files with 13 additions and 1 deletion

  1. accel/tcg/cpu-exec-common.c   +3  -0
  2. accel/tcg/internal.h          +9  -0
  3. accel/tcg/tb-maint.c          +1  -1

accel/tcg/cpu-exec-common.c  +3 -0

@@ -22,6 +22,7 @@
 #include "sysemu/tcg.h"
 #include "exec/exec-all.h"
 #include "qemu/plugin.h"
+#include "internal.h"
 
 bool tcg_allowed;
 
@@ -81,6 +82,8 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
 
 void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
 {
+    /* Prevent looping if already executing in a serial context. */
+    g_assert(!cpu_in_serial_context(cpu));
     cpu->exception_index = EXCP_ATOMIC;
     cpu_loop_exit_restore(cpu, pc);
 }
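
For context: EXCP_ATOMIC is serviced by re-executing the offending translation
block inside an exclusive section (cpu_exec_step_atomic), where
cpu_in_serial_context() is true for the calling vCPU. If that serially
executed code raised EXCP_ATOMIC again, the main loop would retry the
exclusive step forever; the new assertion turns that silent livelock into an
immediate failure. A minimal sketch of the relationship (simplified, with a
hypothetical function name, not the exact QEMU source):

/*
 * Sketch: the EXCP_ATOMIC handler runs one TB with every other vCPU stopped.
 * Between start_exclusive() and end_exclusive(), cpu_in_exclusive_context()
 * -- and therefore cpu_in_serial_context() -- holds for this vCPU, so a
 * nested cpu_loop_exit_atomic() now trips the g_assert() above instead of
 * looping.
 */
static void step_atomic_sketch(CPUState *cpu)
{
    start_exclusive();      /* all other vCPUs are quiesced from here on */
    /* ... translate and execute exactly one TB, serially ... */
    end_exclusive();        /* other vCPUs may resume */
}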

accel/tcg/internal.h  +9 -0

@@ -64,6 +64,15 @@ static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
     }
 }
 
+/*
+ * Return true if CS is not running in parallel with other cpus, either
+ * because there are no other cpus or we are within an exclusive context.
+ */
+static inline bool cpu_in_serial_context(CPUState *cs)
+{
+    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
+}
+
 extern int64_t max_delay;
 extern int64_t max_advance;
 

accel/tcg/tb-maint.c  +1 -1

@@ -760,7 +760,7 @@ void tb_flush(CPUState *cpu)
     if (tcg_enabled()) {
         unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
 
-        if (cpu_in_exclusive_context(cpu)) {
+        if (cpu_in_serial_context(cpu)) {
             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
         } else {
             async_safe_run_on_cpu(cpu, do_tb_flush,
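
The tb_flush() hunk above is the concrete instance of the calling pattern the
new helper enables: when execution is already serial, either because we are
inside an exclusive section or because there is no other vCPU against which
we could race (CF_PARALLEL clear), the work can be done inline; otherwise it
is deferred until every vCPU is quiesced. A hedged sketch of that pattern in
isolation, with hypothetical do_work()/do_work_cb() helpers standing in for
the real operation:

/* Sketch (hypothetical helpers): act immediately when no other vCPU can race
 * with us, otherwise schedule the work to run while all vCPUs are stopped. */
static void serial_or_deferred_sketch(CPUState *cpu)
{
    if (cpu_in_serial_context(cpu)) {
        do_work(cpu);                           /* no other vCPU can race */
    } else {
        async_safe_run_on_cpu(cpu, do_work_cb,  /* runs with vCPUs halted */
                              RUN_ON_CPU_NULL);
    }
}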