@@ -250,8 +250,8 @@ static void cpu_update_icount_locked(CPUState *cpu)
     int64_t executed = cpu_get_icount_executed(cpu);
     cpu->icount_budget -= executed;
 
-    atomic_set__nocheck(&timers_state.qemu_icount,
-                        timers_state.qemu_icount + executed);
+    atomic_set_i64(&timers_state.qemu_icount,
+                   timers_state.qemu_icount + executed);
 }
 
 /*
@@ -280,8 +280,8 @@ static int64_t cpu_get_icount_raw_locked(void)
         /* Take into account what has run */
         cpu_update_icount_locked(cpu);
     }
-    /* The read is protected by the seqlock, so __nocheck is okay.  */
-    return atomic_read__nocheck(&timers_state.qemu_icount);
+    /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
+    return atomic_read_i64(&timers_state.qemu_icount);
 }
 
 static int64_t cpu_get_icount_locked(void)
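
Note on the hunks above: timers_state.qemu_icount is a 64-bit counter that is written under the seqlock but read concurrently by other threads. On a 32-bit host a plain int64_t load or store may be split into two 32-bit accesses, so a concurrent reader can observe a torn value, and such a racy access to a non-atomic object is undefined behaviour regardless of the seqlock's retry loop. The sketch below shows, using the standard GCC/Clang __atomic builtins, the kind of non-tearing 64-bit accessor the patch switches to; the helper names are illustrative and are not QEMU's actual atomic.h definitions.

    #include <stdint.h>

    /* Illustrative sketch: relaxed 64-bit load/store that cannot tear,
     * even on 32-bit hosts (some targets need libatomic for this).
     * QEMU's atomic_read_i64()/atomic_set_i64() fill the same role. */
    static inline int64_t example_read_i64(const int64_t *ptr)
    {
        return __atomic_load_n(ptr, __ATOMIC_RELAXED);
    }

    static inline void example_set_i64(int64_t *ptr, int64_t val)
    {
        __atomic_store_n(ptr, val, __ATOMIC_RELAXED);
    }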