@@ -223,6 +223,111 @@ void force_sig_fault(int sig, int code, abi_ulong addr)
 
 static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
 {
+    CPUArchState *env = thread_cpu->env_ptr;
+    CPUState *cpu = env_cpu(env);
+    TaskState *ts = cpu->opaque;
+    target_siginfo_t tinfo;
+    ucontext_t *uc = puc;
+    struct emulated_sigtable *k;
+    int guest_sig;
+    uintptr_t pc = 0;
+    bool sync_sig = false;
+
+    /*
+     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
+     * handling wrt signal blocking and unwinding.
+     */
+    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
+        MMUAccessType access_type;
+        uintptr_t host_addr;
+        abi_ptr guest_addr;
+        bool is_write;
+
+        host_addr = (uintptr_t)info->si_addr;
+
+        /*
+         * Convert forcefully to guest address space: addresses outside
+         * reserved_va are still valid to report via SEGV_MAPERR.
+         */
+        guest_addr = h2g_nocheck(host_addr);
+
+        pc = host_signal_pc(uc);
+        is_write = host_signal_write(info, uc);
+        access_type = adjust_signal_pc(&pc, is_write);
+
+        if (host_sig == SIGSEGV) {
+            bool maperr = true;
+
+            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
+                /* If this was a write to a TB protected page, restart. */
+                if (is_write &&
+                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
+                                                pc, guest_addr)) {
+                    return;
+                }
+
+                /*
+                 * With reserved_va, the whole address space is PROT_NONE,
+                 * which means that we may get ACCERR when we want MAPERR.
+                 */
+                if (page_get_flags(guest_addr) & PAGE_VALID) {
+                    maperr = false;
+                } else {
+                    info->si_code = SEGV_MAPERR;
+                }
+            }
+
+            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
+        } else {
+            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+            if (info->si_code == BUS_ADRALN) {
+                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
+            }
+        }
+
+        sync_sig = true;
+    }
+
+    /* Get the target signal number. */
+    guest_sig = host_to_target_signal(host_sig);
+    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
+        return;
+    }
+    trace_user_host_signal(cpu, host_sig, guest_sig);
+
+    host_to_target_siginfo_noswap(&tinfo, info);
+
+    k = &ts->sigtab[guest_sig - 1];
+    k->info = tinfo;
+    k->pending = guest_sig;
+    ts->signal_pending = 1;
+
+    /*
+     * For synchronous signals, unwind the cpu state to the faulting
+     * insn and then exit back to the main loop so that the signal
+     * is delivered immediately.
+     */
+    if (sync_sig) {
+        cpu->exception_index = EXCP_INTERRUPT;
+        cpu_loop_exit_restore(cpu, pc);
+    }
+
+    rewind_if_in_safe_syscall(puc);
+
+    /*
+     * Block host signals until target signal handler entered. We
+     * can't block SIGSEGV or SIGBUS while we're executing guest
+     * code in case the guest code provokes one in the window between
+     * now and it getting out to the main loop. Signals will be
+     * unblocked again in process_pending_signals().
+     */
+    sigfillset(&uc->uc_sigmask);
+    sigdelset(&uc->uc_sigmask, SIGSEGV);
+    sigdelset(&uc->uc_sigmask, SIGBUS);
+
+    /* Interrupt the virtual CPU as soon as possible. */
+    cpu_exit(thread_cpu);
 }
 
 void signal_init(void)
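
Note (not part of the patch): the handler above relies on being registered with SA_SIGINFO, which is what makes info->si_addr and info->si_code available, and a guest fault on a reserved_va PROT_NONE page arrives as SEGV_ACCERR, the case the patch converts back to SEGV_MAPERR. The standalone sketch below illustrates both points. It is a hypothetical demo, not QEMU code; demo_handler is an invented name, and the behavior assumes a POSIX host where a read of a PROT_NONE mapping reports SEGV_ACCERR.

#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical demo handler; mirrors the SA_SIGINFO signature used above. */
static void demo_handler(int sig, siginfo_t *info, void *puc)
{
    /*
     * info->si_addr holds the faulting host address (what the patch feeds
     * to h2g_nocheck()); puc is the ucontext_t that host_signal_pc() and
     * host_signal_write() inspect.
     */
    char msg[] = "SEGV_ACCERR on a PROT_NONE page\n";
    (void)sig;
    (void)puc;
    write(STDERR_FILENO, msg, sizeof(msg) - 1);   /* async-signal-safe */
    _exit(info->si_code == SEGV_ACCERR ? 0 : 1);
}

int main(void)
{
    struct sigaction act;
    char *page;

    memset(&act, 0, sizeof(act));
    act.sa_sigaction = demo_handler;
    act.sa_flags = SA_SIGINFO;       /* deliver siginfo_t and ucontext */
    sigfillset(&act.sa_mask);
    sigaction(SIGSEGV, &act, NULL);

    /* A PROT_NONE mapping faults with SEGV_ACCERR, not SEGV_MAPERR. */
    page = mmap(NULL, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED) {
        return 1;
    }
    return page[0];   /* faults here; demo_handler exits the process */
}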