@@ -929,9 +929,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                            e2);
     env->eip = offset;
 }
-#endif
 
-#ifdef TARGET_X86_64
 void helper_sysret(CPUX86State *env, int dflag)
 {
     int cpl, selector;
@@ -984,7 +982,7 @@ void helper_sysret(CPUX86State *env, int dflag)
                                DESC_W_MASK | DESC_A_MASK);
     }
 }
-#endif
+#endif /* TARGET_X86_64 */
 
 /* real mode interrupt */
 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
@@ -1112,76 +1110,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
 }
 
-bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
-    X86CPU *cpu = X86_CPU(cs);
-    CPUX86State *env = &cpu->env;
-    int intno;
-
-    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
-    if (!interrupt_request) {
-        return false;
-    }
-
-    /* Don't process multiple interrupt requests in a single call.
-     * This is required to make icount-driven execution deterministic.
-     */
-    switch (interrupt_request) {
-#if !defined(CONFIG_USER_ONLY)
-    case CPU_INTERRUPT_POLL:
-        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
-        apic_poll_irq(cpu->apic_state);
-        break;
-#endif
-    case CPU_INTERRUPT_SIPI:
-        do_cpu_sipi(cpu);
-        break;
-    case CPU_INTERRUPT_SMI:
-        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
-        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
-#ifdef CONFIG_USER_ONLY
-        cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
-#else
-        do_smm_enter(cpu);
-#endif /* CONFIG_USER_ONLY */
-        break;
-    case CPU_INTERRUPT_NMI:
-        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
-        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
-        env->hflags2 |= HF2_NMI_MASK;
-        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
-        break;
-    case CPU_INTERRUPT_MCE:
-        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
-        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
-        break;
-    case CPU_INTERRUPT_HARD:
-        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
-        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
-                                   CPU_INTERRUPT_VIRQ);
-        intno = cpu_get_pic_interrupt(env);
-        qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                      "Servicing hardware INT=0x%02x\n", intno);
-        do_interrupt_x86_hardirq(env, intno, 1);
-        break;
-#if !defined(CONFIG_USER_ONLY)
-    case CPU_INTERRUPT_VIRQ:
-        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
-        intno = x86_ldl_phys(cs, env->vm_vmcb
-                             + offsetof(struct vmcb, control.int_vector));
-        qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                      "Servicing virtual hardware INT=0x%02x\n", intno);
-        do_interrupt_x86_hardirq(env, intno, 1);
-        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
-        env->int_ctl &= ~V_IRQ_MASK;
-        break;
-#endif
-    }
-
-    /* Ensure that no TB jump will be modified as the program flow was changed. */
-    return true;
-}
-
 void helper_lldt(CPUX86State *env, int selector)
 {
     SegmentCache *dt;