@@ -61,6 +61,7 @@
 #include "vmx.h"
 #include "x86.h"
 #include "x86_descr.h"
+#include "x86_flags.h"
 #include "x86_mmu.h"
 #include "x86_decode.h"
 #include "x86_emu.h"
@@ -103,7 +104,7 @@ static void update_apic_tpr(CPUState *cpu)
 
 #define VECTORING_INFO_VECTOR_MASK 0xff
 
-void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
+void hvf_handle_io(CPUState *env, uint16_t port, void *buffer,
                    int direction, int size, int count)
 {
     int i;
@@ -434,6 +435,264 @@ static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
     }
 }
 
+void hvf_load_regs(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    int i = 0;
+    RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
+    RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
+    RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
+    RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
+    RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
+    RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
+    RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
+    RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
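+    /* HV_X86_RAX + i resolves to HV_X86_R8..HV_X86_R15 for i = 8..15 */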
+    for (i = 8; i < 16; i++) {
+        RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
+    }
+
+    env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
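+    /* expand RFLAGS into the lazy-flags form declared in x86_flags.h */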
+    rflags_to_lflags(env);
+    env->eip = rreg(cs->accel->fd, HV_X86_RIP);
+}
+
+void hvf_store_regs(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    int i = 0;
+    wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
+    wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
+    wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
+    wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
+    wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
+    wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
+    wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
+    wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
+    for (i = 8; i < 16; i++) {
+        wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
+    }
+
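+    /* fold the lazy flags back into env->eflags before writing RFLAGS */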
+    lflags_to_rflags(env);
+    wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
+    macvm_set_rip(cs, env->eip);
+}
+
+void hvf_simulate_rdmsr(CPUX86State *env)
+{
+    X86CPU *cpu = env_archcpu(env);
+    CPUState *cs = env_cpu(env);
+    uint32_t msr = ECX(env);
+    uint64_t val = 0;
+
+    switch (msr) {
+    case MSR_IA32_TSC:
+        val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
+        break;
+    case MSR_IA32_APICBASE:
+        val = cpu_get_apic_base(cpu->apic_state);
+        break;
+    case MSR_APIC_START ... MSR_APIC_END: {
+        int ret;
+        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
+
+        ret = apic_msr_read(index, &val);
+        if (ret < 0) {
+            x86_emul_raise_exception(env, EXCP0D_GPF, 0);
+        }
+
+        break;
+    }
+    case MSR_IA32_UCODE_REV:
+        val = cpu->ucode_rev;
+        break;
+    case MSR_EFER:
+        val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
+        break;
+    case MSR_FSBASE:
+        val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE);
+        break;
+    case MSR_GSBASE:
+        val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE);
+        break;
+    case MSR_KERNELGSBASE:
+        val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE);
+        break;
+    case MSR_STAR:
+        abort();
+        break;
+    case MSR_LSTAR:
+        abort();
+        break;
+    case MSR_CSTAR:
+        abort();
+        break;
+    case MSR_IA32_MISC_ENABLE:
+        val = env->msr_ia32_misc_enable;
+        break;
+    case MSR_MTRRphysBase(0):
+    case MSR_MTRRphysBase(1):
+    case MSR_MTRRphysBase(2):
+    case MSR_MTRRphysBase(3):
+    case MSR_MTRRphysBase(4):
+    case MSR_MTRRphysBase(5):
+    case MSR_MTRRphysBase(6):
+    case MSR_MTRRphysBase(7):
+        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
+        break;
+    case MSR_MTRRphysMask(0):
+    case MSR_MTRRphysMask(1):
+    case MSR_MTRRphysMask(2):
+    case MSR_MTRRphysMask(3):
+    case MSR_MTRRphysMask(4):
+    case MSR_MTRRphysMask(5):
+    case MSR_MTRRphysMask(6):
+    case MSR_MTRRphysMask(7):
+        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
+        break;
+    case MSR_MTRRfix64K_00000:
+        val = env->mtrr_fixed[0];
+        break;
+    case MSR_MTRRfix16K_80000:
+    case MSR_MTRRfix16K_A0000:
+        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
+        break;
+    case MSR_MTRRfix4K_C0000:
+    case MSR_MTRRfix4K_C8000:
+    case MSR_MTRRfix4K_D0000:
+    case MSR_MTRRfix4K_D8000:
+    case MSR_MTRRfix4K_E0000:
+    case MSR_MTRRfix4K_E8000:
+    case MSR_MTRRfix4K_F0000:
+    case MSR_MTRRfix4K_F8000:
+        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
+        break;
+    case MSR_MTRRdefType:
+        val = env->mtrr_deftype;
+        break;
+    case MSR_CORE_THREAD_COUNT:
+        val = cpu_x86_get_msr_core_thread_count(cpu);
+        break;
+    default:
+        /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
+        val = 0;
+        break;
+    }
+
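+    /* RDMSR returns its 64-bit result split across EDX:EAX */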
+    RAX(env) = (uint32_t)val;
+    RDX(env) = (uint32_t)(val >> 32);
+}
+
+void hvf_simulate_wrmsr(CPUX86State *env)
+{
+    X86CPU *cpu = env_archcpu(env);
+    CPUState *cs = env_cpu(env);
+    uint32_t msr = ECX(env);
+    uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);
+
+    switch (msr) {
+    case MSR_IA32_TSC:
+        break;
+    case MSR_IA32_APICBASE: {
+        int r;
+
+        r = cpu_set_apic_base(cpu->apic_state, data);
+        if (r < 0) {
+            x86_emul_raise_exception(env, EXCP0D_GPF, 0);
+        }
+
+        break;
+    }
+    case MSR_APIC_START ... MSR_APIC_END: {
+        int ret;
+        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
+
+        ret = apic_msr_write(index, data);
+        if (ret < 0) {
+            x86_emul_raise_exception(env, EXCP0D_GPF, 0);
+        }
+
+        break;
+    }
+    case MSR_FSBASE:
+        wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data);
+        break;
+    case MSR_GSBASE:
+        wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data);
+        break;
+    case MSR_KERNELGSBASE:
+        wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data);
+        break;
+    case MSR_STAR:
+        abort();
+        break;
+    case MSR_LSTAR:
+        abort();
+        break;
+    case MSR_CSTAR:
+        abort();
+        break;
+    case MSR_EFER:
+        /*printf("new efer %llx\n", EFER(cs));*/
+        wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data);
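+        /* flush cached translations when the guest enables no-execute */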
+        if (data & MSR_EFER_NXE) {
+            hv_vcpu_invalidate_tlb(cs->accel->fd);
+        }
+        break;
+    case MSR_MTRRphysBase(0):
+    case MSR_MTRRphysBase(1):
+    case MSR_MTRRphysBase(2):
+    case MSR_MTRRphysBase(3):
+    case MSR_MTRRphysBase(4):
+    case MSR_MTRRphysBase(5):
+    case MSR_MTRRphysBase(6):
+    case MSR_MTRRphysBase(7):
+        env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
+        break;
+    case MSR_MTRRphysMask(0):
+    case MSR_MTRRphysMask(1):
+    case MSR_MTRRphysMask(2):
+    case MSR_MTRRphysMask(3):
+    case MSR_MTRRphysMask(4):
+    case MSR_MTRRphysMask(5):
+    case MSR_MTRRphysMask(6):
+    case MSR_MTRRphysMask(7):
+        env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
+        break;
+    case MSR_MTRRfix64K_00000:
+        env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
+        break;
+    case MSR_MTRRfix16K_80000:
+    case MSR_MTRRfix16K_A0000:
+        env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
+        break;
+    case MSR_MTRRfix4K_C0000:
+    case MSR_MTRRfix4K_C8000:
+    case MSR_MTRRfix4K_D0000:
+    case MSR_MTRRfix4K_D8000:
+    case MSR_MTRRfix4K_E0000:
+    case MSR_MTRRfix4K_E8000:
+    case MSR_MTRRfix4K_F0000:
+    case MSR_MTRRfix4K_F8000:
+        env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
+        break;
+    case MSR_MTRRdefType:
+        env->mtrr_deftype = data;
+        break;
+    default:
+        break;
+    }
+
+    /* Related to support known hypervisor interface */
+    /* if (g_hypervisor_iface)
+         g_hypervisor_iface->wrmsr_handler(cs, msr, data);
+
+    printf("write msr %llx\n", RCX(cs));*/
+}
+
 int hvf_vcpu_exec(CPUState *cpu)
 {
     X86CPU *x86_cpu = X86_CPU(cpu);
@@ -517,10 +776,10 @@ int hvf_vcpu_exec(CPUState *cpu)
         if (ept_emulation_fault(slot, gpa, exit_qual)) {
             struct x86_decode decode;
 
-            load_regs(cpu);
+            hvf_load_regs(cpu);
             decode_instruction(env, &decode);
             exec_instruction(env, &decode);
-            store_regs(cpu);
+            hvf_store_regs(cpu);
             break;
         }
         break;
@@ -535,8 +794,8 @@ int hvf_vcpu_exec(CPUState *cpu)
 
             if (!string && in) {
                 uint64_t val = 0;
-                load_regs(cpu);
-                hvf_handle_io(env, port, &val, 0, size, 1);
+                hvf_load_regs(cpu);
+                hvf_handle_io(env_cpu(env), port, &val, 0, size, 1);
                 if (size == 1) {
                     AL(env) = val;
                 } else if (size == 2) {
@@ -547,21 +806,21 @@ int hvf_vcpu_exec(CPUState *cpu)
                     RAX(env) = (uint64_t)val;
                 }
                 env->eip += ins_len;
-                store_regs(cpu);
+                hvf_store_regs(cpu);
                 break;
             } else if (!string && !in) {
                 RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX);
-                hvf_handle_io(env, port, &RAX(env), 1, size, 1);
+                hvf_handle_io(env_cpu(env), port, &RAX(env), 1, size, 1);
                 macvm_set_rip(cpu, rip + ins_len);
                 break;
             }
             struct x86_decode decode;
 
-            load_regs(cpu);
+            hvf_load_regs(cpu);
             decode_instruction(env, &decode);
             assert(ins_len == decode.len);
             exec_instruction(env, &decode);
-            store_regs(cpu);
+            hvf_store_regs(cpu);
 
             break;
         }
@@ -614,21 +873,21 @@ int hvf_vcpu_exec(CPUState *cpu)
         case EXIT_REASON_RDMSR:
         case EXIT_REASON_WRMSR:
         {
-            load_regs(cpu);
+            hvf_load_regs(cpu);
             if (exit_reason == EXIT_REASON_RDMSR) {
-                simulate_rdmsr(env);
+                hvf_simulate_rdmsr(env);
             } else {
-                simulate_wrmsr(env);
+                hvf_simulate_wrmsr(env);
             }
             env->eip += ins_len;
-            store_regs(cpu);
+            hvf_store_regs(cpu);
             break;
         }
         case EXIT_REASON_CR_ACCESS: {
             int cr;
             int reg;
 
-            load_regs(cpu);
+            hvf_load_regs(cpu);
             cr = exit_qual & 15;
             reg = (exit_qual >> 8) & 15;
 
@@ -656,16 +915,16 @@ int hvf_vcpu_exec(CPUState *cpu)
                 abort();
             }
             env->eip += ins_len;
-            store_regs(cpu);
+            hvf_store_regs(cpu);
             break;
         }
         case EXIT_REASON_APIC_ACCESS: { /* TODO */
             struct x86_decode decode;
 
-            load_regs(cpu);
+            hvf_load_regs(cpu);
             decode_instruction(env, &decode);
             exec_instruction(env, &decode);
-            store_regs(cpu);
+            hvf_store_regs(cpu);
             break;
         }
         case EXIT_REASON_TPR: {
@@ -674,7 +933,7 @@ int hvf_vcpu_exec(CPUState *cpu)
         }
         case EXIT_REASON_TASK_SWITCH: {
             uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
-            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
+            x86_segment_selector sel = {.sel = exit_qual & 0xffff};
             vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                 vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
                 & VMCS_INTR_T_MASK);