/*
 * x86 misc helpers - system code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "exec/cputlb.h"
#include "tcg/helper-tcg.h"
#include "hw/i386/apic.h"
  28. void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
  29. {
  30. address_space_stb(&address_space_io, port, data,
  31. cpu_get_mem_attrs(env), NULL);
  32. }
  33. target_ulong helper_inb(CPUX86State *env, uint32_t port)
  34. {
  35. return address_space_ldub(&address_space_io, port,
  36. cpu_get_mem_attrs(env), NULL);
  37. }
  38. void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
  39. {
  40. address_space_stw(&address_space_io, port, data,
  41. cpu_get_mem_attrs(env), NULL);
  42. }
  43. target_ulong helper_inw(CPUX86State *env, uint32_t port)
  44. {
  45. return address_space_lduw(&address_space_io, port,
  46. cpu_get_mem_attrs(env), NULL);
  47. }
  48. void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
  49. {
  50. address_space_stl(&address_space_io, port, data,
  51. cpu_get_mem_attrs(env), NULL);
  52. }
  53. target_ulong helper_inl(CPUX86State *env, uint32_t port)
  54. {
  55. return address_space_ldl(&address_space_io, port,
  56. cpu_get_mem_attrs(env), NULL);
  57. }
  58. target_ulong helper_read_cr8(CPUX86State *env)
  59. {
  60. if (!(env->hflags2 & HF2_VINTR_MASK)) {
  61. return cpu_get_apic_tpr(env_archcpu(env)->apic_state);
  62. } else {
  63. return env->int_ctl & V_TPR_MASK;
  64. }
  65. }
/*
 * Write control register @reg with value @t0.
 *
 * Each case may trigger an SVM vmexit or raise #GP before committing
 * the new value, so the check-then-update order below is load-bearing.
 */
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    switch (reg) {
    case 0:
        /*
         * If we reach this point, the CR0 write intercept is disabled.
         * But we could still exit if the hypervisor has requested the selective
         * intercept for bits other than TS and MP
         */
        if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
            ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
            cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
        }
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        /* In long mode, reject a CR3 with bits above the CPU's physical
           address width set. */
        if ((env->efer & MSR_EFER_LMA) &&
            (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        /* Outside long mode only the low 32 bits of CR3 are used. */
        if (!(env->efer & MSR_EFER_LMA)) {
            t0 &= 0xffffffffUL;
        }
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (t0 & cr4_reserved_bits(env)) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        /* Toggling CR4.LA57 while executing 64-bit code raises #GP. */
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        /* Without a virtual-interrupt shadow, CR8 writes go to the APIC
           TPR; APIC access requires the big QEMU lock. */
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            bql_lock();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            bql_unlock();
        }
        /* Always mirror the value into the SVM virtual TPR field. */
        env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);
        CPUState *cs = env_cpu(env);
        /* Re-evaluate the pending virtual interrupt against the new TPR. */
        if (ctl_has_irq(env)) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
  120. void helper_wrmsr(CPUX86State *env)
  121. {
  122. uint64_t val;
  123. CPUState *cs = env_cpu(env);
  124. cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());
  125. val = ((uint32_t)env->regs[R_EAX]) |
  126. ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
  127. switch ((uint32_t)env->regs[R_ECX]) {
  128. case MSR_IA32_SYSENTER_CS:
  129. env->sysenter_cs = val & 0xffff;
  130. break;
  131. case MSR_IA32_SYSENTER_ESP:
  132. env->sysenter_esp = val;
  133. break;
  134. case MSR_IA32_SYSENTER_EIP:
  135. env->sysenter_eip = val;
  136. break;
  137. case MSR_IA32_APICBASE: {
  138. int ret;
  139. if (val & MSR_IA32_APICBASE_RESERVED) {
  140. goto error;
  141. }
  142. ret = cpu_set_apic_base(env_archcpu(env)->apic_state, val);
  143. if (ret < 0) {
  144. goto error;
  145. }
  146. break;
  147. }
  148. case MSR_EFER:
  149. {
  150. uint64_t update_mask;
  151. update_mask = 0;
  152. if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
  153. update_mask |= MSR_EFER_SCE;
  154. }
  155. if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
  156. update_mask |= MSR_EFER_LME;
  157. }
  158. if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
  159. update_mask |= MSR_EFER_FFXSR;
  160. }
  161. if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
  162. update_mask |= MSR_EFER_NXE;
  163. }
  164. if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
  165. update_mask |= MSR_EFER_SVME;
  166. }
  167. if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
  168. update_mask |= MSR_EFER_FFXSR;
  169. }
  170. cpu_load_efer(env, (env->efer & ~update_mask) |
  171. (val & update_mask));
  172. }
  173. break;
  174. case MSR_STAR:
  175. env->star = val;
  176. break;
  177. case MSR_PAT:
  178. env->pat = val;
  179. break;
  180. case MSR_IA32_PKRS:
  181. if (val & 0xFFFFFFFF00000000ull) {
  182. goto error;
  183. }
  184. env->pkrs = val;
  185. tlb_flush(cs);
  186. break;
  187. case MSR_VM_HSAVE_PA:
  188. if (val & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
  189. goto error;
  190. }
  191. env->vm_hsave = val;
  192. break;
  193. #ifdef TARGET_X86_64
  194. case MSR_LSTAR:
  195. env->lstar = val;
  196. break;
  197. case MSR_CSTAR:
  198. env->cstar = val;
  199. break;
  200. case MSR_FMASK:
  201. env->fmask = val;
  202. break;
  203. case MSR_FSBASE:
  204. env->segs[R_FS].base = val;
  205. break;
  206. case MSR_GSBASE:
  207. env->segs[R_GS].base = val;
  208. break;
  209. case MSR_KERNELGSBASE:
  210. env->kernelgsbase = val;
  211. break;
  212. #endif
  213. case MSR_MTRRphysBase(0):
  214. case MSR_MTRRphysBase(1):
  215. case MSR_MTRRphysBase(2):
  216. case MSR_MTRRphysBase(3):
  217. case MSR_MTRRphysBase(4):
  218. case MSR_MTRRphysBase(5):
  219. case MSR_MTRRphysBase(6):
  220. case MSR_MTRRphysBase(7):
  221. env->mtrr_var[((uint32_t)env->regs[R_ECX] -
  222. MSR_MTRRphysBase(0)) / 2].base = val;
  223. break;
  224. case MSR_MTRRphysMask(0):
  225. case MSR_MTRRphysMask(1):
  226. case MSR_MTRRphysMask(2):
  227. case MSR_MTRRphysMask(3):
  228. case MSR_MTRRphysMask(4):
  229. case MSR_MTRRphysMask(5):
  230. case MSR_MTRRphysMask(6):
  231. case MSR_MTRRphysMask(7):
  232. env->mtrr_var[((uint32_t)env->regs[R_ECX] -
  233. MSR_MTRRphysMask(0)) / 2].mask = val;
  234. break;
  235. case MSR_MTRRfix64K_00000:
  236. env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
  237. MSR_MTRRfix64K_00000] = val;
  238. break;
  239. case MSR_MTRRfix16K_80000:
  240. case MSR_MTRRfix16K_A0000:
  241. env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
  242. MSR_MTRRfix16K_80000 + 1] = val;
  243. break;
  244. case MSR_MTRRfix4K_C0000:
  245. case MSR_MTRRfix4K_C8000:
  246. case MSR_MTRRfix4K_D0000:
  247. case MSR_MTRRfix4K_D8000:
  248. case MSR_MTRRfix4K_E0000:
  249. case MSR_MTRRfix4K_E8000:
  250. case MSR_MTRRfix4K_F0000:
  251. case MSR_MTRRfix4K_F8000:
  252. env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
  253. MSR_MTRRfix4K_C0000 + 3] = val;
  254. break;
  255. case MSR_MTRRdefType:
  256. env->mtrr_deftype = val;
  257. break;
  258. case MSR_MCG_STATUS:
  259. env->mcg_status = val;
  260. break;
  261. case MSR_MCG_CTL:
  262. if ((env->mcg_cap & MCG_CTL_P)
  263. && (val == 0 || val == ~(uint64_t)0)) {
  264. env->mcg_ctl = val;
  265. }
  266. break;
  267. case MSR_TSC_AUX:
  268. env->tsc_aux = val;
  269. break;
  270. case MSR_IA32_MISC_ENABLE:
  271. env->msr_ia32_misc_enable = val;
  272. break;
  273. case MSR_IA32_BNDCFGS:
  274. /* FIXME: #GP if reserved bits are set. */
  275. /* FIXME: Extend highest implemented bit of linear address. */
  276. env->msr_bndcfgs = val;
  277. cpu_sync_bndcs_hflags(env);
  278. break;
  279. case MSR_APIC_START ... MSR_APIC_END: {
  280. int ret;
  281. int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
  282. bql_lock();
  283. ret = apic_msr_write(index, val);
  284. bql_unlock();
  285. if (ret < 0) {
  286. goto error;
  287. }
  288. break;
  289. }
  290. default:
  291. if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
  292. && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
  293. (4 * env->mcg_cap & 0xff)) {
  294. uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
  295. if ((offset & 0x3) != 0
  296. || (val == 0 || val == ~(uint64_t)0)) {
  297. env->mce_banks[offset] = val;
  298. }
  299. break;
  300. }
  301. /* XXX: exception? */
  302. break;
  303. }
  304. return;
  305. error:
  306. raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
  307. }
  308. void helper_rdmsr(CPUX86State *env)
  309. {
  310. X86CPU *x86_cpu = env_archcpu(env);
  311. uint64_t val;
  312. cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());
  313. switch ((uint32_t)env->regs[R_ECX]) {
  314. case MSR_IA32_SYSENTER_CS:
  315. val = env->sysenter_cs;
  316. break;
  317. case MSR_IA32_SYSENTER_ESP:
  318. val = env->sysenter_esp;
  319. break;
  320. case MSR_IA32_SYSENTER_EIP:
  321. val = env->sysenter_eip;
  322. break;
  323. case MSR_IA32_APICBASE:
  324. val = cpu_get_apic_base(env_archcpu(env)->apic_state);
  325. break;
  326. case MSR_EFER:
  327. val = env->efer;
  328. break;
  329. case MSR_STAR:
  330. val = env->star;
  331. break;
  332. case MSR_PAT:
  333. val = env->pat;
  334. break;
  335. case MSR_IA32_PKRS:
  336. val = env->pkrs;
  337. break;
  338. case MSR_VM_HSAVE_PA:
  339. val = env->vm_hsave;
  340. break;
  341. case MSR_IA32_PERF_STATUS:
  342. /* tsc_increment_by_tick */
  343. val = 1000ULL;
  344. /* CPU multiplier */
  345. val |= (((uint64_t)4ULL) << 40);
  346. break;
  347. #ifdef TARGET_X86_64
  348. case MSR_LSTAR:
  349. val = env->lstar;
  350. break;
  351. case MSR_CSTAR:
  352. val = env->cstar;
  353. break;
  354. case MSR_FMASK:
  355. val = env->fmask;
  356. break;
  357. case MSR_FSBASE:
  358. val = env->segs[R_FS].base;
  359. break;
  360. case MSR_GSBASE:
  361. val = env->segs[R_GS].base;
  362. break;
  363. case MSR_KERNELGSBASE:
  364. val = env->kernelgsbase;
  365. break;
  366. case MSR_TSC_AUX:
  367. val = env->tsc_aux;
  368. break;
  369. #endif
  370. case MSR_SMI_COUNT:
  371. val = env->msr_smi_count;
  372. break;
  373. case MSR_MTRRphysBase(0):
  374. case MSR_MTRRphysBase(1):
  375. case MSR_MTRRphysBase(2):
  376. case MSR_MTRRphysBase(3):
  377. case MSR_MTRRphysBase(4):
  378. case MSR_MTRRphysBase(5):
  379. case MSR_MTRRphysBase(6):
  380. case MSR_MTRRphysBase(7):
  381. val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
  382. MSR_MTRRphysBase(0)) / 2].base;
  383. break;
  384. case MSR_MTRRphysMask(0):
  385. case MSR_MTRRphysMask(1):
  386. case MSR_MTRRphysMask(2):
  387. case MSR_MTRRphysMask(3):
  388. case MSR_MTRRphysMask(4):
  389. case MSR_MTRRphysMask(5):
  390. case MSR_MTRRphysMask(6):
  391. case MSR_MTRRphysMask(7):
  392. val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
  393. MSR_MTRRphysMask(0)) / 2].mask;
  394. break;
  395. case MSR_MTRRfix64K_00000:
  396. val = env->mtrr_fixed[0];
  397. break;
  398. case MSR_MTRRfix16K_80000:
  399. case MSR_MTRRfix16K_A0000:
  400. val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
  401. MSR_MTRRfix16K_80000 + 1];
  402. break;
  403. case MSR_MTRRfix4K_C0000:
  404. case MSR_MTRRfix4K_C8000:
  405. case MSR_MTRRfix4K_D0000:
  406. case MSR_MTRRfix4K_D8000:
  407. case MSR_MTRRfix4K_E0000:
  408. case MSR_MTRRfix4K_E8000:
  409. case MSR_MTRRfix4K_F0000:
  410. case MSR_MTRRfix4K_F8000:
  411. val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
  412. MSR_MTRRfix4K_C0000 + 3];
  413. break;
  414. case MSR_MTRRdefType:
  415. val = env->mtrr_deftype;
  416. break;
  417. case MSR_MTRRcap:
  418. if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
  419. val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
  420. MSR_MTRRcap_WC_SUPPORTED;
  421. } else {
  422. /* XXX: exception? */
  423. val = 0;
  424. }
  425. break;
  426. case MSR_MCG_CAP:
  427. val = env->mcg_cap;
  428. break;
  429. case MSR_MCG_CTL:
  430. if (env->mcg_cap & MCG_CTL_P) {
  431. val = env->mcg_ctl;
  432. } else {
  433. val = 0;
  434. }
  435. break;
  436. case MSR_MCG_STATUS:
  437. val = env->mcg_status;
  438. break;
  439. case MSR_IA32_MISC_ENABLE:
  440. val = env->msr_ia32_misc_enable;
  441. break;
  442. case MSR_IA32_BNDCFGS:
  443. val = env->msr_bndcfgs;
  444. break;
  445. case MSR_IA32_UCODE_REV:
  446. val = x86_cpu->ucode_rev;
  447. break;
  448. case MSR_CORE_THREAD_COUNT: {
  449. val = cpu_x86_get_msr_core_thread_count(x86_cpu);
  450. break;
  451. }
  452. case MSR_APIC_START ... MSR_APIC_END: {
  453. int ret;
  454. int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
  455. bql_lock();
  456. ret = apic_msr_read(index, &val);
  457. bql_unlock();
  458. if (ret < 0) {
  459. raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
  460. }
  461. break;
  462. }
  463. default:
  464. if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
  465. && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
  466. (4 * env->mcg_cap & 0xff)) {
  467. uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
  468. val = env->mce_banks[offset];
  469. break;
  470. }
  471. /* XXX: exception? */
  472. val = 0;
  473. break;
  474. }
  475. env->regs[R_EAX] = (uint32_t)(val);
  476. env->regs[R_EDX] = (uint32_t)(val >> 32);
  477. }
  478. void helper_flush_page(CPUX86State *env, target_ulong addr)
  479. {
  480. tlb_flush_page(env_cpu(env), addr);
  481. }
  482. G_NORETURN void helper_hlt(CPUX86State *env)
  483. {
  484. CPUState *cs = env_cpu(env);
  485. do_end_instruction(env);
  486. cs->halted = 1;
  487. cs->exception_index = EXCP_HLT;
  488. cpu_loop_exit(cs);
  489. }
  490. void helper_monitor(CPUX86State *env, target_ulong ptr)
  491. {
  492. if ((uint32_t)env->regs[R_ECX] != 0) {
  493. raise_exception_ra(env, EXCP0D_GPF, GETPC());
  494. }
  495. /* XXX: store address? */
  496. cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
  497. }
  498. G_NORETURN void helper_mwait(CPUX86State *env, int next_eip_addend)
  499. {
  500. CPUState *cs = env_cpu(env);
  501. if ((uint32_t)env->regs[R_ECX] != 0) {
  502. raise_exception_ra(env, EXCP0D_GPF, GETPC());
  503. }
  504. cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
  505. env->eip += next_eip_addend;
  506. /* XXX: not complete but not completely erroneous */
  507. if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
  508. helper_pause(env);
  509. } else {
  510. helper_hlt(env);
  511. }
  512. }