/* kvm.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
  11. #include "qemu/osdep.h"
  12. #include <sys/ioctl.h>
  13. #include <linux/kvm.h>
  14. #include "cpu.h"
  15. #include "internal.h"
  16. #include "qemu/error-report.h"
  17. #include "qemu/main-loop.h"
  18. #include "sysemu/kvm.h"
  19. #include "sysemu/kvm_int.h"
  20. #include "sysemu/runstate.h"
  21. #include "kvm_mips.h"
  22. #include "hw/boards.h"
  23. #include "fpu_helper.h"
/* Set to 1 for verbose KVM debug logging on stderr. */
#define DEBUG_KVM 0
/* Debug printf; the whole call is constant-folded away when DEBUG_KVM is 0. */
#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

/*
 * Non-zero when the host KVM advertises the FPU/MSA capability.  Probed once
 * in kvm_arch_init() and cleared in kvm_arch_init_vcpu() if enabling fails,
 * so kvm_mips_reset_vcpu() can disable the feature in the guest config.
 */
static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

/* No MIPS-specific KVM capabilities are mandatory beyond the generic set. */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

/* VM run-state change hook; registered per vcpu in kvm_arch_init_vcpu(). */
static void kvm_mips_update_state(void *opaque, bool running, RunState state);
  33. unsigned long kvm_arch_vcpu_id(CPUState *cs)
  34. {
  35. return cs->cpu_index;
  36. }
/*
 * One-time architecture init: set the signal-mask width and probe the
 * optional FPU/MSA capabilities (consumed later in init_vcpu/reset_vcpu).
 * Always succeeds.
 */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals */
    /* 16 bytes == 128 bits, one per signal. */
    kvm_set_sigmask_len(s, 16);

    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);
    return 0;
}
/* No in-kernel irqchip on MIPS; report success with nothing to create. */
int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}
  50. int kvm_arch_init_vcpu(CPUState *cs)
  51. {
  52. CPUMIPSState *env = cpu_env(cs);
  53. int ret = 0;
  54. qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
  55. if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
  56. ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
  57. if (ret < 0) {
  58. /* mark unsupported so it gets disabled on reset */
  59. kvm_mips_fpu_cap = 0;
  60. ret = 0;
  61. }
  62. }
  63. if (kvm_mips_msa_cap && ase_msa_available(env)) {
  64. ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
  65. if (ret < 0) {
  66. /* mark unsupported so it gets disabled on reset */
  67. kvm_mips_msa_cap = 0;
  68. ret = 0;
  69. }
  70. }
  71. DPRINTF("%s\n", __func__);
  72. return ret;
  73. }
/* Nothing allocated per-vcpu in init_vcpu needs explicit teardown. */
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}
/*
 * Vcpu reset hook: if the host KVM could not provide FPU/MSA (capability
 * missing or enable failed in kvm_arch_init_vcpu), strip the corresponding
 * feature bit from the guest-visible Config registers so the guest never
 * tries to use it.
 */
void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        warn_report("KVM does not support FPU, disabling");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }
    if (!kvm_mips_msa_cap && ase_msa_available(env)) {
        warn_report("KVM does not support MSA, disabling");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}
/* SW breakpoints not implemented for MIPS KVM; pretend success. */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}
/* SW breakpoints not implemented for MIPS KVM; pretend success. */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}
/*
 * Non-zero when the I/O interrupt line (hardware interrupt 0, i.e. Cause.IP2)
 * is asserted in the emulated CP0_Cause.
 */
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}
  106. void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
  107. {
  108. MIPSCPU *cpu = MIPS_CPU(cs);
  109. int r;
  110. struct kvm_mips_interrupt intr;
  111. bql_lock();
  112. if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
  113. cpu_mips_io_interrupts_pending(cpu)) {
  114. intr.cpu = -1;
  115. intr.irq = 2;
  116. r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
  117. if (r < 0) {
  118. error_report("%s: cpu %d: failed to inject IRQ %x",
  119. __func__, cs->cpu_index, intr.irq);
  120. }
  121. }
  122. bql_unlock();
  123. }
/* No per-exit memory transaction attributes on MIPS. */
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}
/* Nothing async to process; just report whether the vcpu is halted. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
  132. int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
  133. {
  134. int ret;
  135. DPRINTF("%s\n", __func__);
  136. switch (run->exit_reason) {
  137. default:
  138. error_report("%s: unknown exit reason %d",
  139. __func__, run->exit_reason);
  140. ret = -1;
  141. break;
  142. }
  143. return ret;
  144. }
/* Always stop the VM on an emulation error. */
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}
/* No IRQ routing support on MIPS KVM. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
  153. int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
  154. {
  155. CPUState *cs = CPU(cpu);
  156. struct kvm_mips_interrupt intr;
  157. assert(kvm_enabled());
  158. intr.cpu = -1;
  159. if (level) {
  160. intr.irq = irq;
  161. } else {
  162. intr.irq = -irq;
  163. }
  164. kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
  165. return 0;
  166. }
  167. int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
  168. {
  169. CPUState *cs = current_cpu;
  170. CPUState *dest_cs = CPU(cpu);
  171. struct kvm_mips_interrupt intr;
  172. assert(kvm_enabled());
  173. intr.cpu = dest_cs->cpu_index;
  174. if (level) {
  175. intr.irq = irq;
  176. } else {
  177. intr.irq = -irq;
  178. }
  179. DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);
  180. kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
  181. return 0;
  182. }
/*
 * KVM one-reg ids for CP0 registers: (register, select) pairs packed into
 * the KVM_REG_MIPS_CP0 namespace, as 32-bit or 64-bit accesses.
 */
#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_RANDOM         MIPS_CP0_32(1, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN      MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_PWBASE         MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD        MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE         MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL          MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE          MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6        MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_XCONTEXT       MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1      MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2      MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3      MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4      MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5     MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6      MIPS_CP0_64(31, 7)
/* Write a signed 32-bit register to KVM via KVM_SET_ONE_REG. */
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}
/* Write an unsigned 32-bit register to KVM via KVM_SET_ONE_REG. */
static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}
/*
 * Write a target_ulong register to KVM.  The value is widened to 64 bits in
 * a temporary since KVM always transfers these registers as u64.
 */
static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}
/* Write a signed 64-bit register to KVM via KVM_SET_ONE_REG. */
static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}
/* Write an unsigned 64-bit register to KVM via KVM_SET_ONE_REG. */
static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}
/* Read a signed 32-bit register from KVM via KVM_GET_ONE_REG. */
static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
/* Read an unsigned 32-bit register from KVM via KVM_GET_ONE_REG. */
static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
/*
 * Read a target_ulong register from KVM.  KVM transfers a u64; the result
 * is narrowed into *addr only if the ioctl succeeded, so *addr is left
 * untouched on failure.
 */
static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}
/* Read a signed 64-bit register from KVM via KVM_GET_ONE_REG. */
static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
/* Read an unsigned 64-bit register from KVM via KVM_GET_ONE_REG. */
static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
/*
 * Writable-bit masks for the Config registers: only these bits may be
 * changed through kvm_mips_change_one_reg(); the rest are owned by KVM.
 */
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))
/* Config6 is Loongson-specific; all its implementation bits are writable. */
#define KVM_REG_MIPS_CP0_CONFIG6_MASK   ((1U << CP0C6_BPPASS) | \
                                         (0x3fU << CP0C6_KPOS) | \
                                         (1U << CP0C6_KE) | \
                                         (1U << CP0C6_VTLBONLY) | \
                                         (1U << CP0C6_LASX) | \
                                         (1U << CP0C6_SSEN) | \
                                         (1U << CP0C6_DISDRTIME) | \
                                         (1U << CP0C6_PIXNUEN) | \
                                         (1U << CP0C6_SCRAND) | \
                                         (1U << CP0C6_LLEXCEN) | \
                                         (1U << CP0C6_DISVC) | \
                                         (1U << CP0C6_VCLRU) | \
                                         (1U << CP0C6_DCLRU) | \
                                         (1U << CP0C6_PIXUEN) | \
                                         (1U << CP0C6_DISBLKLYEN) | \
                                         (1U << CP0C6_UMEMUALEN) | \
                                         (1U << CP0C6_SFBEN) | \
                                         (1U << CP0C6_FLTINT) | \
                                         (1U << CP0C6_VLTINT) | \
                                         (1U << CP0C6_DISBTB) | \
                                         (3U << CP0C6_STPREFCTL) | \
                                         (1U << CP0C6_INSTPREF) | \
                                         (1U << CP0C6_DATAPREF))
  354. static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
  355. int32_t *addr, int32_t mask)
  356. {
  357. int err;
  358. int32_t tmp, change;
  359. err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
  360. if (err < 0) {
  361. return err;
  362. }
  363. /* only change bits in mask */
  364. change = (*addr ^ tmp) & mask;
  365. if (!change) {
  366. return 0;
  367. }
  368. tmp = tmp ^ change;
  369. return kvm_mips_put_one_reg(cs, reg_id, &tmp);
  370. }
/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */
/*
 * Save the state of the KVM timer when VM clock is stopped or state is synced
 * to QEMU.
 *
 * Freezes the in-kernel timer (COUNT_CTL.DC) so Count stops ticking, then
 * reads CP0_Cause and CP0_Count into env.  Each step's error is recorded
 * but does not abort the remaining steps; the first/last error is returned.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
/*
 * Restore the state of the KVM timer when VM clock is restarted or state is
 * synced to KVM.
 *
 * Ensures the timer is frozen (COUNT_CTL.DC set) before writing CP0_Cause
 * and CP0_Count back, then unfreezes it.  Clearing DC makes KVM resume the
 * timer from the COUNT_RESUME timestamp.  Errors are accumulated rather
 * than aborting, so as much state as possible is restored.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    /* Only unfreeze if we could read COUNT_CTL in the first place. */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}
/*
 * Handle the VM clock being started or stopped
 *
 * On stop: save the KVM timer state (unless the vcpu state is already
 * dirty, i.e. already synced to QEMU).  On start: stamp COUNT_RESUME with
 * the current time so KVM resumes the timer from "now", then restore the
 * saved timer state.
 */
static void kvm_mips_update_state(void *opaque, bool running, RunState state)
{
    CPUState *cs = opaque;
    int ret;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                warn_report("Failed saving count");
            }
        }
    } else {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                      &count_resume);
        if (ret < 0) {
            warn_report("Failed setting COUNT_RESUME");
            return;
        }

        if (!cs->vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                warn_report("Failed restoring count");
            }
        }
    }
}
  493. static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
  494. {
  495. CPUMIPSState *env = cpu_env(cs);
  496. int err, ret = 0;
  497. unsigned int i;
  498. /* Only put FPU state if we're emulating a CPU with an FPU */
  499. if (env->CP0_Config1 & (1 << CP0C1_FP)) {
  500. /* FPU Control Registers */
  501. if (level == KVM_PUT_FULL_STATE) {
  502. err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
  503. &env->active_fpu.fcr0);
  504. if (err < 0) {
  505. DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
  506. ret = err;
  507. }
  508. }
  509. err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
  510. &env->active_fpu.fcr31);
  511. if (err < 0) {
  512. DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
  513. ret = err;
  514. }
  515. /*
  516. * FPU register state is a subset of MSA vector state, so don't put FPU
  517. * registers if we're emulating a CPU with MSA.
  518. */
  519. if (!ase_msa_available(env)) {
  520. /* Floating point registers */
  521. for (i = 0; i < 32; ++i) {
  522. if (env->CP0_Status & (1 << CP0St_FR)) {
  523. err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
  524. &env->active_fpu.fpr[i].d);
  525. } else {
  526. err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
  527. &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
  528. }
  529. if (err < 0) {
  530. DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
  531. ret = err;
  532. }
  533. }
  534. }
  535. }
  536. /* Only put MSA state if we're emulating a CPU with MSA */
  537. if (ase_msa_available(env)) {
  538. /* MSA Control Registers */
  539. if (level == KVM_PUT_FULL_STATE) {
  540. err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
  541. &env->msair);
  542. if (err < 0) {
  543. DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
  544. ret = err;
  545. }
  546. }
  547. err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
  548. &env->active_tc.msacsr);
  549. if (err < 0) {
  550. DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
  551. ret = err;
  552. }
  553. /* Vector registers (includes FP registers) */
  554. for (i = 0; i < 32; ++i) {
  555. /* Big endian MSA not supported by QEMU yet anyway */
  556. err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
  557. env->active_fpu.fpr[i].wr.d);
  558. if (err < 0) {
  559. DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
  560. ret = err;
  561. }
  562. }
  563. }
  564. return ret;
  565. }
/*
 * Copy FPU/MSA state from KVM into QEMU's env, refreshing the softfloat
 * status (rounding/flush modes) after FCSR/MSACSR are read.  Errors on
 * individual registers are recorded but do not stop remaining transfers.
 */
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            /* Re-derive softfloat state from the new FCSR value. */
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't save FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!ase_msa_available(env)) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                /* FR=1: full 64-bit FPRs; FR=0: 32-bit even/odd pairs. */
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (ase_msa_available(env)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            /* Re-derive MSA softfloat state from the new MSACSR value. */
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
/*
 * Copy the CP0 register state from QEMU's env out to KVM, one register at a
 * time.  Config0-6 go through kvm_mips_change_one_reg() so only the bits
 * QEMU is allowed to own are modified.  The timer (Cause/Count) is restored
 * via kvm_mips_restore_count() only while the VM clock is running; if it is
 * stopped, that happens later when the clock restarts.  Errors are
 * accumulated rather than aborting, and the last error (or 0) is returned.
 */
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    CPUMIPSState *env = cpu_env(cs);
    int err, ret = 0;

    /* level is currently unused; all registers are always written. */
    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_RANDOM (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
                               &env->CP0_PageGrain);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEGRAIN (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
                                 &env->CP0_PWBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWBASE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
                                 &env->CP0_PWField);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWField (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
                                 &env->CP0_PWSize);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWSIZE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWCTL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EBASE (%d)\n", __func__, err);
        ret = err;
    }

    /* Config registers: change only the bits QEMU owns (see *_MASK). */
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6,
                                  &env->CP0_Config6,
                                  KVM_REG_MIPS_CP0_CONFIG6_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG6 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
                                 &env->CP0_XContext);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_XCONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
                                 &env->CP0_KScratch[0]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
                                 &env->CP0_KScratch[1]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
                                 &env->CP0_KScratch[2]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
                                 &env->CP0_KScratch[3]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
                                 &env->CP0_KScratch[4]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
                                 &env->CP0_KScratch[5]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH6 (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
  855. static int kvm_mips_get_cp0_registers(CPUState *cs)
  856. {
  857. CPUMIPSState *env = cpu_env(cs);
  858. int err, ret = 0;
  859. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
  860. if (err < 0) {
  861. DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
  862. ret = err;
  863. }
  864. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
  865. if (err < 0) {
  866. DPRINTF("%s: Failed to get CP0_RANDOM (%d)\n", __func__, err);
  867. ret = err;
  868. }
  869. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
  870. &env->CP0_Context);
  871. if (err < 0) {
  872. DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
  873. ret = err;
  874. }
  875. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
  876. &env->active_tc.CP0_UserLocal);
  877. if (err < 0) {
  878. DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
  879. ret = err;
  880. }
  881. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
  882. &env->CP0_PageMask);
  883. if (err < 0) {
  884. DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
  885. ret = err;
  886. }
  887. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
  888. &env->CP0_PageGrain);
  889. if (err < 0) {
  890. DPRINTF("%s: Failed to get CP0_PAGEGRAIN (%d)\n", __func__, err);
  891. ret = err;
  892. }
  893. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
  894. &env->CP0_PWBase);
  895. if (err < 0) {
  896. DPRINTF("%s: Failed to get CP0_PWBASE (%d)\n", __func__, err);
  897. ret = err;
  898. }
  899. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
  900. &env->CP0_PWField);
  901. if (err < 0) {
  902. DPRINTF("%s: Failed to get CP0_PWFIELD (%d)\n", __func__, err);
  903. ret = err;
  904. }
  905. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
  906. &env->CP0_PWSize);
  907. if (err < 0) {
  908. DPRINTF("%s: Failed to get CP0_PWSIZE (%d)\n", __func__, err);
  909. ret = err;
  910. }
  911. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
  912. if (err < 0) {
  913. DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
  914. ret = err;
  915. }
  916. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
  917. if (err < 0) {
  918. DPRINTF("%s: Failed to get CP0_PWCtl (%d)\n", __func__, err);
  919. ret = err;
  920. }
  921. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
  922. if (err < 0) {
  923. DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
  924. ret = err;
  925. }
  926. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
  927. &env->CP0_BadVAddr);
  928. if (err < 0) {
  929. DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
  930. ret = err;
  931. }
  932. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
  933. &env->CP0_EntryHi);
  934. if (err < 0) {
  935. DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
  936. ret = err;
  937. }
  938. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
  939. &env->CP0_Compare);
  940. if (err < 0) {
  941. DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
  942. ret = err;
  943. }
  944. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
  945. if (err < 0) {
  946. DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
  947. ret = err;
  948. }
  949. /* If VM clock stopped then state was already saved when it was stopped */
  950. if (runstate_is_running()) {
  951. err = kvm_mips_save_count(cs);
  952. if (err < 0) {
  953. ret = err;
  954. }
  955. }
  956. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
  957. if (err < 0) {
  958. DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
  959. ret = err;
  960. }
  961. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
  962. if (err < 0) {
  963. DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
  964. ret = err;
  965. }
  966. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
  967. if (err < 0) {
  968. DPRINTF("%s: Failed to get CP0_EBASE (%d)\n", __func__, err);
  969. ret = err;
  970. }
  971. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
  972. if (err < 0) {
  973. DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
  974. ret = err;
  975. }
  976. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
  977. if (err < 0) {
  978. DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
  979. ret = err;
  980. }
  981. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
  982. if (err < 0) {
  983. DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
  984. ret = err;
  985. }
  986. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
  987. if (err < 0) {
  988. DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
  989. ret = err;
  990. }
  991. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
  992. if (err < 0) {
  993. DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
  994. ret = err;
  995. }
  996. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
  997. if (err < 0) {
  998. DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
  999. ret = err;
  1000. }
  1001. err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6, &env->CP0_Config6);
  1002. if (err < 0) {
  1003. DPRINTF("%s: Failed to get CP0_CONFIG6 (%d)\n", __func__, err);
  1004. ret = err;
  1005. }
  1006. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
  1007. &env->CP0_XContext);
  1008. if (err < 0) {
  1009. DPRINTF("%s: Failed to get CP0_XCONTEXT (%d)\n", __func__, err);
  1010. ret = err;
  1011. }
  1012. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
  1013. &env->CP0_ErrorEPC);
  1014. if (err < 0) {
  1015. DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
  1016. ret = err;
  1017. }
  1018. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
  1019. &env->CP0_KScratch[0]);
  1020. if (err < 0) {
  1021. DPRINTF("%s: Failed to get CP0_KSCRATCH1 (%d)\n", __func__, err);
  1022. ret = err;
  1023. }
  1024. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
  1025. &env->CP0_KScratch[1]);
  1026. if (err < 0) {
  1027. DPRINTF("%s: Failed to get CP0_KSCRATCH2 (%d)\n", __func__, err);
  1028. ret = err;
  1029. }
  1030. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
  1031. &env->CP0_KScratch[2]);
  1032. if (err < 0) {
  1033. DPRINTF("%s: Failed to get CP0_KSCRATCH3 (%d)\n", __func__, err);
  1034. ret = err;
  1035. }
  1036. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
  1037. &env->CP0_KScratch[3]);
  1038. if (err < 0) {
  1039. DPRINTF("%s: Failed to get CP0_KSCRATCH4 (%d)\n", __func__, err);
  1040. ret = err;
  1041. }
  1042. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
  1043. &env->CP0_KScratch[4]);
  1044. if (err < 0) {
  1045. DPRINTF("%s: Failed to get CP0_KSCRATCH5 (%d)\n", __func__, err);
  1046. ret = err;
  1047. }
  1048. err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
  1049. &env->CP0_KScratch[5]);
  1050. if (err < 0) {
  1051. DPRINTF("%s: Failed to get CP0_KSCRATCH6 (%d)\n", __func__, err);
  1052. ret = err;
  1053. }
  1054. return ret;
  1055. }
  1056. int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
  1057. {
  1058. CPUMIPSState *env = cpu_env(cs);
  1059. struct kvm_regs regs;
  1060. int ret;
  1061. int i;
  1062. /* Set the registers based on QEMU's view of things */
  1063. for (i = 0; i < 32; i++) {
  1064. regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
  1065. }
  1066. regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
  1067. regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
  1068. regs.pc = (int64_t)(target_long)env->active_tc.PC;
  1069. ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
  1070. if (ret < 0) {
  1071. return ret;
  1072. }
  1073. ret = kvm_mips_put_cp0_registers(cs, level);
  1074. if (ret < 0) {
  1075. return ret;
  1076. }
  1077. ret = kvm_mips_put_fpu_registers(cs, level);
  1078. if (ret < 0) {
  1079. return ret;
  1080. }
  1081. return ret;
  1082. }
  1083. int kvm_arch_get_registers(CPUState *cs, Error **errp)
  1084. {
  1085. CPUMIPSState *env = cpu_env(cs);
  1086. int ret = 0;
  1087. struct kvm_regs regs;
  1088. int i;
  1089. /* Get the current register set as KVM seems it */
  1090. ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
  1091. if (ret < 0) {
  1092. return ret;
  1093. }
  1094. for (i = 0; i < 32; i++) {
  1095. env->active_tc.gpr[i] = regs.gpr[i];
  1096. }
  1097. env->active_tc.HI[0] = regs.hi;
  1098. env->active_tc.LO[0] = regs.lo;
  1099. env->active_tc.PC = regs.pc;
  1100. kvm_mips_get_cp0_registers(cs);
  1101. kvm_mips_get_fpu_registers(cs);
  1102. return ret;
  1103. }
/*
 * MSI route fixup hook called by the generic KVM code.  No-op on MIPS:
 * the route entry is left unmodified and success (0) is returned.
 */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
/*
 * Post-add hook for MSI routes.  No-op on MIPS: nothing to do after a
 * route is added, so always report success.
 */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
/*
 * Post-release hook for a virtual IRQ.  No-op on MIPS: no per-arch
 * cleanup is required, so always report success.
 */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
/*
 * Translate MSI data to a GSI.  Not implemented for MIPS; reaching this
 * function indicates a programming error, so abort outright.
 */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
  1122. int kvm_arch_get_default_type(MachineState *machine)
  1123. {
  1124. #if defined(KVM_CAP_MIPS_VZ)
  1125. int r;
  1126. KVMState *s = KVM_STATE(machine->accelerator);
  1127. r = kvm_check_extension(s, KVM_CAP_MIPS_VZ);
  1128. if (r > 0) {
  1129. return KVM_VM_MIPS_VZ;
  1130. }
  1131. #endif
  1132. error_report("KVM_VM_MIPS_VZ type is not available");
  1133. return -1;
  1134. }
/*
 * Accelerator class init hook.  No MIPS-specific KVM accelerator
 * properties to register, so the body is intentionally empty.
 */
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}