kvm-all.c
/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"
#include "bswap.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
    int xsave, xcrs;
    int many_ioeventfds;
};

KVMState *kvm_state;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};
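
/* Slot bookkeeping: each KVMSlot describes one guest-physical memory region
 * that has been registered with the kernel via KVM_SET_USER_MEMORY_REGION.
 * A slot with memory_size == 0 is considered free. */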
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_ram(KVMState *s, ram_addr_t ram_addr,
                                      target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram_addr >= mem->phys_offset &&
            ram_addr < mem->phys_offset + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram_addr - mem->phys_offset);
            return 1;
        }
    }

    return 0;
}
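
/* Push one slot's parameters to the kernel.  Note that dirty-page logging is
 * forced on for every slot while migration logging is active. */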
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}
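
/* Create the vcpu for @env: issue KVM_CREATE_VCPU, mmap the shared kvm_run
 * structure, then let the architecture code finish the per-vcpu setup. */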
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static int kvm_log_start(CPUPhysMemoryClient *client,
                         target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, true);
}

static int kvm_log_stop(CPUPhysMemoryClient *client,
                        target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, false);
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned long *bitmap,
                                         unsigned long offset,
                                         unsigned long mem_size)
{
    unsigned int i, j;
    unsigned long page_number, addr, addr1, c;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
        HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);

            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = offset + addr1;
                ram_addr = cpu_get_physical_page_desc(addr);
                cpu_physical_memory_set_dirty(ram_addr);
            } while (c != 0);
        }
    }
    return 0;
}
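
/* Round x up to the next multiple of y; y must be a power of two. */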
#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty(), i.e. every page the kernel reports as
 * dirty is marked dirty on the QEMU side.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But when the kernel is 64 bits and the userspace
         * is 32 bits, userspace can't align to the same bits-per-long,
         * since sizeof(long) is different between kernel and user space.
         * As a result, userspace may provide a buffer that is 4 bytes
         * smaller than what the kernel will use, causing userspace memory
         * corruption (which is not detectable by valgrind too, in most
         * cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * the hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
                                      mem->start_addr, mem->memory_size);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
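
/* Register, resize or delete a guest-physical memory range with KVM.  KVM
 * slots must not overlap, so any existing slots intersecting the new range
 * are unregistered first and re-registered as prefix/suffix slots around it. */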
static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
                             ram_addr_t phys_offset, bool log_dirty)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    size = TARGET_PAGE_ALIGN(size);
    start_addr = TARGET_PAGE_ALIGN(start_addr);

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size, ram_addr_t phys_offset,
                                  bool log_dirty)
{
    kvm_set_phys_mem(start_addr, size, phys_offset, log_dirty);
}

static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return kvm_set_migration_log(enable);
}

static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
    .set_memory = kvm_client_set_memory,
    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
    .migration_log = kvm_client_migration_log,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
};

static void kvm_handle_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
    }
}
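
/* Global KVM initialisation: open /dev/kvm, create the VM, probe the
 * capabilities this code depends on, and register the memory client. */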
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;

    s = g_malloc0(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s) {
        if (s->vmfd >= 0) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    g_free(s);

    return ret;
}
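
/* Emulate a batch of programmed I/O accesses described by a KVM_EXIT_IO exit;
 * @data points into the vcpu's kvm_run mapping, one element per repetition. */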
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}
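
/* Drain the coalesced MMIO ring shared with the kernel, replaying each
 * pending write; re-entrant calls are suppressed via a simple flag. */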
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];
            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}
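
/* Main vcpu loop: flush dirty register state, enter the guest with KVM_RUN
 * (dropping the iothread lock around the ioctl) and dispatch the exit reason
 * until something requires returning to the main loop. */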
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    cpu_single_env = env;

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            DPRINTF("kvm run failed %s\n", strerror(-run_ret));
            abort();
        }

        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    env->exit_request = 0;
    cpu_single_env = NULL;
    return ret;
}
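
/* Thin ioctl wrappers for the three KVM file descriptors (system, VM and
 * vcpu); they convert the -1/errno convention into negative errno returns. */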
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
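
/* Bind (or unbind) an eventfd to a 4-byte MMIO or 2-byte PIO address via
 * KVM_IOEVENTFD, so guest writes of the matching value signal the eventfd
 * in the kernel instead of causing a userspace exit. */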
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = 4;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}