/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"

#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "trace.h"

#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/irq.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen-x86.h"
#include "qemu/range.h"

#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/arch_hvm.h"
#include <xen/hvm/e820.h>
#include "exec/target_page.h"
#include "target/i386/cpu.h"
#include "system/runstate.h"
#include "system/xen-mapcache.h"
#include "system/xen.h"

static MemoryRegion ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older version */

/*
 * This allows QEMU to build on a system that has Xen 4.5 or earlier installed.
 * This is here (not in hw/xen/xen_native.h) because xen/hvm/ioreq.h needs to
 * be included before this block and hw/xen/xen_native.h needs to be included
 * before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

static shared_vmport_iopage_t *shared_vmport_page;

static QLIST_HEAD(, XenPhysmap) xen_physmap;
static const XenPhysmap *log_for_dirtybit;
/* Buffer used by xen_sync_dirty_bitmap */
static unsigned long *dirty_bitmap;
static Notifier suspend;
static Notifier wakeup;

/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + (PCI_SLOT(pci_dev->devfn) << 2);
}

void xen_intx_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3, level);
}

int xen_set_pci_link_route(uint8_t link, uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, xen_domid, link, irq);
}

int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If vector is 0, the msi is remapped into a pirq, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */
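
/*
 * Carve the guest RAM laid out by the toolstack into the QEMU memory map:
 * the 640K base region, the low RAM alias below 4G (minus the VGA hole)
 * and, when needed, the high RAM alias above 4G.
 */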
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    X86MachineState *x86ms = X86_MACHINE(pcms);
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem =
        object_property_get_uint(qdev_get_machine(),
                                 PC_MACHINE_MAX_RAM_BELOW_4G,
                                 &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        x86ms->above_4g_mem_size = ram_size - user_lowmem;
        x86ms->below_4g_mem_size = user_lowmem;
    } else {
        x86ms->above_4g_mem_size = 0;
        x86ms->below_4g_mem_size = ram_size;
    }

    if (!x86ms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (4 * GiB) + x86ms->above_4g_mem_size;
    }
    memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &xen_memory;

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &xen_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &xen_memory, 0xc0000,
                             x86ms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (x86ms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &xen_memory, 0x100000000ULL,
                                 x86ms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
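
/* Look up the physmap entry covering the page-aligned guest address, if any. */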
static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size,
                                   int page_mask)
{
    XenPhysmap *physmap = NULL;

    start_addr &= page_mask;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}
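
/*
 * Translate a QEMU ram_addr_t offset back to the guest physical address it
 * was mapped at; offsets with no physmap entry are returned unchanged.
 */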
static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size,
                                       int page_mask)
{
    hwaddr addr = phys_offset & page_mask;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr + (phys_offset - physmap->phys_offset);
        }
    }

    return phys_offset;
}
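
/*
 * Record a physmap entry under /local/domain/0/device-model/<domid>/physmap
 * in xenstore so a later device model instance can rebuild it
 * (legacy Xen only; otherwise a no-op).
 */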
#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    char path[80], value[17];

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (physmap->name) {
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                xen_domid, (uint64_t)physmap->phys_offset);
        if (!xs_write(state->xenstore, 0, path,
                      physmap->name, strlen(physmap->name))) {
            return -1;
        }
    }
    return 0;
}
#else
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    return 0;
}
#endif
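
/*
 * Relocate the RAM backing a memory region so it is mapped at the guest
 * physical address the region was added at, pin the range write-back
 * cacheable, and record the mapping in the physmap list and (on legacy Xen)
 * in xenstore.
 */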
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    unsigned long nr_pages;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    const char *mr_name;

    if (get_physmapping(start_addr, size, page_mask)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any regions that are not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    mr_name = memory_region_name(mr);

    physmap = g_new(XenPhysmap, 1);

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Now that we have a physmap entry we can replace a dummy mapping with
         * a real one of guest foreign memory. */
        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
        assert(p && p == memory_region_get_ram_ptr(mr));

        return 0;
    }

    pfn = phys_offset >> target_page_bits;
    start_gpfn = start_addr >> target_page_bits;
    nr_pages = size >> target_page_bits;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
                                        start_gpfn);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
                                   start_addr >> target_page_bits,
                                   (start_addr + size - 1) >> target_page_bits,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    if (rc) {
        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
    }
    return xen_save_physmap(state, physmap);
}
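
/*
 * Undo xen_add_to_physmap(): move the pages back to their original GFNs,
 * drop the physmap entry and, if this range was the dirty-log target,
 * release the dirty bitmap.
 */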
static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(start_addr, size, page_mask);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= target_page_bits;
    start_addr >>= target_page_bits;
    phys_offset >>= target_page_bits;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
                                        phys_offset);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory "RAM_ADDR_FMT" pages"
                     " from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     size, start_addr, phys_offset, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    QLIST_REMOVE(physmap, list);
    if (log_for_dirtybit == physmap) {
        log_for_dirtybit = NULL;
        g_free(dirty_bitmap);
        dirty_bitmap = NULL;
    }
    g_free(physmap);

    return 0;
}
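
/*
 * Fetch the VRAM dirty bitmap for the tracked framebuffer range from Xen
 * and propagate every dirty page into QEMU's dirty memory bitmap.
 */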
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    hwaddr npages = size >> target_page_bits;
    const int width = sizeof(unsigned long) * 8;
    size_t bitmap_size = DIV_ROUND_UP(npages, width);
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(start_addr, size, page_mask);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (log_for_dirtybit == NULL) {
        log_for_dirtybit = physmap;
        dirty_bitmap = g_new(unsigned long, bitmap_size);
    } else if (log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> target_page_bits,
                              npages, dirty_bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" HWADDR_FMT_plx
                    ", 0x" HWADDR_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < bitmap_size; i++) {
        unsigned long map = dirty_bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * page_size, page_size);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        log_for_dirtybit = NULL;
        g_free(dirty_bitmap);
        dirty_bitmap = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static bool xen_log_global_start(MemoryListener *listener, Error **errp)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
    return true;
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static const MemoryListener xen_memory_listener = {
    .name = "xen-memory",
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
};
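
/*
 * Shuffle the VMware backdoor register state between the shared vmport page
 * and the vCPU that issued the I/O request.
 */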
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}
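
/*
 * Service a VMware port ioreq: load the vCPU registers from the shared
 * vmport page, run the PIO access, then write the registers back.
 */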
static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(shared_vmport_page);
    vmport_regs =
        &shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}
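
/*
 * Rebuild the physmap list from the entries a previous device model saved
 * in xenstore (legacy Xen only; otherwise a no-op).
 */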
#ifdef XEN_COMPAT_PHYSMAP
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_new(XenPhysmap, 1);
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/size",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/name",
                xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
    }
    free(entries);
}
#else
static void xen_read_physmap(XenIOState *state)
{
}
#endif

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}
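
/*
 * Detect whether this device model runs in a stub domain by reading the
 * guest's device-model-domid node from xenstore.
 */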
static bool xen_check_stubdomain(struct xs_handle *xsh)
{
    char *dm_path = g_strdup_printf(
        "/local/domain/%d/image/device-model-domid", xen_domid);
    char *val;
    int32_t dm_domid;
    bool is_stubdom = false;

    val = xs_read(xsh, 0, dm_path, NULL);
    if (val) {
        if (sscanf(val, "%d", &dm_domid) == 1) {
            is_stubdom = dm_domid != 0;
        }
        free(val);
    }

    g_free(dm_path);
    return is_stubdom;
}
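
/*
 * Entry point for Xen HVM support on the PC machine: register the ioreq
 * server and memory listener, restore any saved physmap, hook up the
 * suspend/wakeup notifiers, map the shared vmport page if the hypervisor
 * provides one, and lay out guest RAM.
 */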
void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    MachineState *ms = MACHINE(pcms);
    unsigned int max_cpus = ms->smp.max_cpus;
    int rc;
    xen_pfn_t ioreq_pfn;
    XenIOState *state;

    state = g_new0(XenIOState, 1);

    xen_register_ioreq(state, max_cpus,
                       HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                       &xen_memory_listener);

    xen_is_stubdomain = xen_check_stubdomain(state->xenstore);

    QLIST_INIT(&xen_physmap);
    xen_read_physmap(state);

    suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&suspend);

    wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&wakeup);

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    xen_ram_init(pcms, ms->ram_size, ram_memory);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;
    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}
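
/*
 * During migration, tell Xen which guest pages QEMU has written to so they
 * are re-sent; outside migration this is a no-op.
 */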
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;

    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        start = xen_phys_offset_to_gaddr(start, length, page_mask);

        if (length == 0) {
            length = page_size;
        }
        start_pfn = start >> target_page_bits;
        nb_pages = ((start + length + page_size - 1) >> target_page_bits)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, errno, strerror(errno));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, errp);
    } else {
        memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
    }
}
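
/*
 * Architecture hook called when a RAM memory region section is added to or
 * removed from the guest address space: map VRAM through the physmap, mark
 * ROM regions read-only, or tear the mapping down again.
 */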
void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section,
                         bool add)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= page_mask;
    size = ROUND_UP(size, page_size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> target_page_bits,
                                 size >> target_page_bits)) {
                DPRINTF("xen_set_mem_type error, addr: "HWADDR_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "HWADDR_FMT_plx"\n",
                    start_addr);
        }
    }
}
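
/*
 * Architecture hook for ioreq types not handled by the common code; only
 * VMware port requests are expected here.
 */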
void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
{
    switch (req->type) {
    case IOREQ_TYPE_VMWARE_PORT:
        handle_vmport_ioreq(state, req);
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }

    return;
}