xen-hvm.c

/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"

#include "sysemu/char.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older version */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed. This is here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
 * needs to be included before this block and hw/xen/xen_common.h needs to
 * be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT 3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define BUFFER_IO_MAX_DELAY 100

/* Leave some slack so that hvmloader does not complain about lack of
 * memory at boot time ("Could not allocate order=0 extent").
 * Once hvmloader is modified to cope with that situation without
 * printing warning messages, QEMU_SPARE_PAGES can be removed.
 */
#define QEMU_SPARE_PAGES 16

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification, */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;

/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

static void xen_ram_init(ram_addr_t *below_4g_mem_size,
                         ram_addr_t *above_4g_mem_size,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
                                                   PC_MACHINE_MAX_RAM_BELOW_4G,
                                                   &error_abort);

    /* Handle the machine opt max-ram-below-4g. It is basically doing
     * min(xen limit, user limit).
     */
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        *above_4g_mem_size = ram_size - user_lowmem;
        *below_4g_mem_size = user_lowmem;
    } else {
        *above_4g_mem_size = 0;
        *below_4g_mem_size = ram_size;
    }
    if (!*above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory continuously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + *above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_abort);
    *ram_memory_p = &ram_memory;
    vmstate_register_ram_global(&ram_memory);

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space; it will be registered later by the VGA
     * emulated device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Options ROM, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             *below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (*above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 *above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
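
/*
 * Populate guest RAM for a newly allocated MemoryRegion.  The pages are
 * allocated inside Xen with xc_domain_populate_physmap_exact(); if the
 * domain does not have enough headroom, its maximum allowed memory is
 * raised first via xc_domain_setmaxmem().  Nothing is done for the main
 * "xen.ram" region or during an incoming migration, where the RAM has
 * already been populated by the toolstack.
 */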
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;
    xc_domaininfo_t info;
    unsigned long free_pages;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if ((xc_domain_getinfolist(xen_xc, xen_domid, 1, &info) != 1) ||
        (info.domain != xen_domid)) {
        hw_error("xc_domain_getinfolist failed");
    }
    free_pages = info.max_pages - info.tot_pages;
    if (free_pages > QEMU_SPARE_PAGES) {
        free_pages -= QEMU_SPARE_PAGES;
    } else {
        free_pages = 0;
    }
    if ((free_pages < nr_pfn) &&
        (xc_domain_setmaxmem(xen_xc, xen_domid,
                             ((info.max_pages + nr_pfn - free_pages)
                              << (XC_PAGE_SHIFT - 10))) < 0)) {
        hw_error("xc_domain_setmaxmem failed");
    }
    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];
    const char *mr_name;

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr_name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) {
            return -1;
        }
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        xen_pfn_t idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}

#else
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif
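
/*
 * MemoryListener callback shared by xen_region_add() and xen_region_del().
 * Only RAM sections other than the main "xen.ram" block pass the filter
 * below; additions are entered into the physmap (ROM regions are instead
 * marked read-only in Xen), removals are taken back out of the physmap.
 */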
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (!(section->mr != &ram_memory
          && ( (log_dirty && add) || (!log_dirty && !add)))) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}
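
/*
 * Ask Xen for the dirty-VRAM bitmap covering the given physmap range and
 * propagate any set bits into QEMU's dirty tracking for the framebuffer
 * region.  Xen tracks only one range at a time, so requests for a range
 * other than the one currently being logged are ignored.
 */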
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        };
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

/* get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/* use poll to get the port notification */
/* retval -- the pending ioreq packet for the notified vcpu, or NULL */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
    case 1:
        return cpu_inb(addr);
    case 2:
        return cpu_inw(addr);
    case 4:
        return cpu_inl(addr);
    default:
        hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
    case 1:
        return cpu_outb(addr, val);
    case 2:
        return cpu_outw(addr, val);
    case 4:
        return cpu_outl(addr, val);
    default:
        hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}

static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}
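
/*
 * IOREQ_TYPE_COPY: move data between guest physical memory and the MMIO
 * address in req->addr.  When data_is_ptr is set, req->data is itself a
 * guest physical address and the transfer is staged through a temporary.
 */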
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(req);
        break;
    case IOREQ_TYPE_VMWARE_PORT:
        handle_vmport_ioreq(state, req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        break;
    case IOREQ_TYPE_INVALIDATE:
        xen_invalidate_map_cache();
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
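
/*
 * Drain the buffered ioreq ring: entries between read_pointer and
 * write_pointer are converted into regular ioreq_t requests and handled
 * in place.  8-byte accesses occupy two consecutive slots, with the high
 * 32 bits of the data stored in the second slot.
 */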
static int handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(state, &req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(state, req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size
                    ", size: %" FMT_ioreq_size
                    ", type: %"FMT_ioreq_size"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    if (running) {
        xen_main_loop_prepare((XenIOState *)opaque);
    }
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL)
        return;

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

/* return 0 means OK, or -1 means critical issue -- will exit(1) */
int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
                 MemoryRegion **ram_memory)
{
    int i, rc;
    unsigned long ioreq_pfn;
    unsigned long bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -1;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -1;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                 PROT_READ|PROT_WRITE, ioreq_pfn);
        if (state->shared_vmport_page == NULL) {
            hw_error("map shared vmport IO page returned error %d handle="
                     XC_INTERFACE_FMT, errno, xen_xc);
        }
    } else if (rc != -ENOSYS) {
        hw_error("get vmport regs pfn returned error %d, rc=%d", errno, rc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &bufioreq_evtchn);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    (uint32_t)bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(below_4g_mem_size, above_4g_mem_size, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
        return -1;
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}
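
/*
 * Ask Xen to reboot or power off the guest.  A separate xenctrl handle is
 * opened and closed around the xc_domain_shutdown() call.
 */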
void destroy_hvm_domain(bool reboot)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}

void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}