xen-all.c

/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci/pci.h"
#include "hw/pc.h"
#include "hw/xen_common.h"
#include "hw/xen_backend.h"
#include "qmp-commands.h"

#include "char/char.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN

#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older Xen versions */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
} XenIOState;

/* Xen-specific functions for the PIIX PCI bridge */
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}
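
/*
 * Mirror guest writes to the PIIX3 PIRQ route control registers
 * (config space offsets 0x60-0x63) into Xen: each byte selects the ISA
 * IRQ a PCI interrupt link is routed to, and bit 7 set means the link
 * is disabled, which is reported to Xen as route 0.
 */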
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */
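
/*
 * Lay out guest RAM for the HVM domain: aliases of the single "xen.ram"
 * block are mapped below 640K, from 0xc0000 up to the 4G boundary (minus
 * the MMIO hole), and above 4G when the guest has more memory than fits
 * below the hole.
 */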
static void xen_ram_init(ram_addr_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;
    ram_addr_t block_len;

    block_len = ram_size;
    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        /* Xen does not allocate the memory contiguously; it keeps a hole
         * of size HVM_BELOW_4G_MMIO_LENGTH at HVM_BELOW_4G_MMIO_START.
         */
        block_len += HVM_BELOW_4G_MMIO_LENGTH;
    }
    memory_region_init_ram(&ram_memory, "xen.ram", block_len);
    vmstate_register_ram_global(&ram_memory);

    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
        below_4g_mem_size = HVM_BELOW_4G_RAM_END;
    } else {
        below_4g_mem_size = ram_size;
    }

    memory_region_init_alias(&ram_640k, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA I/O memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to
     * load option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, "xen.ram.lo",
                             &ram_memory, 0xc0000, below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
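
/*
 * Ask Xen to populate the guest pfns that back a newly allocated RAM
 * block.  The main "xen.ram" block and allocations made while an incoming
 * migration is in progress are skipped, since Xen has already populated
 * that memory.
 */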
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
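/*
 * Relocate a RAM region (in practice the video RAM) inside the guest
 * physical address space using XENMAPSPACE_gmfn, record the mapping in
 * the local physmap list and in xenstore so a restarted device model can
 * recover it, and pin the new range as write-back cacheable.
 */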
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any regions that are not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = (char *)mr->name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr->name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr->name, strlen(mr->name))) {
            return -1;
        }
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", from "
            "%"HWADDR_PRIx"\n", phys_offset, phys_offset + size, start_addr);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        unsigned long idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}
#else
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif
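
/*
 * MemoryListener callback shared by region_add and region_del: forward
 * RAM sections other than the main "xen.ram" block to the physmap code
 * above, and mark ROM sections read-only for the guest with
 * xc_hvm_set_mem_type().
 */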
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (!(section->mr != &ram_memory
          && ( (log_dirty && add) || (!log_dirty && !add)))) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
}
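
/*
 * Fetch the dirty-vram bitmap for the tracked range from Xen and feed it
 * into QEMU's dirty memory tracking so the display code only redraws
 * modified pages.  Xen can track only one such range at a time.
 */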
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
        if (rc != -ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(-rc));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ffsl(map) - 1;
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          section->size);
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          section->size);
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}

/* VCPU Operations, MMIO, IO ring ... */

static void xen_reset_vcpu(void *opaque)
{
    CPUArchState *env = opaque;

    env->halted = 1;
}

void xen_vcpu_init(void)
{
    if (first_cpu != NULL) {
        qemu_register_reset(xen_reset_vcpu, first_cpu);
        xen_reset_vcpu(first_cpu);
    }
    /* if rtc_clock is left to default (host_clock), disable it */
    if (rtc_clock == host_clock) {
        qemu_clock_enable(rtc_clock, false);
    }
}

/* get the ioreq packet from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Use poll to get the port notification.
 * Returns the next pending ioreq from shared memory, or NULL if there is
 * none (or if the notification was for the buffered-io channel).
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        qemu_mod_timer(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < smp_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == smp_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
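
/*
 * Execute a port I/O request for the guest: req->data either holds the
 * value itself or, when data_is_ptr is set, a guest physical address that
 * the values are copied from/to one element at a time.
 */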
static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}
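
/*
 * Execute a memory-copy (MMIO) request: move req->count items of req->size
 * bytes between req->addr and req->data, where req->data is either an
 * immediate value or a guest physical pointer depending on data_is_ptr.
 */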
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}
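
/*
 * Dispatch a single ioreq.  Write data narrower than a target long is
 * masked down to its size first; TIMEOFFSET requests are ignored and
 * INVALIDATE requests flush the Xen mapcache.
 */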
static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
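
/*
 * Drain the buffered ioreq page.  Each slot describes one simple request;
 * 8-byte requests occupy two consecutive slots, with the high 32 bits of
 * the data held in the second slot.
 */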
static int handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        qemu_mod_timer(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
    } else {
        qemu_del_timer(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}
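
/*
 * Event channel fd handler: service the buffered page, handle the
 * synchronous ioreq for the notifying vcpu, honour any pending shutdown
 * or reset request, and notify Xen once the response is ready.
 */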
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}
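
/*
 * Record the pty device node of a console character device under the
 * guest's xenstore path so that tools such as xenconsole can find it.
 */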
static int store_dev_info(int domid, CharDriverState *cs, const char *string)
{
    struct xs_handle *xs = NULL;
    char *path = NULL;
    char *newpath = NULL;
    char *pts = NULL;
    int ret = -1;

    /* Only continue if we're talking to a pty. */
    if (strncmp(cs->filename, "pty:", 4)) {
        return 0;
    }
    pts = cs->filename + 4;

    /* We now have everything we need to set the xenstore entry. */
    xs = xs_open(0);
    if (xs == NULL) {
        fprintf(stderr, "Could not contact XenStore\n");
        goto out;
    }

    path = xs_get_domain_path(xs, domid);
    if (path == NULL) {
        fprintf(stderr, "xs_get_domain_path() error\n");
        goto out;
    }
    newpath = realloc(path, (strlen(path) + strlen(string) +
                strlen("/tty") + 1));
    if (newpath == NULL) {
        fprintf(stderr, "realloc error\n");
        goto out;
    }
    path = newpath;

    strcat(path, string);
    strcat(path, "/tty");
    if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
        fprintf(stderr, "xs_write for '%s' failed\n", string);
        goto out;
    }
    ret = 0;

out:
    free(path);
    xs_close(xs);

    return ret;
}

void xenstore_store_pv_console_info(int i, CharDriverState *chr)
{
    if (i == 0) {
        store_dev_info(xen_domid, chr, "/console");
    } else {
        char buf[32];
        snprintf(buf, sizeof(buf), "/device/console/%d", i);
        store_dev_info(xen_domid, chr, buf);
    }
}

static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
{
    char path[50];

    if (xs == NULL) {
        fprintf(stderr, "xenstore connection not initialized\n");
        exit(1);
    }

    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
    if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) {
        fprintf(stderr, "error recording dm state\n");
        exit(1);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                 state);

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}

/* Initialise Xen */

static void xen_change_state_handler(void *opaque, int running,
                                     RunState state)
{
    if (running) {
        /* record state running */
        xenstore_record_dm_state(xenstore, "running");
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *xstate = opaque;
    if (running) {
        xen_main_loop_prepare(xstate);
    }
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

int xen_init(void)
{
    xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }
    qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);

    return 0;
}
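
/*
 * Rebuild the physmap list from the entries a previous device-model
 * instance recorded under /local/domain/0/device-model/<domid>/physmap
 * in xenstore.
 */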
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}
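
/*
 * Main HVM setup: open the event channel and xenstore handles, map the
 * shared and buffered ioreq pages, bind one event channel per vcpu plus
 * the buffered-io channel, initialise the mapcache and guest RAM layout,
 * and register the memory listener and the Xen backend drivers.
 */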
int xen_hvm_init(void)
{
    int i, rc;
    unsigned long ioreq_pfn;
    unsigned long bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = g_malloc0(smp_cpus * sizeof (evtchn_port_t));

    /* FIXME: what if we overflow the page here? */
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
            &bufioreq_evtchn);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
            (uint32_t)bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(ram_size);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
        exit(1);
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}
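
/*
 * Ask Xen to power off or reboot the guest domain; used on guest-requested
 * shutdown/reset and when the device model hits a fatal ioreq error.
 */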
void destroy_hvm_domain(bool reboot)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}

void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}