/*
 * xen_native.h: native Xen libxenctrl / libxendevicemodel interface for
 * QEMU, with compatibility shims for older Xen releases.
 */
  1. #ifndef QEMU_HW_XEN_NATIVE_H
  2. #define QEMU_HW_XEN_NATIVE_H
  3. #ifdef __XEN_INTERFACE_VERSION__
  4. #error In Xen native files, include xen_native.h before other Xen headers
  5. #endif
  6. /*
  7. * If we have new enough libxenctrl then we do not want/need these compat
  8. * interfaces, despite what the user supplied cflags might say. They
  9. * must be undefined before including xenctrl.h
  10. */
  11. #undef XC_WANT_COMPAT_EVTCHN_API
  12. #undef XC_WANT_COMPAT_GNTTAB_API
  13. #undef XC_WANT_COMPAT_MAP_FOREIGN_API
  14. #include <xenctrl.h>
  15. #include <xenstore.h>
  16. #include "hw/xen/xen.h"
  17. #include "hw/pci/pci_device.h"
  18. #include "hw/xen/trace.h"
  19. extern xc_interface *xen_xc;
  20. /*
  21. * We don't support Xen prior to 4.7.1.
  22. */
  23. #include <xenforeignmemory.h>
  24. extern xenforeignmemory_handle *xen_fmem;
  25. #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
  26. typedef xc_interface xendevicemodel_handle;
  27. #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
  28. #undef XC_WANT_COMPAT_DEVICEMODEL_API
  29. #include <xendevicemodel.h>
  30. #endif
  31. #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
  32. static inline int xendevicemodel_relocate_memory(
  33. xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
  34. uint64_t dst_gfn)
  35. {
  36. uint32_t i;
  37. int rc;
  38. for (i = 0; i < size; i++) {
  39. unsigned long idx = src_gfn + i;
  40. xen_pfn_t gpfn = dst_gfn + i;
  41. rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
  42. gpfn);
  43. if (rc) {
  44. return rc;
  45. }
  46. }
  47. return 0;
  48. }
/*
 * Xen < 4.11 compat: pin caching attributes through the libxenctrl call.
 * The dmod handle is unused here; the global xen_xc handle is used instead.
 */
static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}

/* Placeholder type: the mappable-resource API does not exist before 4.11. */
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0
#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

/*
 * Stub: resource mapping is unsupported on Xen < 4.11; always fails with
 * errno set to EOPNOTSUPP so callers fall back to other mechanisms.
 */
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

/* Stub counterpart of the above: nothing was mapped, so always succeeds. */
static inline int xenforeignmemory_unmap_resource(
    xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
{
    return 0;
}
  72. #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
  73. #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
  74. #define XEN_COMPAT_PHYSMAP
/*
 * Xen < 4.10 compat: forward to the old xenforeignmemory_map(), which has
 * no addr/flags arguments; callers must therefore pass NULL/0 for them.
 */
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    /* The old API cannot honour a placement hint or mapping flags. */
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

/* Stub: privilege restriction is unavailable; fails with errno = ENOTTY. */
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

/* Stub: domain shutdown via the dm interface is unavailable before 4.10. */
static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}
  95. #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
  96. #include <xentoolcore.h>
  97. #endif
  98. #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
/*
 * Xen < 4.9 compat: the standalone xendevicemodel library does not exist,
 * so provide its API on top of the older xc_hvm_* calls from libxenctrl.
 * On these versions xendevicemodel_handle is a typedef for xc_interface,
 * so the dmod argument is passed straight through as the xc handle.
 */

/* "Opening" the device model interface just returns the global xc handle. */
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

/* Create an ioreq server for the domain; the new server id goes in *id. */
static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

/* Fetch the ioreq/buffered-ioreq page frames and buffered event channel. */
static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

/* Register a [start, end] range: MMIO if is_mmio != 0, port I/O otherwise. */
static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

/* Remove a previously registered I/O range from the ioreq server. */
static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

/* Route config-space accesses for one PCI device to the ioreq server. */
static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

/* Undo the PCI device routing established by the map call above. */
static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

/* Tear down an ioreq server created with the create call above. */
static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

/* Enable (non-zero) or disable (zero) an ioreq server. */
static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

/* Assert/deassert a PCI INTx line for the given device. */
static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

/* Assert/deassert an ISA IRQ line. */
static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

/* Route a PCI link to an ISA IRQ. */
static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

/* Inject an MSI with the given address/data pair into the domain. */
static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

/* Read-and-clear the dirty bitmap for nr frames starting at first_pfn. */
static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

/* Mark nr frames starting at first_pfn as modified. */
static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

/* Change the HVM memory type of nr frames starting at first_pfn. */
static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}
  200. #endif
  201. extern xendevicemodel_handle *xen_dmod;
/*
 * Convenience wrappers: same device model operations as above, but using
 * the global xen_dmod handle so callers need not pass one around.
 */

/* Change the HVM memory type of nr frames starting at first_pfn. */
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

/* Assert/deassert a PCI INTx line for the given device. */
static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

/* Inject an MSI with the given address/data pair into the domain. */
static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

/* Assert/deassert an ISA IRQ line. */
static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

/* Read-and-clear the dirty bitmap for nr frames starting at first_pfn. */
static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

/* Mark nr frames starting at first_pfn as modified. */
static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

/*
 * Restrict this process to operating only on the given domain.
 * Traces errno on failure (or 0 on success) and returns the
 * xentoolcore_restrict_all() result unchanged.
 */
static inline int xen_restrict(domid_t domid)
{
    int rc;
    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}
  243. void destroy_hvm_domain(bool reboot);
  244. /* shutdown/destroy current domain because of an error */
  245. void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
#ifdef HVM_PARAM_VMPORT_REGS_PFN
/*
 * Read the HVM_PARAM_VMPORT_REGS_PFN parameter for the domain.
 * Returns the xc_hvm_param_get() result; *vmport_regs_pfn is only
 * written on success (rc >= 0).
 */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
/* Stub for Xen headers that lack HVM_PARAM_VMPORT_REGS_PFN. */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
  265. static inline int xen_get_default_ioreq_server_info(domid_t dom,
  266. xen_pfn_t *ioreq_pfn,
  267. xen_pfn_t *bufioreq_pfn,
  268. evtchn_port_t
  269. *bufioreq_evtchn)
  270. {
  271. unsigned long param;
  272. int rc;
  273. rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
  274. if (rc < 0) {
  275. fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
  276. return -1;
  277. }
  278. *ioreq_pfn = param;
  279. rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
  280. if (rc < 0) {
  281. fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
  282. return -1;
  283. }
  284. *bufioreq_pfn = param;
  285. rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
  286. &param);
  287. if (rc < 0) {
  288. fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
  289. return -1;
  290. }
  291. *bufioreq_evtchn = param;
  292. return 0;
  293. }
  294. static bool use_default_ioreq_server;
  295. static inline void xen_map_memory_section(domid_t dom,
  296. ioservid_t ioservid,
  297. MemoryRegionSection *section)
  298. {
  299. hwaddr start_addr = section->offset_within_address_space;
  300. ram_addr_t size = int128_get64(section->size);
  301. hwaddr end_addr = start_addr + size - 1;
  302. if (use_default_ioreq_server) {
  303. return;
  304. }
  305. trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
  306. xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
  307. start_addr, end_addr);
  308. }
  309. static inline void xen_unmap_memory_section(domid_t dom,
  310. ioservid_t ioservid,
  311. MemoryRegionSection *section)
  312. {
  313. hwaddr start_addr = section->offset_within_address_space;
  314. ram_addr_t size = int128_get64(section->size);
  315. hwaddr end_addr = start_addr + size - 1;
  316. if (use_default_ioreq_server) {
  317. return;
  318. }
  319. trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
  320. xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
  321. 1, start_addr, end_addr);
  322. }
  323. static inline void xen_map_io_section(domid_t dom,
  324. ioservid_t ioservid,
  325. MemoryRegionSection *section)
  326. {
  327. hwaddr start_addr = section->offset_within_address_space;
  328. ram_addr_t size = int128_get64(section->size);
  329. hwaddr end_addr = start_addr + size - 1;
  330. if (use_default_ioreq_server) {
  331. return;
  332. }
  333. trace_xen_map_portio_range(ioservid, start_addr, end_addr);
  334. xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
  335. start_addr, end_addr);
  336. }
  337. static inline void xen_unmap_io_section(domid_t dom,
  338. ioservid_t ioservid,
  339. MemoryRegionSection *section)
  340. {
  341. hwaddr start_addr = section->offset_within_address_space;
  342. ram_addr_t size = int128_get64(section->size);
  343. hwaddr end_addr = start_addr + size - 1;
  344. if (use_default_ioreq_server) {
  345. return;
  346. }
  347. trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
  348. xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
  349. 0, start_addr, end_addr);
  350. }
  351. static inline void xen_map_pcidev(domid_t dom,
  352. ioservid_t ioservid,
  353. PCIDevice *pci_dev)
  354. {
  355. if (use_default_ioreq_server) {
  356. return;
  357. }
  358. trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
  359. PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
  360. xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
  361. pci_dev_bus_num(pci_dev),
  362. PCI_SLOT(pci_dev->devfn),
  363. PCI_FUNC(pci_dev->devfn));
  364. }
  365. static inline void xen_unmap_pcidev(domid_t dom,
  366. ioservid_t ioservid,
  367. PCIDevice *pci_dev)
  368. {
  369. if (use_default_ioreq_server) {
  370. return;
  371. }
  372. trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
  373. PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
  374. xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
  375. pci_dev_bus_num(pci_dev),
  376. PCI_SLOT(pci_dev->devfn),
  377. PCI_FUNC(pci_dev->devfn));
  378. }
  379. static inline void xen_create_ioreq_server(domid_t dom,
  380. ioservid_t *ioservid)
  381. {
  382. int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
  383. HVM_IOREQSRV_BUFIOREQ_ATOMIC,
  384. ioservid);
  385. if (rc == 0) {
  386. trace_xen_ioreq_server_create(*ioservid);
  387. return;
  388. }
  389. *ioservid = 0;
  390. use_default_ioreq_server = true;
  391. trace_xen_default_ioreq_server();
  392. }
  393. static inline void xen_destroy_ioreq_server(domid_t dom,
  394. ioservid_t ioservid)
  395. {
  396. if (use_default_ioreq_server) {
  397. return;
  398. }
  399. trace_xen_ioreq_server_destroy(ioservid);
  400. xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
  401. }
  402. static inline int xen_get_ioreq_server_info(domid_t dom,
  403. ioservid_t ioservid,
  404. xen_pfn_t *ioreq_pfn,
  405. xen_pfn_t *bufioreq_pfn,
  406. evtchn_port_t *bufioreq_evtchn)
  407. {
  408. if (use_default_ioreq_server) {
  409. return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
  410. bufioreq_pfn,
  411. bufioreq_evtchn);
  412. }
  413. return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
  414. ioreq_pfn, bufioreq_pfn,
  415. bufioreq_evtchn);
  416. }
  417. static inline int xen_set_ioreq_server_state(domid_t dom,
  418. ioservid_t ioservid,
  419. bool enable)
  420. {
  421. if (use_default_ioreq_server) {
  422. return 0;
  423. }
  424. trace_xen_ioreq_server_state(ioservid, enable);
  425. return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
  426. enable);
  427. }
  428. #endif /* QEMU_HW_XEN_NATIVE_H */