/*
 * xen_common.h -- compatibility shims so QEMU builds against many
 * versions of the Xen control libraries (libxenctrl and its successors).
 */
  1. #ifndef QEMU_HW_XEN_COMMON_H
  2. #define QEMU_HW_XEN_COMMON_H
  3. /*
  4. * If we have new enough libxenctrl then we do not want/need these compat
  5. * interfaces, despite what the user supplied cflags might say. They
  6. * must be undefined before including xenctrl.h
  7. */
  8. #undef XC_WANT_COMPAT_EVTCHN_API
  9. #undef XC_WANT_COMPAT_GNTTAB_API
  10. #undef XC_WANT_COMPAT_MAP_FOREIGN_API
  11. #include <xenctrl.h>
  12. #include <xenstore.h>
  13. #include "hw/xen/interface/io/xenbus.h"
  14. #include "hw/xen/xen.h"
  15. #include "hw/pci/pci_device.h"
  16. #include "hw/xen/trace.h"
  17. extern xc_interface *xen_xc;
  18. /*
  19. * We don't support Xen prior to 4.2.0.
  20. */
  21. /* Xen 4.2 through 4.6 */
  22. #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
  23. typedef xc_interface xenforeignmemory_handle;
  24. typedef xc_evtchn xenevtchn_handle;
  25. typedef xc_gnttab xengnttab_handle;
  26. typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
  27. #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
  28. #define xenevtchn_close(h) xc_evtchn_close(h)
  29. #define xenevtchn_fd(h) xc_evtchn_fd(h)
  30. #define xenevtchn_pending(h) xc_evtchn_pending(h)
  31. #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
  32. #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
  33. #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
  34. #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
  35. #define xengnttab_open(l, f) xc_gnttab_open(l, f)
  36. #define xengnttab_close(h) xc_gnttab_close(h)
  37. #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
  38. #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
  39. #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
  40. #define xengnttab_map_grant_refs(h, c, d, r, p) \
  41. xc_gnttab_map_grant_refs(h, c, d, r, p)
  42. #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
  43. xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
  44. #define xenforeignmemory_open(l, f) xen_xc
  45. #define xenforeignmemory_close(h)
  46. static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
  47. int prot, size_t pages,
  48. const xen_pfn_t arr[/*pages*/],
  49. int err[/*pages*/])
  50. {
  51. if (err)
  52. return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
  53. else
  54. return xc_map_foreign_pages(h, dom, prot, arr, pages);
  55. }
  56. #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
  57. #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
  58. #include <xenevtchn.h>
  59. #include <xengnttab.h>
  60. #include <xenforeignmemory.h>
  61. #endif
extern xenforeignmemory_handle *xen_fmem;

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
/* Before Xen 4.9 there was no xendevicemodel library; use xc_interface. */
typedef xc_interface xendevicemodel_handle;
#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
/* Real library is available: do not pull the compat API out of xenctrl.h. */
#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>
#endif
  69. #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
  70. static inline int xendevicemodel_relocate_memory(
  71. xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
  72. uint64_t dst_gfn)
  73. {
  74. uint32_t i;
  75. int rc;
  76. for (i = 0; i < size; i++) {
  77. unsigned long idx = src_gfn + i;
  78. xen_pfn_t gpfn = dst_gfn + i;
  79. rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
  80. gpfn);
  81. if (rc) {
  82. return rc;
  83. }
  84. }
  85. return 0;
  86. }
/*
 * Compat for Xen < 4.11: forward cache-attribute pinning to the
 * libxenctrl domctl; @dmod is ignored, the global xen_xc is used instead.
 */
static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}
/* Mapped-resource API does not exist before Xen 4.11; provide stubs. */
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0
#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

/* Always fails: resource mapping is unsupported with these headers. */
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

/* Nothing to release, since map_resource above never succeeds. */
static inline int xenforeignmemory_unmap_resource(
    xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
{
    return 0;
}
#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
/* Marker: callers must use the legacy physmap mechanism on this Xen. */
#define XEN_COMPAT_PHYSMAP
/*
 * Compat for Xen < 4.10: only the subset of the richer map2 interface
 * that the old API can express (no placement address, no flags).
 */
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

/* xentoolcore does not exist before 4.10: restriction is unsupported. */
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

/* Device-model shutdown call is unavailable before 4.10. */
static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}
#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
#include <xentoolcore.h>
#endif
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
/*
 * Compat for Xen < 4.9: "opening" the device-model interface just hands
 * back the global libxenctrl handle; @logger and @open_flags are ignored.
 */
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
/*
 * Xen 4.5 .. 4.8 shipped the ioreq-server interface in libxenctrl under
 * the xc_hvm_* names; each wrapper below forwards unchanged (@dmod is
 * really an xc_interface on these versions, see the typedef above).
 */
static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}
#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
/*
 * Pre-4.9 wrappers for device-model operations that libxenctrl provides
 * on every supported Xen version, under the xc_hvm_* names.
 */
static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}
#endif
extern xendevicemodel_handle *xen_dmod;

/*
 * Convenience wrappers: same operations as the xendevicemodel_* calls
 * above, but implicitly using the global xen_dmod handle.
 */
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}
  276. static inline int xen_restrict(domid_t domid)
  277. {
  278. int rc;
  279. rc = xentoolcore_restrict_all(domid);
  280. trace_xen_domid_restrict(rc ? errno : 0);
  281. return rc;
  282. }
/*
 * Tear down the current HVM domain; @reboot presumably requests a restart
 * instead of destruction -- NOTE(review): confirm in the implementation.
 */
void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
#ifdef HVM_PARAM_VMPORT_REGS_PFN
/*
 * Read the vmport register page PFN for @dom from its HVM params.
 * Returns the xc_hvm_param_get() result; *vmport_regs_pfn is only
 * written on success (rc >= 0).
 */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
/* Headers too old to know about the vmport param: report unsupported. */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
/* Old headers lack this constant; define it with the newer headers' value. */
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif
#endif
/*
 * Fetch the magic ioreq pages and the buffered-ioreq event channel of
 * the *default* ioreq server from the domain's HVM params.  Returns 0
 * on success, or -1 (with a message on stderr) if any read fails; note
 * that earlier out-parameters may already have been written by then.
 */
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                    *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }
    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }
    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    *bufioreq_evtchn = param;

    return 0;
}
/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

/* No ioreq-server API before 4.5: only the default server exists. */
typedef uint16_t ioservid_t;

/* All map/unmap operations are no-ops on the default server. */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

/* Creation/destruction are no-ops; *ioservid is left untouched here. */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

/* Always report the default server's pages and event channel. */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

/* The default server cannot be toggled; pretend success. */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
/* Xen 4.5 */
#else

/*
 * Set when creating a dedicated ioreq server failed and we fell back to
 * the default (shared) one; the helpers below then become no-ops.
 */
static bool use_default_ioreq_server;
  404. static inline void xen_map_memory_section(domid_t dom,
  405. ioservid_t ioservid,
  406. MemoryRegionSection *section)
  407. {
  408. hwaddr start_addr = section->offset_within_address_space;
  409. ram_addr_t size = int128_get64(section->size);
  410. hwaddr end_addr = start_addr + size - 1;
  411. if (use_default_ioreq_server) {
  412. return;
  413. }
  414. trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
  415. xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
  416. start_addr, end_addr);
  417. }
  418. static inline void xen_unmap_memory_section(domid_t dom,
  419. ioservid_t ioservid,
  420. MemoryRegionSection *section)
  421. {
  422. hwaddr start_addr = section->offset_within_address_space;
  423. ram_addr_t size = int128_get64(section->size);
  424. hwaddr end_addr = start_addr + size - 1;
  425. if (use_default_ioreq_server) {
  426. return;
  427. }
  428. trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
  429. xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
  430. 1, start_addr, end_addr);
  431. }
  432. static inline void xen_map_io_section(domid_t dom,
  433. ioservid_t ioservid,
  434. MemoryRegionSection *section)
  435. {
  436. hwaddr start_addr = section->offset_within_address_space;
  437. ram_addr_t size = int128_get64(section->size);
  438. hwaddr end_addr = start_addr + size - 1;
  439. if (use_default_ioreq_server) {
  440. return;
  441. }
  442. trace_xen_map_portio_range(ioservid, start_addr, end_addr);
  443. xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
  444. start_addr, end_addr);
  445. }
  446. static inline void xen_unmap_io_section(domid_t dom,
  447. ioservid_t ioservid,
  448. MemoryRegionSection *section)
  449. {
  450. hwaddr start_addr = section->offset_within_address_space;
  451. ram_addr_t size = int128_get64(section->size);
  452. hwaddr end_addr = start_addr + size - 1;
  453. if (use_default_ioreq_server) {
  454. return;
  455. }
  456. trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
  457. xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
  458. 0, start_addr, end_addr);
  459. }
  460. static inline void xen_map_pcidev(domid_t dom,
  461. ioservid_t ioservid,
  462. PCIDevice *pci_dev)
  463. {
  464. if (use_default_ioreq_server) {
  465. return;
  466. }
  467. trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
  468. PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
  469. xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
  470. pci_dev_bus_num(pci_dev),
  471. PCI_SLOT(pci_dev->devfn),
  472. PCI_FUNC(pci_dev->devfn));
  473. }
  474. static inline void xen_unmap_pcidev(domid_t dom,
  475. ioservid_t ioservid,
  476. PCIDevice *pci_dev)
  477. {
  478. if (use_default_ioreq_server) {
  479. return;
  480. }
  481. trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
  482. PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
  483. xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
  484. pci_dev_bus_num(pci_dev),
  485. PCI_SLOT(pci_dev->devfn),
  486. PCI_FUNC(pci_dev->devfn));
  487. }
  488. static inline void xen_create_ioreq_server(domid_t dom,
  489. ioservid_t *ioservid)
  490. {
  491. int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
  492. HVM_IOREQSRV_BUFIOREQ_ATOMIC,
  493. ioservid);
  494. if (rc == 0) {
  495. trace_xen_ioreq_server_create(*ioservid);
  496. return;
  497. }
  498. *ioservid = 0;
  499. use_default_ioreq_server = true;
  500. trace_xen_default_ioreq_server();
  501. }
  502. static inline void xen_destroy_ioreq_server(domid_t dom,
  503. ioservid_t ioservid)
  504. {
  505. if (use_default_ioreq_server) {
  506. return;
  507. }
  508. trace_xen_ioreq_server_destroy(ioservid);
  509. xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
  510. }
  511. static inline int xen_get_ioreq_server_info(domid_t dom,
  512. ioservid_t ioservid,
  513. xen_pfn_t *ioreq_pfn,
  514. xen_pfn_t *bufioreq_pfn,
  515. evtchn_port_t *bufioreq_evtchn)
  516. {
  517. if (use_default_ioreq_server) {
  518. return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
  519. bufioreq_pfn,
  520. bufioreq_evtchn);
  521. }
  522. return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
  523. ioreq_pfn, bufioreq_pfn,
  524. bufioreq_evtchn);
  525. }
  526. static inline int xen_set_ioreq_server_state(domid_t dom,
  527. ioservid_t ioservid,
  528. bool enable)
  529. {
  530. if (use_default_ioreq_server) {
  531. return 0;
  532. }
  533. trace_xen_ioreq_server_state(ioservid, enable);
  534. return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
  535. enable);
  536. }
  537. #endif
/* Xen before 4.8 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800

/*
 * Grant-copy did not exist before Xen 4.8: provide the 4.8 structure
 * layout so callers compile, plus a stub that reports "not implemented".
 */
struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;
        struct {
            uint32_t ref;
            uint16_t offset;
            uint16_t domid;
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;
    int16_t status;
};
typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

/* Always fails: grant copy is unsupported with these headers. */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif
  560. #endif /* QEMU_HW_XEN_COMMON_H */