xen_pt_msi.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620
  1. /*
  2. * Copyright (c) 2007, Intel Corporation.
  3. *
  4. * This work is licensed under the terms of the GNU GPL, version 2. See
  5. * the COPYING file in the top-level directory.
  6. *
  7. * Jiang Yunhong <yunhong.jiang@intel.com>
  8. *
  9. * This file implements direct PCI assignment to a HVM guest
  10. */
  11. #include <sys/mman.h>
  12. #include "xen_backend.h"
  13. #include "xen_pt.h"
  14. #include "apic-msidef.h"
/* Passed to xc_physdev_map_pirq_msi(): let Xen choose the pirq itself. */
#define XEN_PT_AUTO_ASSIGN -1

/* shift count for gflags */
#define XEN_PT_GFLAGS_SHIFT_DEST_ID 0   /* destination APIC id */
#define XEN_PT_GFLAGS_SHIFT_RH 8        /* redirection hint */
#define XEN_PT_GFLAGS_SHIFT_DM 9        /* destination mode */
#define XEN_PT_GFLAGSSHIFT_DELIV_MODE 12 /* delivery mode (3 bits) */
#define XEN_PT_GFLAGSSHIFT_TRG_MODE 15  /* trigger mode */
  22. /*
  23. * Helpers
  24. */
  25. static inline uint8_t msi_vector(uint32_t data)
  26. {
  27. return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
  28. }
  29. static inline uint8_t msi_dest_id(uint32_t addr)
  30. {
  31. return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
  32. }
  33. static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
  34. {
  35. return addr_hi & 0xffffff00;
  36. }
  37. static uint32_t msi_gflags(uint32_t data, uint64_t addr)
  38. {
  39. uint32_t result = 0;
  40. int rh, dm, dest_id, deliv_mode, trig_mode;
  41. rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
  42. dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
  43. dest_id = msi_dest_id(addr);
  44. deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
  45. trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
  46. result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH)
  47. | (dm << XEN_PT_GFLAGS_SHIFT_DM)
  48. | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
  49. | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE);
  50. return result;
  51. }
  52. static inline uint64_t msi_addr64(XenPTMSI *msi)
  53. {
  54. return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
  55. }
  56. static int msi_msix_enable(XenPCIPassthroughState *s,
  57. uint32_t address,
  58. uint16_t flag,
  59. bool enable)
  60. {
  61. uint16_t val = 0;
  62. if (!address) {
  63. return -1;
  64. }
  65. xen_host_pci_get_word(&s->real_device, address, &val);
  66. if (enable) {
  67. val |= flag;
  68. } else {
  69. val &= ~flag;
  70. }
  71. xen_host_pci_set_word(&s->real_device, address, val);
  72. return 0;
  73. }
/*
 * Map a host pirq for an MSI or MSI-X interrupt.
 *
 * A guest vector of 0 is a special protocol: the guest is requesting a
 * specific pirq, encoded in the dest_id bits (low and extended).  A
 * requested pirq of 0 is treated as a guest misconfiguration and falls
 * back to XEN_PT_UNASSIGNED_PIRQ (emulated path).
 *
 * When @is_not_mapped, a pirq is mapped via xc_physdev_map_pirq_msi();
 * *ppirq carries the request in and the result out.
 * Returns 0 on success or the libxc error code on failure.
 */
static int msi_msix_setup(XenPCIPassthroughState *s,
                          uint64_t addr,
                          uint32_t data,
                          int *ppirq,
                          bool is_msix,
                          int msix_entry,
                          bool is_not_mapped)
{
    uint8_t gvec = msi_vector(data);
    int rc = 0;

    /* plain MSI must always use entry 0 */
    assert((!is_msix && msix_entry == 0) || is_msix);

    if (gvec == 0) {
        /* if gvec is 0, the guest is asking for a particular pirq that
         * is passed as dest_id */
        *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
        if (!*ppirq) {
            /* this probably identifies an misconfiguration of the guest,
             * try the emulated path */
            *ppirq = XEN_PT_UNASSIGNED_PIRQ;
        } else {
            XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
                       " (vec: %#x, entry: %#x)\n",
                       *ppirq, is_msix ? "-X" : "", gvec, msix_entry);
        }
    }

    if (is_not_mapped) {
        uint64_t table_base = 0;

        /* MSI-X needs the physical table base for Xen's bookkeeping */
        if (is_msix) {
            table_base = s->msix->table_base;
        }

        rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
                                     ppirq, PCI_DEVFN(s->real_device.dev,
                                                      s->real_device.func),
                                     s->real_device.bus,
                                     msix_entry, table_base);
        if (rc) {
            XEN_PT_ERR(&s->dev,
                       "Mapping of MSI%s (rc: %i, vec: %#x, entry %#x)\n",
                       is_msix ? "-X" : "", rc, gvec, msix_entry);
            return rc;
        }
    }

    return 0;
}
/*
 * (Re)bind an already-mapped pirq to the guest vector/flags currently
 * programmed in the virtual MSI/MSI-X registers.
 *
 * On failure the pirq is also unmapped and *old_pirq is reset to
 * XEN_PT_UNASSIGNED_PIRQ, so a later setup starts from scratch.
 * Returns the xc_domain_update_msi_irq() result.
 */
static int msi_msix_update(XenPCIPassthroughState *s,
                           uint64_t addr,
                           uint32_t data,
                           int pirq,
                           bool is_msix,
                           int msix_entry,
                           int *old_pirq)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;
    uint64_t table_addr = 0;

    XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec %#x gflags %#x"
               " (entry: %#x)\n",
               is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry);

    if (is_msix) {
        /* guest-visible MMIO base of the MSI-X table */
        table_addr = s->msix->mmio_base_addr;
    }

    rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec,
                                  pirq, gflags, table_addr);

    if (rc) {
        XEN_PT_ERR(d, "Updating of MSI%s failed. (rc: %d)\n",
                   is_msix ? "-X" : "", rc);

        /* binding failed: release the pirq rather than keep a stale one */
        if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) {
            XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed.\n",
                       is_msix ? "-X" : "", *old_pirq);
        }
        *old_pirq = XEN_PT_UNASSIGNED_PIRQ;
    }
    return rc;
}
  150. static int msi_msix_disable(XenPCIPassthroughState *s,
  151. uint64_t addr,
  152. uint32_t data,
  153. int pirq,
  154. bool is_msix,
  155. bool is_binded)
  156. {
  157. PCIDevice *d = &s->dev;
  158. uint8_t gvec = msi_vector(data);
  159. uint32_t gflags = msi_gflags(data, addr);
  160. int rc = 0;
  161. if (pirq == XEN_PT_UNASSIGNED_PIRQ) {
  162. return 0;
  163. }
  164. if (is_binded) {
  165. XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec %#x\n",
  166. is_msix ? "-X" : "", pirq, gvec);
  167. rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags);
  168. if (rc) {
  169. XEN_PT_ERR(d, "Unbinding of MSI%s failed. (pirq: %d, gvec: %#x)\n",
  170. is_msix ? "-X" : "", pirq, gvec);
  171. return rc;
  172. }
  173. }
  174. XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq);
  175. rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq);
  176. if (rc) {
  177. XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (rc: %i)\n",
  178. is_msix ? "-X" : "", pirq, rc);
  179. return rc;
  180. }
  181. return 0;
  182. }
  183. /*
  184. * MSI virtualization functions
  185. */
  186. int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable)
  187. {
  188. XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling");
  189. if (!s->msi) {
  190. return -1;
  191. }
  192. return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE,
  193. enable);
  194. }
  195. /* setup physical msi, but don't enable it */
  196. int xen_pt_msi_setup(XenPCIPassthroughState *s)
  197. {
  198. int pirq = XEN_PT_UNASSIGNED_PIRQ;
  199. int rc = 0;
  200. XenPTMSI *msi = s->msi;
  201. if (msi->initialized) {
  202. XEN_PT_ERR(&s->dev,
  203. "Setup physical MSI when it has been properly initialized.\n");
  204. return -1;
  205. }
  206. rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true);
  207. if (rc) {
  208. return rc;
  209. }
  210. if (pirq < 0) {
  211. XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq);
  212. return -1;
  213. }
  214. msi->pirq = pirq;
  215. XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq);
  216. return 0;
  217. }
  218. int xen_pt_msi_update(XenPCIPassthroughState *s)
  219. {
  220. XenPTMSI *msi = s->msi;
  221. return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq,
  222. false, 0, &msi->pirq);
  223. }
  224. void xen_pt_msi_disable(XenPCIPassthroughState *s)
  225. {
  226. XenPTMSI *msi = s->msi;
  227. if (!msi) {
  228. return;
  229. }
  230. xen_pt_msi_set_enable(s, false);
  231. msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false,
  232. msi->initialized);
  233. /* clear msi info */
  234. msi->flags = 0;
  235. msi->mapped = false;
  236. msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
  237. }
  238. /*
  239. * MSI-X virtualization functions
  240. */
  241. static int msix_set_enable(XenPCIPassthroughState *s, bool enabled)
  242. {
  243. XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling");
  244. if (!s->msix) {
  245. return -1;
  246. }
  247. return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE,
  248. enabled);
  249. }
/*
 * Flush one MSI-X table entry's guest programming out to Xen.
 * No-op unless the entry was written since the last flush (entry->updated).
 * Maps a pirq first when the entry has none yet, then (re)binds it to the
 * current addr/data.  Returns 0 on success, -EINVAL for a bad index, or
 * the helper's error code.
 */
static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr)
{
    XenPTMSIXEntry *entry = NULL;
    int pirq;
    int rc;

    if (entry_nr < 0 || entry_nr >= s->msix->total_entries) {
        return -EINVAL;
    }

    entry = &s->msix->msix_entry[entry_nr];

    if (!entry->updated) {
        return 0;
    }

    pirq = entry->pirq;

    /* only request a new mapping when this entry never had a pirq */
    rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
                        entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
    if (rc) {
        return rc;
    }
    if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) {
        entry->pirq = pirq;
    }

    rc = msi_msix_update(s, entry->addr, entry->data, pirq, true,
                         entry_nr, &entry->pirq);

    if (!rc) {
        entry->updated = false;
    }

    return rc;
}
  278. int xen_pt_msix_update(XenPCIPassthroughState *s)
  279. {
  280. XenPTMSIX *msix = s->msix;
  281. int i;
  282. for (i = 0; i < msix->total_entries; i++) {
  283. xen_pt_msix_update_one(s, i);
  284. }
  285. return 0;
  286. }
  287. void xen_pt_msix_disable(XenPCIPassthroughState *s)
  288. {
  289. int i = 0;
  290. msix_set_enable(s, false);
  291. for (i = 0; i < s->msix->total_entries; i++) {
  292. XenPTMSIXEntry *entry = &s->msix->msix_entry[i];
  293. msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true);
  294. /* clear MSI-X info */
  295. entry->pirq = XEN_PT_UNASSIGNED_PIRQ;
  296. entry->updated = false;
  297. }
  298. }
  299. int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index)
  300. {
  301. XenPTMSIXEntry *entry;
  302. int i, ret;
  303. if (!(s->msix && s->msix->bar_index == bar_index)) {
  304. return 0;
  305. }
  306. for (i = 0; i < s->msix->total_entries; i++) {
  307. entry = &s->msix->msix_entry[i];
  308. if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
  309. ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq,
  310. PT_IRQ_TYPE_MSI, 0, 0, 0, 0);
  311. if (ret) {
  312. XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed\n",
  313. entry->pirq);
  314. }
  315. entry->updated = true;
  316. }
  317. }
  318. return xen_pt_msix_update(s);
  319. }
  320. static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
  321. {
  322. switch (offset) {
  323. case PCI_MSIX_ENTRY_LOWER_ADDR:
  324. return e->addr & UINT32_MAX;
  325. case PCI_MSIX_ENTRY_UPPER_ADDR:
  326. return e->addr >> 32;
  327. case PCI_MSIX_ENTRY_DATA:
  328. return e->data;
  329. case PCI_MSIX_ENTRY_VECTOR_CTRL:
  330. return e->vector_ctrl;
  331. default:
  332. return 0;
  333. }
  334. }
  335. static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
  336. {
  337. switch (offset) {
  338. case PCI_MSIX_ENTRY_LOWER_ADDR:
  339. e->addr = (e->addr & ((uint64_t)UINT32_MAX << 32)) | val;
  340. break;
  341. case PCI_MSIX_ENTRY_UPPER_ADDR:
  342. e->addr = (uint64_t)val << 32 | (e->addr & UINT32_MAX);
  343. break;
  344. case PCI_MSIX_ENTRY_DATA:
  345. e->data = val;
  346. break;
  347. case PCI_MSIX_ENTRY_VECTOR_CTRL:
  348. e->vector_ctrl = val;
  349. break;
  350. }
  351. }
/*
 * Guest write to the virtualized MSI-X table.
 *
 * Address/data fields may only change while the entry is masked or
 * MSI-X is disabled; the vector-control word is always writable, and
 * clearing the mask bit while enabled flushes a pending update to Xen.
 */
static void pci_msix_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    XenPTMSIXEntry *entry;
    int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr < 0 || entry_nr >= msix->total_entries) {
        XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
        return;
    }
    entry = &msix->msix_entry[entry_nr];
    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
        const volatile uint32_t *vec_ctrl;

        /* value unchanged: nothing to do */
        if (get_entry_value(entry, offset) == val) {
            return;
        }

        /*
         * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
         * up-to-date. Read from hardware directly.
         */
        vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
            + PCI_MSIX_ENTRY_VECTOR_CTRL;

        /* refuse to retarget a live (enabled and unmasked) entry */
        if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
            XEN_PT_ERR(&s->dev, "Can't update msix entry %d since MSI-X is"
                       " already enabled.\n", entry_nr);
            return;
        }

        entry->updated = true;
    }

    set_entry_value(entry, offset, val);

    if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL) {
        /* unmask while enabled: push any pending programming out to Xen */
        if (msix->enabled && !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
            xen_pt_msix_update_one(s, entry_nr);
        }
    }
}
/*
 * Guest read of the virtualized MSI-X table.  Reads within the table
 * window return the virtual entry; reads beyond it fall through to the
 * mapped physical region (Pending Bit Array).
 */
static uint64_t pci_msix_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr < 0) {
        XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
        return 0;
    }

    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) {
        return get_entry_value(&msix->msix_entry[entry_nr], offset);
    } else {
        /* Pending Bit Array (PBA) */
        /* NOTE(review): assumes the PBA lies in the same mapping directly
         * after the table; the mmap in xen_pt_msix_init() only covers the
         * table itself — verify against the device's PBA offset. */
        return *(uint32_t *)(msix->phys_iomem_base + addr);
    }
}
/* MMIO ops for the emulated MSI-X table: aligned 32-bit accesses only. */
static const MemoryRegionOps pci_msix_ops = {
    .read = pci_msix_read,
    .write = pci_msix_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};
/*
 * Discover and virtualize the device's MSI-X capability at config-space
 * offset @base: read the table size and location, map the physical table
 * read-only through /dev/mem, and overlay an MMIO region on the table
 * BAR so guest accesses are intercepted.
 * Returns 0 on success, -1 or a negative errno on failure.
 */
int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base)
{
    uint8_t id = 0;
    uint16_t control = 0;
    uint32_t table_off = 0;
    int i, total_entries, bar_index;
    XenHostPCIDevice *hd = &s->real_device;
    PCIDevice *d = &s->dev;
    int fd = -1;
    XenPTMSIX *msix = NULL;
    int rc = 0;

    rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id);
    if (rc) {
        return rc;
    }

    if (id != PCI_CAP_ID_MSIX) {
        XEN_PT_ERR(d, "Invalid id %#x base %#x\n", id, base);
        return -1;
    }

    /* table size field is N-1 encoded */
    xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control);
    total_entries = control & PCI_MSIX_FLAGS_QSIZE;
    total_entries += 1;

    /* XenPTMSIX carries a flexible array of total_entries entries */
    s->msix = g_malloc0(sizeof (XenPTMSIX)
                        + total_entries * sizeof (XenPTMSIXEntry));
    msix = s->msix;

    msix->total_entries = total_entries;
    for (i = 0; i < total_entries; i++) {
        msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ;
    }

    /* region size rounded up to a whole page */
    memory_region_init_io(&msix->mmio, &pci_msix_ops, s, "xen-pci-pt-msix",
                          (total_entries * PCI_MSIX_ENTRY_SIZE
                           + XC_PAGE_SIZE - 1)
                          & XC_PAGE_MASK);

    /* low bits of the table offset register hold the BAR index (BIR) */
    xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off);
    bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK;
    table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->table_base = s->real_device.io_regions[bar_index].base_addr;
    XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base);

    fd = open("/dev/mem", O_RDWR);
    if (fd == -1) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno));
        goto error_out;
    }
    XEN_PT_LOG(d, "table_off = %#x, total_entries = %d\n",
               table_off, total_entries);
    /* sub-page offset of the table within its page */
    msix->table_offset_adjust = table_off & 0x0fff;
    /* map from the page boundary; adjust the pointer afterwards */
    msix->phys_iomem_base =
        mmap(NULL,
             total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust,
             PROT_READ,
             MAP_SHARED | MAP_LOCKED,
             fd,
             msix->table_base + table_off - msix->table_offset_adjust);
    close(fd);
    if (msix->phys_iomem_base == MAP_FAILED) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno));
        goto error_out;
    }
    msix->phys_iomem_base = (char *)msix->phys_iomem_base
        + msix->table_offset_adjust;

    XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n",
               msix->phys_iomem_base);

    memory_region_add_subregion_overlap(&s->bar[bar_index], table_off,
                                        &msix->mmio,
                                        2); /* Priority: pci default + 1 */

    return 0;

error_out:
    memory_region_destroy(&msix->mmio);
    g_free(s->msix);
    s->msix = NULL;
    return rc;
}
  494. void xen_pt_msix_delete(XenPCIPassthroughState *s)
  495. {
  496. XenPTMSIX *msix = s->msix;
  497. if (!msix) {
  498. return;
  499. }
  500. /* unmap the MSI-X memory mapped register area */
  501. if (msix->phys_iomem_base) {
  502. XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n",
  503. msix->phys_iomem_base);
  504. munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE
  505. + msix->table_offset_adjust);
  506. }
  507. memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio);
  508. memory_region_destroy(&msix->mmio);
  509. g_free(s->msix);
  510. s->msix = NULL;
  511. }