msix.c

/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in pci devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "sysemu/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/i386/kvm/xen_evtchn.h"

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)
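
/*
 * Note: PCI_MSIX_FLAGS_ENABLE is 0x8000 and PCI_MSIX_FLAGS_MASKALL is
 * 0x4000, so the shifted masks above are 0x80 and 0x40, applied to the
 * single config byte at dev->msix_cap + MSIX_CONTROL_OFFSET.
 */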

static MSIMessage msix_prepare_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    return dev->msix_prepare_message(dev, vector);
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}
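
/*
 * Example: vector 10 is tracked in PBA byte 10 / 8 == 1 under bit
 * 10 % 8 == 2, i.e. msix_pending_mask(10) == 0x04.
 */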

static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];

    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (xen_mode == XEN_EMULATE) {
        MSIMessage msg = msix_prepare_message(dev, vector);

        xen_evtchn_snoop_msi(dev, true, vector, msg.address, msg.data,
                             is_masked);
    }

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

void msix_set_mask(PCIDevice *dev, int vector, bool mask)
{
    unsigned offset;
    bool was_masked;

    assert(vector < dev->msix_entries_nr);

    offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
    was_masked = msix_is_masked(dev, vector);

    if (mask) {
        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
    } else {
        dev->msix_table[offset] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
    }

    msix_handle_mask_update(dev, vector, was_masked);
}

static bool msix_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};
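
/*
 * .valid permits 4- and 8-byte guest accesses, while .impl caps the
 * implementation at 4 bytes: the memory core splits an 8-byte access into
 * two 4-byte calls, so the handlers above only ever operate on longs.
 * The PBA ops below use the same layout.
 */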

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;

    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};
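
/*
 * Writes to the PBA are ignored: the Pending Bit Array is read-only to
 * software per the PCI spec, so msix_pba_mmio_write() above is a no-op.
 */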

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/*
 * Make PCI device @dev MSI-X capable
 * @nentries is the max number of MSI-X vectors that the device supports.
 * @table_bar is the MemoryRegion in which the MSI-X table structure resides.
 * @table_bar_nr is the number of the base address register corresponding to
 * @table_bar.
 * @table_offset indicates the offset at which the MSI-X table structure
 * starts in @table_bar.
 * @pba_bar is the MemoryRegion in which the Pending Bit Array structure
 * resides.
 * @pba_bar_nr is the number of the base address register corresponding to
 * @pba_bar.
 * @pba_offset indicates the offset at which the Pending Bit Array structure
 * starts in @pba_bar.
 * A non-zero @cap_pos puts the MSI-X capability at that offset in PCI config
 * space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means MSI is not supported by the interrupt controller.
 * -EINVAL means a capability overlap, which can happen when @cap_pos is
 * non-zero; this is a programming error, except for device assignment,
 * which can use it to check whether real hardware is broken.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev),
                          &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev),
                          &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    dev->msix_prepare_message = msix_prepare_message;

    return 0;
}
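
/*
 * Worked example of the sanity checks above: nentries == 96 gives
 * table_size == 1536 and pba_size == QEMU_ALIGN_UP(96, 64) / 8 == 16.
 * Table and PBA may share one BAR as long as the ranges
 * [table_offset, table_offset + 1536) and [pba_offset, pba_offset + 16)
 * do not overlap and both offsets have the PCI_MSIX_FLAGS_BIRMASK
 * (low three) bits clear, i.e. are 8-byte aligned.
 */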

int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and the PBA in
     * the upper half for nentries less than or equal to 128.
     * No need to care about more than 65 entries for legacy
     * machine types, which have at most 64 queues.
     */
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    bar_size = pow2ceil(bar_size);

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}
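
/*
 * Sizing example: nentries == 256 needs a 4096-byte table, pushing
 * bar_pba_offset to 4096; with bar_pba_size == 32 the BAR grows to
 * pow2ceil(4096 + 32) == 8192 bytes.
 */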

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    dev->msix_prepare_message = NULL;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}
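
/*
 * Ordering note: config space must already be restored so that
 * msix_update_function_masked() sees the guest's enable/maskall bits.
 * Passing was_masked == true for every vector then re-fires the use
 * notifier for each vector that ends up unmasked after the load.
 */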

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    assert(vector < dev->msix_entries_nr);

    if (!dev->msix_entry_used[vector]) {
        return;
    }

    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);
    msi_send_message(dev, msg);
}
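
/*
 * If the vector is masked when msix_notify() runs, the interrupt is only
 * latched in the PBA; the actual MSI memory write is issued later, from
 * msix_handle_mask_update(), once guest software unmasks the vector.
 */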

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify a
 * standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is going
 * to actually use, and checking this on the notification path. Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */

/* Mark vector as used. */
void msix_vector_use(PCIDevice *dev, unsigned vector)
{
    assert(vector < dev->msix_entries_nr);
    dev->msix_entry_used[vector]++;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    assert(vector < dev->msix_entries_nr);
    if (!dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}
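
/*
 * A minimal caller sketch (hypothetical device, names assumed, not part of
 * this file): a realize function might set up four vectors with
 *
 *     if (msix_init_exclusive_bar(pci_dev, 4, 1, errp)) {
 *         return;
 *     }
 *     for (unsigned v = 0; v < 4; v++) {
 *         msix_vector_use(pci_dev, v);
 *     }
 *
 * raise interrupts with msix_notify(pci_dev, v), and balance each use with
 * msix_vector_unuse() (or msix_unuse_all_vectors()) on teardown.
 */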

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}
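
/*
 * Callers such as vfio-pci and vhost use these notifiers to attach a
 * vector's MSIMessage to an in-kernel route when the guest unmasks it,
 * and to tear the route down again when the vector is masked.
 */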

static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    msix_save(pv, f);
    return 0;
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get  = get_msix_state,
    .put  = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name         = "msix",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_msix,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
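
/*
 * Devices embed this description via VMSTATE_MSIX() from hw/pci/msix.h.
 * A sketch (the device struct and field names below are assumptions):
 *
 *     static const VMStateDescription vmstate_mydev = {
 *         .name = "mydev",
 *         .fields = (VMStateField[]) {
 *             VMSTATE_PCI_DEVICE(parent_obj, MyDevState),
 *             VMSTATE_MSIX(parent_obj, MyDevState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */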