/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in pci devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

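/*
 * Worked example of the pending-bit layout used by the helpers above:
 * the PBA packs one pending bit per vector, so vector 10 lives in PBA
 * byte 10 / 8 = 1, under mask 1 << (10 % 8) = 0x04.
 */
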
static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

static bool msix_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/*
 * Make PCI device @dev MSI-X capable
 * @nentries is the max number of MSI-X vectors that the device supports.
 * @table_bar is the MemoryRegion in which the MSI-X table structure resides.
 * @table_bar_nr is the number of the base address register corresponding to
 * @table_bar.
 * @table_offset indicates the offset at which the MSI-X table structure
 * starts in @table_bar.
 * @pba_bar is the MemoryRegion in which the Pending Bit Array structure
 * resides.
 * @pba_bar_nr is the number of the base address register corresponding to
 * @pba_bar.
 * @pba_offset indicates the offset at which the Pending Bit Array structure
 * starts in @pba_bar.
 * Non-zero @cap_pos puts the MSI-X capability at that offset in PCI config
 * space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means MSI is not supported by the platform's interrupt controller.
 * -EINVAL means a capability overlap (possible when @cap_pos is non-zero) or
 * invalid parameters. This normally indicates a programming error, except for
 * device assignment, which can use it to detect broken real hardware.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev),
                          &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev),
                          &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}

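/*
 * Typical use, sketched for illustration (the device state "s", its BAR
 * field and the chosen offsets below are hypothetical, not taken from this
 * file): a device realize function carves the table and PBA out of one of
 * its own BARs, which must be large enough to cover both regions:
 *
 *     Error *err = NULL;
 *     // table at offset 0x0, PBA at offset 0x800 of the device's BAR 1
 *     if (msix_init(pdev, 16, &s->bar1, 1, 0x0,
 *                   &s->bar1, 1, 0x800, 0x0, &err) < 0) {
 *         error_propagate(errp, err);
 *         return;
 *     }
 *
 * Devices that do not need to share the BAR with other registers usually
 * call msix_init_exclusive_bar() below instead.
 */
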
int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and PBA in
     * the upper half for nentries less than or equal to 128.
     * There is no need to worry about more than 65 entries for
     * legacy machine types, which have at most 64 queues.
     */
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    bar_size = pow2ceil(bar_size);

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}

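/*
 * Illustrative sketch of the common calling pattern (the vector count and
 * BAR number are hypothetical): from realize, reserve a whole BAR for
 * MSI-X, then declare the vectors the device will actually signal:
 *
 *     if (msix_init_exclusive_bar(pdev, 4, 2, errp) < 0) {
 *         return;
 *     }
 *     for (i = 0; i < 4; i++) {
 *         msix_vector_use(pdev, i);
 *     }
 */
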
static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }

    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);
    msi_send_message(dev, msg);
}

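/*
 * Sketch of the notification path as seen from a device model (the
 * completion handler and vector name below are hypothetical): once a
 * vector has been declared with msix_vector_use(), raising the interrupt
 * is one call, and masked vectors are latched in the PBA automatically:
 *
 *     static void mydev_complete_request(MyDevState *s)
 *     {
 *         msix_notify(PCI_DEVICE(s), MYDEV_QUEUE_VECTOR);
 *     }
 */
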
void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify a
 * standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is going
 * to actually use, and checking this on the notification path. Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return -EINVAL;
    }

    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

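/*
 * Note that msix_entry_used[] is a reference count, so uses nest: each
 * msix_vector_use() must be balanced by a msix_vector_unuse() before the
 * vector stops being delivered. An illustrative pairing (the queue field
 * is hypothetical):
 *
 *     // when the guest enables a queue
 *     msix_vector_use(pdev, queue->vector);
 *     ...
 *     // when the guest disables the queue again
 *     msix_vector_unuse(pdev, queue->vector);
 */
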
void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}

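/*
 * Sketch of how a backend hooks these notifiers (the callback names are
 * hypothetical; in-tree users such as virtio-pci wire them to KVM irq
 * routing): the use notifier is called with a vector's current MSIMessage
 * whenever an unmasked vector needs a route, the release notifier when it
 * is masked again.
 *
 *     static int mydev_vector_use(PCIDevice *dev, unsigned vector,
 *                                 MSIMessage msg)
 *     {
 *         // allocate or update an irq route for (vector, msg)
 *         return 0;
 *     }
 *
 *     static void mydev_vector_release(PCIDevice *dev, unsigned vector)
 *     {
 *         // drop the route for vector
 *     }
 *
 *     msix_set_vector_notifiers(dev, mydev_vector_use,
 *                               mydev_vector_release, NULL);
 */
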
void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}

static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, QJSON *vmdesc)
{
    msix_save(pv, f);

    return 0;
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get  = get_msix_state,
    .put  = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name         = "msix",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_msix,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
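
/*
 * Devices pull this description in through the VMSTATE_MSIX() helper from
 * "hw/pci/msix.h", pointing at their PCIDevice field, so the table and PBA
 * are saved and restored with the rest of the device state. An illustrative
 * sketch (the device type is hypothetical):
 *
 *     static const VMStateDescription vmstate_mydev = {
 *         .name = "mydev",
 *         .version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_PCI_DEVICE(parent_obj, MyDevState),
 *             VMSTATE_MSIX(parent_obj, MyDevState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */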