pci-quirks.c

/*
 * device quirks for PCI devices
 *
 * Copyright Red Hat, Inc. 2012-2015
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include CONFIG_DEVICES
#include "exec/memop.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include <sys/ioctl.h>

#include "hw/nvram/fw_cfg.h"
#include "hw/qdev-properties.h"
#include "pci.h"
#include "pci-quirks.h"
#include "trace.h"

/*
 * List of device ids/vendor ids for which to disable
 * option rom loading.  This avoids guest hangs during rom
 * execution, as noticed with the BCM 57810 card, for lack of a
 * better way to handle such issues.
 * The user can still override by specifying a romfile or
 * rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang.  When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue.
 */
static const struct {
    uint32_t vendor;
    uint32_t device;
} rom_denylist[] = {
    { 0x14e4, 0x168e }, /* Broadcom BCM 57810 */
};
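
/*
 * For reference, a user can still force ROM loading for a denylisted
 * device from the QEMU command line as mentioned above, for example
 * (hypothetical host address):
 *
 *   -device vfio-pci,host=0000:05:00.0,rombar=1
 *   -device vfio-pci,host=0000:05:00.0,romfile=/path/to/rom.bin
 */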

bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(rom_denylist); i++) {
        if (vfio_pci_is(vdev, rom_denylist[i].vendor, rom_denylist[i].device)) {
            trace_vfio_quirk_rom_in_denylist(vdev->vbasedev.name,
                                             rom_denylist[i].vendor,
                                             rom_denylist[i].device);
            return true;
        }
    }

    return false;
}

/*
 * Device specific region quirks (mostly backdoors to PCI config space)
 */

static uint64_t vfio_generic_window_quirk_address_read(void *opaque,
                                                       hwaddr addr,
                                                       unsigned size)
{
    VFIOConfigWindowQuirk *window = opaque;
    VFIOPCIDevice *vdev = window->vdev;

    return vfio_region_read(&vdev->bars[window->bar].region,
                            addr + window->address_offset, size);
}

static void vfio_generic_window_quirk_address_write(void *opaque, hwaddr addr,
                                                    uint64_t data,
                                                    unsigned size)
{
    VFIOConfigWindowQuirk *window = opaque;
    VFIOPCIDevice *vdev = window->vdev;
    int i;

    window->window_enabled = false;

    vfio_region_write(&vdev->bars[window->bar].region,
                      addr + window->address_offset, data, size);

    for (i = 0; i < window->nr_matches; i++) {
        if ((data & ~window->matches[i].mask) == window->matches[i].match) {
            window->window_enabled = true;
            window->address_val = data & window->matches[i].mask;
            trace_vfio_quirk_generic_window_address_write(vdev->vbasedev.name,
                                    memory_region_name(window->addr_mem), data);
            break;
        }
    }
}

const MemoryRegionOps vfio_generic_window_address_quirk = {
    .read = vfio_generic_window_quirk_address_read,
    .write = vfio_generic_window_quirk_address_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_generic_window_quirk_data_read(void *opaque,
                                                    hwaddr addr, unsigned size)
{
    VFIOConfigWindowQuirk *window = opaque;
    VFIOPCIDevice *vdev = window->vdev;
    uint64_t data;

    /* Always read data reg, discard if window enabled */
    data = vfio_region_read(&vdev->bars[window->bar].region,
                            addr + window->data_offset, size);

    if (window->window_enabled) {
        data = vfio_pci_read_config(&vdev->pdev, window->address_val, size);
        trace_vfio_quirk_generic_window_data_read(vdev->vbasedev.name,
                                    memory_region_name(window->data_mem), data);
    }

    return data;
}

static void vfio_generic_window_quirk_data_write(void *opaque, hwaddr addr,
                                                 uint64_t data, unsigned size)
{
    VFIOConfigWindowQuirk *window = opaque;
    VFIOPCIDevice *vdev = window->vdev;

    if (window->window_enabled) {
        vfio_pci_write_config(&vdev->pdev, window->address_val, data, size);
        trace_vfio_quirk_generic_window_data_write(vdev->vbasedev.name,
                                    memory_region_name(window->data_mem), data);
        return;
    }

    vfio_region_write(&vdev->bars[window->bar].region,
                      addr + window->data_offset, data, size);
}

const MemoryRegionOps vfio_generic_window_data_quirk = {
    .read = vfio_generic_window_quirk_data_read,
    .write = vfio_generic_window_quirk_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
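
/*
 * A sketch of the guest-visible access pattern the two handlers above
 * intercept (offsets and values are illustrative, not from a real trace):
 *
 *   guest writes a matching value (e.g. 0x4000)
 *     to the address register                 -> window_enabled = true
 *   guest reads/writes the data register      -> routed to emulated PCI
 *                                                config space at address_val
 *   guest writes any non-matching address     -> window_enabled = false,
 *                                                data accesses pass through
 *                                                to the device
 */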

static uint64_t vfio_generic_quirk_mirror_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOConfigMirrorQuirk *mirror = opaque;
    VFIOPCIDevice *vdev = mirror->vdev;
    uint64_t data;

    /* Read and discard in case the hardware cares */
    (void)vfio_region_read(&vdev->bars[mirror->bar].region,
                           addr + mirror->offset, size);

    addr += mirror->config_offset;
    data = vfio_pci_read_config(&vdev->pdev, addr, size);
    trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name,
                                         memory_region_name(mirror->mem),
                                         addr, data);
    return data;
}

static void vfio_generic_quirk_mirror_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOConfigMirrorQuirk *mirror = opaque;
    VFIOPCIDevice *vdev = mirror->vdev;

    addr += mirror->config_offset;
    vfio_pci_write_config(&vdev->pdev, addr, data, size);
    trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name,
                                          memory_region_name(mirror->mem),
                                          addr, data);
}

const MemoryRegionOps vfio_generic_mirror_quirk = {
    .read = vfio_generic_quirk_mirror_read,
    .write = vfio_generic_quirk_mirror_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2)
{
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_4 + 1, size);

    trace_vfio_quirk_ati_3c3_read(vdev->vbasedev.name, data);

    return data;
}

static void vfio_ati_3c3_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .write = vfio_ati_3c3_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

VFIOQuirk *vfio_quirk_alloc(int nr_mem)
{
    VFIOQuirk *quirk = g_new0(VFIOQuirk, 1);

    QLIST_INIT(&quirk->ioeventfds);
    quirk->mem = g_new0(MemoryRegion, nr_mem);
    quirk->nr_mem = nr_mem;

    return quirk;
}
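
/*
 * Typical quirk lifecycle, as used by the probe functions below:
 * vfio_quirk_alloc() sizes quirk->mem, each region is initialized with
 * memory_region_init_io() and layered over the BAR or VGA region with
 * memory_region_add_subregion{,_overlap}(), and the quirk is linked into
 * the owner's quirk list.  Teardown happens in the *_exit() and
 * *_finalize() helpers toward the bottom of this file.
 */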

static void vfio_ioeventfd_exit(VFIOPCIDevice *vdev, VFIOIOEventFD *ioeventfd)
{
    QLIST_REMOVE(ioeventfd, next);
    memory_region_del_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size,
                              true, ioeventfd->data, &ioeventfd->e);

    if (ioeventfd->vfio) {
        struct vfio_device_ioeventfd vfio_ioeventfd;

        vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd);
        vfio_ioeventfd.flags = ioeventfd->size;
        vfio_ioeventfd.data = ioeventfd->data;
        vfio_ioeventfd.offset = ioeventfd->region->fd_offset +
                                ioeventfd->region_addr;
        vfio_ioeventfd.fd = -1;

        if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd)) {
            error_report("Failed to remove vfio ioeventfd for %s+0x%"
                         HWADDR_PRIx"[%d]:0x%"PRIx64" (%m)",
                         memory_region_name(ioeventfd->mr), ioeventfd->addr,
                         ioeventfd->size, ioeventfd->data);
        }
    } else {
        qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
                            NULL, NULL, NULL);
    }

    event_notifier_cleanup(&ioeventfd->e);
    trace_vfio_ioeventfd_exit(memory_region_name(ioeventfd->mr),
                              (uint64_t)ioeventfd->addr, ioeventfd->size,
                              ioeventfd->data);
    g_free(ioeventfd);
}

static void vfio_drop_dynamic_eventfds(VFIOPCIDevice *vdev, VFIOQuirk *quirk)
{
    VFIOIOEventFD *ioeventfd, *tmp;

    QLIST_FOREACH_SAFE(ioeventfd, &quirk->ioeventfds, next, tmp) {
        if (ioeventfd->dynamic) {
            vfio_ioeventfd_exit(vdev, ioeventfd);
        }
    }
}

static void vfio_ioeventfd_handler(void *opaque)
{
    VFIOIOEventFD *ioeventfd = opaque;

    if (event_notifier_test_and_clear(&ioeventfd->e)) {
        vfio_region_write(ioeventfd->region, ioeventfd->region_addr,
                          ioeventfd->data, ioeventfd->size);
        trace_vfio_ioeventfd_handler(memory_region_name(ioeventfd->mr),
                                     (uint64_t)ioeventfd->addr,
                                     ioeventfd->size, ioeventfd->data);
    }
}

static VFIOIOEventFD *vfio_ioeventfd_init(VFIOPCIDevice *vdev,
                                          MemoryRegion *mr, hwaddr addr,
                                          unsigned size, uint64_t data,
                                          VFIORegion *region,
                                          hwaddr region_addr, bool dynamic)
{
    VFIOIOEventFD *ioeventfd;

    if (vdev->no_kvm_ioeventfd) {
        return NULL;
    }

    ioeventfd = g_malloc0(sizeof(*ioeventfd));

    if (event_notifier_init(&ioeventfd->e, 0)) {
        g_free(ioeventfd);
        return NULL;
    }

    /*
     * MemoryRegion and relative offset, plus additional ioeventfd setup
     * parameters for configuring and later tearing down KVM ioeventfd.
     */
    ioeventfd->mr = mr;
    ioeventfd->addr = addr;
    ioeventfd->size = size;
    ioeventfd->data = data;
    ioeventfd->dynamic = dynamic;
    /*
     * VFIORegion and relative offset for implementing the userspace
     * handler.  data & size fields shared for both uses.
     */
    ioeventfd->region = region;
    ioeventfd->region_addr = region_addr;

    if (!vdev->no_vfio_ioeventfd) {
        struct vfio_device_ioeventfd vfio_ioeventfd;

        vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd);
        vfio_ioeventfd.flags = ioeventfd->size;
        vfio_ioeventfd.data = ioeventfd->data;
        vfio_ioeventfd.offset = ioeventfd->region->fd_offset +
                                ioeventfd->region_addr;
        vfio_ioeventfd.fd = event_notifier_get_fd(&ioeventfd->e);

        ioeventfd->vfio = !ioctl(vdev->vbasedev.fd,
                                 VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd);
    }

    if (!ioeventfd->vfio) {
        qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
                            vfio_ioeventfd_handler, NULL, ioeventfd);
    }

    memory_region_add_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size,
                              true, ioeventfd->data, &ioeventfd->e);
    trace_vfio_ioeventfd_init(memory_region_name(mr), (uint64_t)addr,
                              size, data, ioeventfd->vfio);

    return ioeventfd;
}
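
/*
 * Net effect of the setup above: a matching guest write is caught by KVM
 * via the eventfd registered with memory_region_add_eventfd().  If the
 * VFIO_DEVICE_IOEVENTFD ioctl succeeded, the vfio kernel driver consumes
 * the eventfd and replays the write to the device without ever exiting to
 * QEMU; otherwise vfio_ioeventfd_handler() replays it from userspace,
 * which still avoids the heavyweight MMIO emulation path.
 */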

static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->bars[4].ioport || vdev->bars[4].region.size < 256) {
        return;
    }

    quirk = vfio_quirk_alloc(1);

    memory_region_init_io(quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, vdev,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_quirk_ati_3c3_probe(vdev->vbasedev.name);
}

/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a mirror to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available.  Experimentation seems to indicate
 * that read-only may be provided by hardware.
 */
static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOConfigWindowQuirk *window;

    /* This window doesn't seem to be used except by legacy VGA code */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->vga || nr != 4 || !vdev->bars[4].ioport) {
        return;
    }

    quirk = vfio_quirk_alloc(2);
    window = quirk->data = g_malloc0(sizeof(*window) +
                                     sizeof(VFIOConfigWindowMatch));
    window->vdev = vdev;
    window->address_offset = 0;
    window->data_offset = 4;
    window->nr_matches = 1;
    window->matches[0].match = 0x4000;
    window->matches[0].mask = vdev->config_size - 1;
    window->bar = nr;
    window->addr_mem = &quirk->mem[0];
    window->data_mem = &quirk->mem[1];

    memory_region_init_io(window->addr_mem, OBJECT(vdev),
                          &vfio_generic_window_address_quirk, window,
                          "vfio-ati-bar4-window-address-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        window->address_offset,
                                        window->addr_mem, 1);

    memory_region_init_io(window->data_mem, OBJECT(vdev),
                          &vfio_generic_window_data_quirk, window,
                          "vfio-ati-bar4-window-data-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        window->data_offset,
                                        window->data_mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_ati_bar4_probe(vdev->vbasedev.name);
}

/*
 * Trap the BAR2 MMIO mirror to config space as well.
 */
static void vfio_probe_ati_bar2_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOConfigMirrorQuirk *mirror;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->vga || nr != 2 || !vdev->bars[2].mem64) {
        return;
    }

    quirk = vfio_quirk_alloc(1);
    mirror = quirk->data = g_malloc0(sizeof(*mirror));
    mirror->mem = quirk->mem;
    mirror->vdev = vdev;
    mirror->offset = 0x4000;
    mirror->bar = nr;

    memory_region_init_io(mirror->mem, OBJECT(vdev),
                          &vfio_generic_mirror_quirk, mirror,
                          "vfio-ati-bar2-4000-quirk", PCI_CONFIG_SPACE_SIZE);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        mirror->offset, mirror->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_ati_bar2_probe(vdev->vbasedev.name);
}

/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4.  The BAR0 offset is then accessible
 * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window, but it doesn't hurt to leave it.
 */
typedef enum {NONE = 0, SELECT, WINDOW, READ, WRITE} VFIONvidia3d0State;
static const char *nv3d0_states[] = { "NONE", "SELECT",
                                      "WINDOW", "READ", "WRITE" };

typedef struct VFIONvidia3d0Quirk {
    VFIOPCIDevice *vdev;
    VFIONvidia3d0State state;
    uint32_t offset;
} VFIONvidia3d0Quirk;
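
/*
 * Sketch of the state machine implemented below, matching the access
 * sequence described in the comment above:
 *
 *   NONE   -- write 0x338 to 0x3d4  --> SELECT
 *   SELECT -- write offset to 0x3d0 --> WINDOW
 *   WINDOW -- write 0x538 to 0x3d4  --> READ   (next 0x3d0 read hits config)
 *   WINDOW -- write 0x738 to 0x3d4  --> WRITE  (next 0x3d0 write hits config)
 *
 * Any other access falls back to NONE and passes through to VGA.
 */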

static uint64_t vfio_nvidia_3d4_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIONvidia3d0Quirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    quirk->state = NONE;

    return vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                         addr + 0x14, size);
}

static void vfio_nvidia_3d4_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIONvidia3d0Quirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    VFIONvidia3d0State old_state = quirk->state;

    quirk->state = NONE;

    switch (data) {
    case 0x338:
        if (old_state == NONE) {
            quirk->state = SELECT;
            trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
                                              nv3d0_states[quirk->state]);
        }
        break;
    case 0x538:
        if (old_state == WINDOW) {
            quirk->state = READ;
            trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
                                              nv3d0_states[quirk->state]);
        }
        break;
    case 0x738:
        if (old_state == WINDOW) {
            quirk->state = WRITE;
            trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
                                              nv3d0_states[quirk->state]);
        }
        break;
    }

    vfio_vga_write(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                   addr + 0x14, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d4_quirk = {
    .read = vfio_nvidia_3d4_quirk_read,
    .write = vfio_nvidia_3d4_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIONvidia3d0Quirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    VFIONvidia3d0State old_state = quirk->state;
    uint64_t data = vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                                  addr + 0x10, size);

    quirk->state = NONE;

    if (old_state == READ &&
        (quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) {
        uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1);

        data = vfio_pci_read_config(&vdev->pdev, offset, size);
        trace_vfio_quirk_nvidia_3d0_read(vdev->vbasedev.name,
                                         offset, size, data);
    }

    return data;
}

static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIONvidia3d0Quirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    VFIONvidia3d0State old_state = quirk->state;

    quirk->state = NONE;

    if (old_state == SELECT) {
        quirk->offset = (uint32_t)data;
        quirk->state = WINDOW;
        trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
                                          nv3d0_states[quirk->state]);
    } else if (old_state == WRITE) {
        if ((quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) {
            uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1);

            vfio_pci_write_config(&vdev->pdev, offset, data, size);
            trace_vfio_quirk_nvidia_3d0_write(vdev->vbasedev.name,
                                              offset, data, size);
            return;
        }
    }

    vfio_vga_write(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                   addr + 0x10, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;
    VFIONvidia3d0Quirk *data;

    if (vdev->no_geforce_quirks ||
        !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
        !vdev->bars[1].region.size) {
        return;
    }

    quirk = vfio_quirk_alloc(2);
    quirk->data = data = g_malloc0(sizeof(*data));
    data->vdev = vdev;

    memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_nvidia_3d4_quirk,
                          data, "vfio-nvidia-3d4-quirk", 2);
    memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                                0x14 /* 0x3c0 + 0x14 */, &quirk->mem[0]);

    memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          data, "vfio-nvidia-3d0-quirk", 2);
    memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                                0x10 /* 0x3c0 + 0x10 */, &quirk->mem[1]);

    QLIST_INSERT_HEAD(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_quirk_nvidia_3d0_probe(vdev->vbasedev.name);
}

/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800 but all of PCI config
 * space; the full extended space is available at the 4k window @0x88000.
 */
typedef struct VFIONvidiaBAR5Quirk {
    uint32_t master;
    uint32_t enable;
    MemoryRegion *addr_mem;
    MemoryRegion *data_mem;
    bool enabled;
    VFIOConfigWindowQuirk window; /* last for match data */
} VFIONvidiaBAR5Quirk;
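
/*
 * The window is only considered active when bit 0 is set in both the
 * "master" register (BAR5 offset 0x0) and the "enable" register (BAR5
 * offset 0x4); the helper below toggles the quirk regions to match.
 */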

static void vfio_nvidia_bar5_enable(VFIONvidiaBAR5Quirk *bar5)
{
    VFIOPCIDevice *vdev = bar5->window.vdev;

    if (((bar5->master & bar5->enable) & 0x1) == bar5->enabled) {
        return;
    }

    bar5->enabled = !bar5->enabled;
    trace_vfio_quirk_nvidia_bar5_state(vdev->vbasedev.name,
                                       bar5->enabled ? "Enable" : "Disable");
    memory_region_set_enabled(bar5->addr_mem, bar5->enabled);
    memory_region_set_enabled(bar5->data_mem, bar5->enabled);
}

static uint64_t vfio_nvidia_bar5_quirk_master_read(void *opaque,
                                                   hwaddr addr, unsigned size)
{
    VFIONvidiaBAR5Quirk *bar5 = opaque;
    VFIOPCIDevice *vdev = bar5->window.vdev;

    return vfio_region_read(&vdev->bars[5].region, addr, size);
}

static void vfio_nvidia_bar5_quirk_master_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIONvidiaBAR5Quirk *bar5 = opaque;
    VFIOPCIDevice *vdev = bar5->window.vdev;

    vfio_region_write(&vdev->bars[5].region, addr, data, size);

    bar5->master = data;
    vfio_nvidia_bar5_enable(bar5);
}

static const MemoryRegionOps vfio_nvidia_bar5_quirk_master = {
    .read = vfio_nvidia_bar5_quirk_master_read,
    .write = vfio_nvidia_bar5_quirk_master_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_nvidia_bar5_quirk_enable_read(void *opaque,
                                                   hwaddr addr, unsigned size)
{
    VFIONvidiaBAR5Quirk *bar5 = opaque;
    VFIOPCIDevice *vdev = bar5->window.vdev;

    return vfio_region_read(&vdev->bars[5].region, addr + 4, size);
}

static void vfio_nvidia_bar5_quirk_enable_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIONvidiaBAR5Quirk *bar5 = opaque;
    VFIOPCIDevice *vdev = bar5->window.vdev;

    vfio_region_write(&vdev->bars[5].region, addr + 4, data, size);

    bar5->enable = data;
    vfio_nvidia_bar5_enable(bar5);
}

static const MemoryRegionOps vfio_nvidia_bar5_quirk_enable = {
    .read = vfio_nvidia_bar5_quirk_enable_read,
    .write = vfio_nvidia_bar5_quirk_enable_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_nvidia_bar5_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIONvidiaBAR5Quirk *bar5;
    VFIOConfigWindowQuirk *window;

    if (vdev->no_geforce_quirks ||
        !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
        !vdev->vga || nr != 5 || !vdev->bars[5].ioport) {
        return;
    }

    quirk = vfio_quirk_alloc(4);
    bar5 = quirk->data = g_malloc0(sizeof(*bar5) +
                                   (sizeof(VFIOConfigWindowMatch) * 2));
    window = &bar5->window;

    window->vdev = vdev;
    window->address_offset = 0x8;
    window->data_offset = 0xc;
    window->nr_matches = 2;
    window->matches[0].match = 0x1800;
    window->matches[0].mask = PCI_CONFIG_SPACE_SIZE - 1;
    window->matches[1].match = 0x88000;
    window->matches[1].mask = vdev->config_size - 1;
    window->bar = nr;
    window->addr_mem = bar5->addr_mem = &quirk->mem[0];
    window->data_mem = bar5->data_mem = &quirk->mem[1];

    memory_region_init_io(window->addr_mem, OBJECT(vdev),
                          &vfio_generic_window_address_quirk, window,
                          "vfio-nvidia-bar5-window-address-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        window->address_offset,
                                        window->addr_mem, 1);
    memory_region_set_enabled(window->addr_mem, false);

    memory_region_init_io(window->data_mem, OBJECT(vdev),
                          &vfio_generic_window_data_quirk, window,
                          "vfio-nvidia-bar5-window-data-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        window->data_offset,
                                        window->data_mem, 1);
    memory_region_set_enabled(window->data_mem, false);

    memory_region_init_io(&quirk->mem[2], OBJECT(vdev),
                          &vfio_nvidia_bar5_quirk_master, bar5,
                          "vfio-nvidia-bar5-master-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        0, &quirk->mem[2], 1);

    memory_region_init_io(&quirk->mem[3], OBJECT(vdev),
                          &vfio_nvidia_bar5_quirk_enable, bar5,
                          "vfio-nvidia-bar5-enable-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        4, &quirk->mem[3], 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_nvidia_bar5_probe(vdev->vbasedev.name);
}

typedef struct LastDataSet {
    VFIOQuirk *quirk;
    hwaddr addr;
    uint64_t data;
    unsigned size;
    int hits;
    int added;
} LastDataSet;

#define MAX_DYN_IOEVENTFD 10
#define HITS_FOR_IOEVENTFD 10

/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 */
static void vfio_nvidia_quirk_mirror_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    VFIOConfigMirrorQuirk *mirror = opaque;
    VFIOPCIDevice *vdev = mirror->vdev;
    PCIDevice *pdev = &vdev->pdev;
    LastDataSet *last = (LastDataSet *)&mirror->data;

    vfio_generic_quirk_mirror_write(opaque, addr, data, size);

    /*
     * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
     * MSI capability ID register.  Both the ID and next register are
     * read-only, so we allow writes covering either of those to real hw.
     */
    if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
        vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
        vfio_region_write(&vdev->bars[mirror->bar].region,
                          addr + mirror->offset, data, size);
        trace_vfio_quirk_nvidia_bar0_msi_ack(vdev->vbasedev.name);
    }

    /*
     * Automatically add an ioeventfd to handle any repeated write with the
     * same data and size above the standard PCI config space header.  This
     * is primarily expected to accelerate the MSI-ACK behavior, such as
     * noted above.  Current hardware/drivers should trigger an ioeventfd
     * at config offset 0x704 (region offset 0x88704), with data 0x0, size 4.
     *
     * The criteria of 10 successive hits is arbitrary but reliably adds the
     * MSI-ACK region.  Note that as some writes are bypassed via the
     * ioeventfd, the remaining ones have a greater chance of being seen
     * successively.  To avoid the pathological case of burning up all of
     * QEMU's open file handles, arbitrarily limit this algorithm to adding
     * no more than 10 ioeventfds, print a warning if we would have added
     * an 11th, and then stop counting.
     */
    if (!vdev->no_kvm_ioeventfd &&
        addr >= PCI_STD_HEADER_SIZEOF && last->added <= MAX_DYN_IOEVENTFD) {
        if (addr != last->addr || data != last->data || size != last->size) {
            last->addr = addr;
            last->data = data;
            last->size = size;
            last->hits = 1;
        } else if (++last->hits >= HITS_FOR_IOEVENTFD) {
            if (last->added < MAX_DYN_IOEVENTFD) {
                VFIOIOEventFD *ioeventfd;

                ioeventfd = vfio_ioeventfd_init(vdev, mirror->mem, addr, size,
                                        data, &vdev->bars[mirror->bar].region,
                                        mirror->offset + addr, true);
                if (ioeventfd) {
                    VFIOQuirk *quirk = last->quirk;

                    QLIST_INSERT_HEAD(&quirk->ioeventfds, ioeventfd, next);
                    last->added++;
                }
            } else {
                last->added++;
                warn_report("NVIDIA ioeventfd queue full for %s, unable to "
                            "accelerate 0x%"HWADDR_PRIx", data 0x%"PRIx64", "
                            "size %u", vdev->vbasedev.name, addr, data, size);
            }
        }
    }
}

static const MemoryRegionOps vfio_nvidia_mirror_quirk = {
    .read = vfio_generic_quirk_mirror_read,
    .write = vfio_nvidia_quirk_mirror_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_nvidia_bar0_quirk_reset(VFIOPCIDevice *vdev, VFIOQuirk *quirk)
{
    VFIOConfigMirrorQuirk *mirror = quirk->data;
    LastDataSet *last = (LastDataSet *)&mirror->data;

    last->addr = last->data = last->size = last->hits = last->added = 0;

    vfio_drop_dynamic_eventfds(vdev, quirk);
}

static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOConfigMirrorQuirk *mirror;
    LastDataSet *last;

    if (vdev->no_geforce_quirks ||
        !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
        !vfio_is_vga(vdev) || nr != 0) {
        return;
    }

    quirk = vfio_quirk_alloc(1);
    quirk->reset = vfio_nvidia_bar0_quirk_reset;
    mirror = quirk->data = g_malloc0(sizeof(*mirror) + sizeof(LastDataSet));
    mirror->mem = quirk->mem;
    mirror->vdev = vdev;
    mirror->offset = 0x88000;
    mirror->bar = nr;
    last = (LastDataSet *)&mirror->data;
    last->quirk = quirk;

    memory_region_init_io(mirror->mem, OBJECT(vdev),
                          &vfio_nvidia_mirror_quirk, mirror,
                          "vfio-nvidia-bar0-88000-mirror-quirk",
                          vdev->config_size);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        mirror->offset, mirror->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    /* The 0x1800 offset mirror only seems to get used by legacy VGA */
    if (vdev->vga) {
        quirk = vfio_quirk_alloc(1);
        quirk->reset = vfio_nvidia_bar0_quirk_reset;
        mirror = quirk->data = g_malloc0(sizeof(*mirror) + sizeof(LastDataSet));
        mirror->mem = quirk->mem;
        mirror->vdev = vdev;
        mirror->offset = 0x1800;
        mirror->bar = nr;
        last = (LastDataSet *)&mirror->data;
        last->quirk = quirk;

        memory_region_init_io(mirror->mem, OBJECT(vdev),
                              &vfio_nvidia_mirror_quirk, mirror,
                              "vfio-nvidia-bar0-1800-mirror-quirk",
                              PCI_CONFIG_SPACE_SIZE);
        memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                            mirror->offset, mirror->mem, 1);

        QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
    }

    trace_vfio_quirk_nvidia_bar0_probe(vdev->vbasedev.name);
}

/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

#define PCI_VENDOR_ID_REALTEK 0x10ec

/*
 * RTL8168 devices have a backdoor that can access the MSI-X table.  At BAR2
 * offset 0x70 there is a dword data register, offset 0x74 is a dword address
 * register.  According to the Linux r8169 driver, the MSI-X table is
 * addressed when the "type" portion of the address register is set to 0x1.
 * This appears to be bits 16:30.  Bit 31 is both a write indicator and some
 * sort of "address latched" indicator.  Bits 12:15 are a mask field, which
 * we can ignore because the MSI-X table should always be accessed as a dword
 * (full mask).  Bits 0:11 are the offset within the type.
 *
 * Example trace:
 *
 * Read from MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
 *
 * Write 0xfee00000 to MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
 */
typedef struct VFIOrtl8168Quirk {
    VFIOPCIDevice *vdev;
    uint32_t addr;
    uint32_t data;
    bool enabled;
} VFIOrtl8168Quirk;

static uint64_t vfio_rtl8168_quirk_address_read(void *opaque,
                                                hwaddr addr, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;
    uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x74, size);

    if (rtl->enabled) {
        data = rtl->addr ^ 0x80000000U; /* latch/complete */
        trace_vfio_quirk_rtl8168_fake_latch(vdev->vbasedev.name, data);
    }

    return data;
}

static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr,
                                             uint64_t data, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;

    rtl->enabled = false;

    if ((data & 0x7fff0000) == 0x10000) { /* MSI-X table */
        rtl->enabled = true;
        rtl->addr = (uint32_t)data;

        if (data & 0x80000000U) { /* Do write */
            if (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {
                hwaddr offset = data & 0xfff;
                uint64_t val = rtl->data;

                trace_vfio_quirk_rtl8168_msix_write(vdev->vbasedev.name,
                                                    (uint16_t)offset, val);

                /* Write to the proper guest MSI-X table instead */
                memory_region_dispatch_write(&vdev->pdev.msix_table_mmio,
                                             offset, val,
                                             size_memop(size) | MO_LE,
                                             MEMTXATTRS_UNSPECIFIED);
            }
            return; /* Do not write guest MSI-X data to hardware */
        }
    }

    vfio_region_write(&vdev->bars[2].region, addr + 0x74, data, size);
}

static const MemoryRegionOps vfio_rtl_address_quirk = {
    .read = vfio_rtl8168_quirk_address_read,
    .write = vfio_rtl8168_quirk_address_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_rtl8168_quirk_data_read(void *opaque,
                                             hwaddr addr, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;
    uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x70, size);

    if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
        hwaddr offset = rtl->addr & 0xfff;

        memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset,
                                    &data, size_memop(size) | MO_LE,
                                    MEMTXATTRS_UNSPECIFIED);
        trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data);
    }

    return data;
}

static void vfio_rtl8168_quirk_data_write(void *opaque, hwaddr addr,
                                          uint64_t data, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;

    rtl->data = (uint32_t)data;

    vfio_region_write(&vdev->bars[2].region, addr + 0x70, data, size);
}

static const MemoryRegionOps vfio_rtl_data_quirk = {
    .read = vfio_rtl8168_quirk_data_read,
    .write = vfio_rtl8168_quirk_data_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOrtl8168Quirk *rtl;

    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_REALTEK, 0x8168) || nr != 2) {
        return;
    }

    quirk = vfio_quirk_alloc(2);
    quirk->data = rtl = g_malloc0(sizeof(*rtl));
    rtl->vdev = vdev;

    memory_region_init_io(&quirk->mem[0], OBJECT(vdev),
                          &vfio_rtl_address_quirk, rtl,
                          "vfio-rtl8168-window-address-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        0x74, &quirk->mem[0], 1);

    memory_region_init_io(&quirk->mem[1], OBJECT(vdev),
                          &vfio_rtl_data_quirk, rtl,
                          "vfio-rtl8168-window-data-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        0x70, &quirk->mem[1], 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_rtl8168_probe(vdev->vbasedev.name);
}

/*
 * Common quirk probe entry points.
 */
bool vfio_config_quirk_setup(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_VFIO_IGD
    if (!vfio_probe_igd_config_quirk(vdev, errp)) {
        return false;
    }
#endif
    return true;
}

void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}

void vfio_vga_quirk_exit(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
        QLIST_FOREACH(quirk, &vdev->vga->region[i].quirks, next) {
            for (j = 0; j < quirk->nr_mem; j++) {
                memory_region_del_subregion(&vdev->vga->region[i].mem,
                                            &quirk->mem[j]);
            }
        }
    }
}

void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev)
{
    int i, j;

    for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
        while (!QLIST_EMPTY(&vdev->vga->region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga->region[i].quirks);

            QLIST_REMOVE(quirk, next);
            for (j = 0; j < quirk->nr_mem; j++) {
                object_unparent(OBJECT(&quirk->mem[j]));
            }
            g_free(quirk->mem);
            g_free(quirk->data);
            g_free(quirk);
        }
    }
}

void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
{
    vfio_probe_ati_bar4_quirk(vdev, nr);
    vfio_probe_ati_bar2_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_quirk(vdev, nr);
    vfio_probe_rtl8168_bar2_quirk(vdev, nr);
#ifdef CONFIG_VFIO_IGD
    vfio_probe_igd_bar0_quirk(vdev, nr);
#endif
}

void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    VFIOQuirk *quirk;
    int i;

    QLIST_FOREACH(quirk, &bar->quirks, next) {
        while (!QLIST_EMPTY(&quirk->ioeventfds)) {
            vfio_ioeventfd_exit(vdev, QLIST_FIRST(&quirk->ioeventfds));
        }
        for (i = 0; i < quirk->nr_mem; i++) {
            memory_region_del_subregion(bar->region.mem, &quirk->mem[i]);
        }
    }
}

void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    int i;

    while (!QLIST_EMPTY(&bar->quirks)) {
        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);

        QLIST_REMOVE(quirk, next);
        for (i = 0; i < quirk->nr_mem; i++) {
            object_unparent(OBJECT(&quirk->mem[i]));
        }
        g_free(quirk->mem);
        g_free(quirk->data);
        g_free(quirk);
    }
}

/*
 * Reset quirks
 */
void vfio_quirk_reset(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOQuirk *quirk;
        VFIOBAR *bar = &vdev->bars[i];

        QLIST_FOREACH(quirk, &bar->quirks, next) {
            if (quirk->reset) {
                quirk->reset(vdev, quirk);
            }
        }
    }
}

/*
 * AMD Radeon PCI config reset, based on Linux:
 *   drivers/gpu/drm/radeon/ci_smc.c:ci_is_smc_running()
 *   drivers/gpu/drm/radeon/radeon_device.c:radeon_pci_config_reset
 *   drivers/gpu/drm/radeon/ci_smc.c:ci_reset_smc()
 *   drivers/gpu/drm/radeon/ci_smc.c:ci_stop_smc_clock()
 * IDs: include/drm/drm_pciids.h
 * Registers: http://cgit.freedesktop.org/~agd5f/linux/commit/?id=4e2aa447f6f0
 *
 * Bonaire and Hawaii GPUs do not respond to a bus reset.  This is a bug in
 * the hardware that should be fixed on future ASICs.  The symptom of this is
 * that once the accelerated driver loads, Windows guests will BSOD on
 * subsequent attempts to load the driver, such as after VM reset or
 * shutdown/restart.  To work around this, we do an AMD specific PCI config
 * reset, followed by an SMC reset.  The PCI config reset only works if SMC
 * firmware is running, so we have a dependency on the state of the device as
 * to whether this reset will be effective.  There are still cases where we
 * won't be able to kick the device into working, but this greatly improves
 * the usability overall.  The config reset magic is relatively common on AMD
 * GPUs, but the setup and SMC poking is largely ASIC specific.
 */
static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev)
{
    uint32_t clk, pc_c;

    /*
     * Registers 200h and 204h are index and data registers for accessing
     * indirect configuration registers within the device.
     */
    vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4);
    clk = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
    vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000370, 4);
    pc_c = vfio_region_read(&vdev->bars[5].region, 0x204, 4);

    return (!(clk & 1) && (0x20100 <= pc_c));
}

/*
 * The scope of a config reset is controlled by a mode bit in the misc
 * register and a fuse, exposed as a bit in another register.  The fuse is
 * the default (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the
 * formula scope = !(misc ^ fuse), where the resulting scope is defined the
 * same as the fuse.  A truth table therefore tells us that if misc == fuse,
 * we need to flip the value of the bit in the misc register.
 */
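
/*
 * Spelled out (0 = GFX only, 1 = whole GPU, matching the fuse encoding):
 *
 *   misc  fuse  misc^fuse  scope = !(misc^fuse)  action
 *    0     0       0        1 (whole GPU)        flip misc
 *    0     1       1        0 (GFX only)         none
 *    1     0       1        0 (GFX only)         none
 *    1     1       0        1 (whole GPU)        flip misc
 */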
static void vfio_radeon_set_gfx_only_reset(VFIOPCIDevice *vdev)
{
    uint32_t misc, fuse;
    bool a, b;

    vfio_region_write(&vdev->bars[5].region, 0x200, 0xc00c0000, 4);
    fuse = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
    b = fuse & 64;

    vfio_region_write(&vdev->bars[5].region, 0x200, 0xc0000010, 4);
    misc = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
    a = misc & 2;

    if (a == b) {
        vfio_region_write(&vdev->bars[5].region, 0x204, misc ^ 2, 4);
        vfio_region_read(&vdev->bars[5].region, 0x204, 4); /* flush */
    }
}

static int vfio_radeon_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    int i, ret = 0;
    uint32_t data;

    /* Defer to a kernel implemented reset */
    if (vdev->vbasedev.reset_works) {
        trace_vfio_quirk_ati_bonaire_reset_skipped(vdev->vbasedev.name);
        return -ENODEV;
    }

    /* Enable only memory BAR access */
    vfio_pci_write_config(pdev, PCI_COMMAND, PCI_COMMAND_MEMORY, 2);

    /* Reset only works if SMC firmware is loaded and running */
    if (!vfio_radeon_smc_is_running(vdev)) {
        ret = -EINVAL;
        trace_vfio_quirk_ati_bonaire_reset_no_smc(vdev->vbasedev.name);
        goto out;
    }

    /* Make sure only the GFX function is reset */
    vfio_radeon_set_gfx_only_reset(vdev);

    /* AMD PCI config reset */
    vfio_pci_write_config(pdev, 0x7c, 0x39d5e86b, 4);
    usleep(100);

    /* Read back the memory size to make sure we're out of reset */
    for (i = 0; i < 100000; i++) {
        if (vfio_region_read(&vdev->bars[5].region, 0x5428, 4) != 0xffffffff) {
            goto reset_smc;
        }
        usleep(1);
    }

    trace_vfio_quirk_ati_bonaire_reset_timeout(vdev->vbasedev.name);

reset_smc:
    /* Reset SMC */
    vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000000, 4);
    data = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
    data |= 1;
    vfio_region_write(&vdev->bars[5].region, 0x204, data, 4);

    /* Disable SMC clock */
    vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4);
    data = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
    data |= 1;
    vfio_region_write(&vdev->bars[5].region, 0x204, data, 4);

    trace_vfio_quirk_ati_bonaire_reset_done(vdev->vbasedev.name);

out:
    /* Restore PCI command register */
    vfio_pci_write_config(pdev, PCI_COMMAND, 0, 2);

    return ret;
}

void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev)
{
    switch (vdev->vendor_id) {
    case 0x1002:
        switch (vdev->device_id) {
        /* Bonaire */
        case 0x6649: /* Bonaire [FirePro W5100] */
        case 0x6650:
        case 0x6651:
        case 0x6658: /* Bonaire XTX [Radeon R7 260X] */
        case 0x665c: /* Bonaire XT [Radeon HD 7790/8770 / R9 260 OEM] */
        case 0x665d: /* Bonaire [Radeon R7 200 Series] */
        /* Hawaii */
        case 0x67A0: /* Hawaii XT GL [FirePro W9100] */
        case 0x67A1: /* Hawaii PRO GL [FirePro W8100] */
        case 0x67A2:
        case 0x67A8:
        case 0x67A9:
        case 0x67AA:
        case 0x67B0: /* Hawaii XT [Radeon R9 290X] */
        case 0x67B1: /* Hawaii PRO [Radeon R9 290] */
        case 0x67B8:
        case 0x67B9:
        case 0x67BA:
        case 0x67BE:
            vdev->resetfn = vfio_radeon_reset;
            trace_vfio_quirk_ati_bonaire_reset(vdev->vbasedev.name);
            break;
        }
        break;
    }
}

/*
 * The NVIDIA GPUDirect P2P Vendor capability allows the user to specify
 * devices as a member of a clique.  Devices within the same clique ID
 * are capable of direct P2P.  It's the user's responsibility that this
 * is correct.  The spec says that this may reside at any unused config
 * offset, but reserves and recommends hypervisors place this at C8h.
 * The spec also states that the hypervisor should place this capability
 * at the end of the capability list, thus next is defined as 0h.
 *
 * +----------------+----------------+----------------+----------------+
 * | sig 7:0 ('P')  | vndr len (8h)  |    next (0h)   |   cap id (9h)  |
 * +----------------+----------------+----------------+----------------+
 * | rsvd 15:7(0h),id 6:3,ver 2:0(0h)|         sig 23:8 ('P2')         |
 * +---------------------------------+---------------------------------+
 *
 * https://lists.gnu.org/archive/html/qemu-devel/2017-08/pdfUda5iEpgOS.pdf
 *
 * Specification for Turing and later GPU architectures:
 * https://lists.gnu.org/archive/html/qemu-devel/2023-06/pdf142OR4O4c2.pdf
 */
static void get_nv_gpudirect_clique_id(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    const Property *prop = opaque;
    uint8_t *ptr = object_field_prop_ptr(obj, prop);

    visit_type_uint8(v, name, ptr, errp);
}

static void set_nv_gpudirect_clique_id(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    const Property *prop = opaque;
    uint8_t value, *ptr = object_field_prop_ptr(obj, prop);

    if (!visit_type_uint8(v, name, &value, errp)) {
        return;
    }

    if (value & ~0xF) {
        error_setg(errp, "Property %s: valid range 0-15", name);
        return;
    }

    *ptr = value;
}

const PropertyInfo qdev_prop_nv_gpudirect_clique = {
    .type = "uint8",
    .description = "NVIDIA GPUDirect Clique ID (0 - 15)",
    .get = get_nv_gpudirect_clique_id,
    .set = set_nv_gpudirect_clique_id,
};
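
/*
 * For reference, this property is set from the QEMU command line, e.g.
 * (hypothetical host addresses; both GPUs become members of clique 0):
 *
 *   -device vfio-pci,host=0000:01:00.0,x-nv-gpudirect-clique=0
 *   -device vfio-pci,host=0000:02:00.0,x-nv-gpudirect-clique=0
 */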

static bool is_valid_std_cap_offset(uint8_t pos)
{
    return (pos >= PCI_STD_HEADER_SIZEOF &&
            pos <= (PCI_CFG_SPACE_SIZE - PCI_CAP_SIZEOF));
}

static bool vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
{
    ERRP_GUARD();
    PCIDevice *pdev = &vdev->pdev;
    int ret, pos;
    bool c8_conflict = false, d4_conflict = false;
    uint8_t tmp;

    if (vdev->nv_gpudirect_clique == 0xFF) {
        return true;
    }

    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) {
        error_setg(errp, "NVIDIA GPUDirect Clique ID: invalid device vendor");
        return false;
    }

    if (pci_get_byte(pdev->config + PCI_CLASS_DEVICE + 1) !=
        PCI_BASE_CLASS_DISPLAY) {
        error_setg(errp, "NVIDIA GPUDirect Clique ID: unsupported PCI class");
        return false;
    }

    /*
     * Per the updated specification above, it's recommended to use offset
     * D4h for Turing and later GPU architectures due to a conflict of the
     * MSI-X capability at C8h.  We don't know how to determine the GPU
     * architecture, instead we walk the capability chain to mark conflicts
     * and choose one or error based on the result.
     *
     * NB. Cap list head in pdev->config is already cleared, read from device.
     */
    ret = pread(vdev->vbasedev.fd, &tmp, 1,
                vdev->config_offset + PCI_CAPABILITY_LIST);
    if (ret != 1 || !is_valid_std_cap_offset(tmp)) {
        error_setg(errp, "NVIDIA GPUDirect Clique ID: error getting cap list");
        return false;
    }

    do {
        if (tmp == 0xC8) {
            c8_conflict = true;
        } else if (tmp == 0xD4) {
            d4_conflict = true;
        }
        tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT];
    } while (is_valid_std_cap_offset(tmp));

    if (!c8_conflict) {
        pos = 0xC8;
    } else if (!d4_conflict) {
        pos = 0xD4;
    } else {
        error_setg(errp, "NVIDIA GPUDirect Clique ID: invalid config space");
        return false;
    }

    ret = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, 8, errp);
    if (ret < 0) {
        error_prepend(errp, "Failed to add NVIDIA GPUDirect cap: ");
        return false;
    }

    memset(vdev->emulated_config_bits + pos, 0xFF, 8);
    pos += PCI_CAP_FLAGS;
    pci_set_byte(pdev->config + pos++, 8);
    pci_set_byte(pdev->config + pos++, 'P');
    pci_set_byte(pdev->config + pos++, '2');
    pci_set_byte(pdev->config + pos++, 'P');
    pci_set_byte(pdev->config + pos++, vdev->nv_gpudirect_clique << 3);
    pci_set_byte(pdev->config + pos, 0);

    return true;
}
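
/*
 * Resulting config bytes, assuming the capability lands at C8h and a
 * clique ID of 3 (id 6:3 = 3 -> byte 0x18), matching the layout diagram
 * above:
 *
 *   C8h: 09 00 08 50 32 50 18 00   ("P2P" signature, ver 0, next 0)
 */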

/*
 * The VMD endpoint provides a real PCIe domain to the guest and the guest
 * kernel performs enumeration of the VMD sub-device domain.  Guest
 * transactions to VMD sub-devices go through MMU translation from guest
 * addresses to physical addresses.  When MMIO goes to an endpoint after
 * being translated to physical addresses, the bridge rejects the
 * transaction because the window has been programmed with guest addresses.
 *
 * VMD can use the Host Physical Address in order to correctly program the
 * bridge windows in its PCIe domain.  VMD device 28C0 has HPA shadow
 * registers located at offset 0x2000 in MEMBAR2 (BAR 4).  This quirk
 * provides the HPA shadow registers in a vendor-specific capability
 * register for devices without native support.  The position of 0xE8-0xFF
 * is in the reserved range of the VMD device capability space following
 * the Power Management Capability.
 */
#define VMD_SHADOW_CAP_VER 1
#define VMD_SHADOW_CAP_LEN 24
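
/*
 * Layout of the 24-byte capability built below (relative to the cap at
 * 0xE8): cap id (09h), next (0h), length (18h), version (1), a "SHDW"
 * signature dword, then the 16 bytes of host physical BAR2-BAR5 values
 * read from the device (the two 64-bit MEMBARs).
 */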
static bool vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
{
    ERRP_GUARD();
    uint8_t membar_phys[16];
    int ret, pos = 0xE8;

    if (!(vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x201D) ||
          vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x467F) ||
          vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x4C3D) ||
          vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x9A0B))) {
        return true;
    }

    ret = pread(vdev->vbasedev.fd, membar_phys, 16,
                vdev->config_offset + PCI_BASE_ADDRESS_2);
    if (ret != 16) {
        error_report("VMD %s cannot read MEMBARs (%d)",
                     vdev->vbasedev.name, ret);
        return false;
    }

    ret = pci_add_capability(&vdev->pdev, PCI_CAP_ID_VNDR, pos,
                             VMD_SHADOW_CAP_LEN, errp);
    if (ret < 0) {
        error_prepend(errp, "Failed to add VMD MEMBAR Shadow cap: ");
        return false;
    }

    memset(vdev->emulated_config_bits + pos, 0xFF, VMD_SHADOW_CAP_LEN);
    pos += PCI_CAP_FLAGS;
    pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_LEN);
    pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_VER);
    pci_set_long(vdev->pdev.config + pos, 0x53484457); /* SHDW */
    memcpy(vdev->pdev.config + pos + 4, membar_phys, 16);

    return true;
}

bool vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
{
    if (!vfio_add_nv_gpudirect_cap(vdev, errp)) {
        return false;
    }

    if (!vfio_add_vmd_shadow_cap(vdev, errp)) {
        return false;
    }

    return true;
}