2
0

platform.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724
  1. /*
  2. * vfio based device assignment support - platform devices
  3. *
  4. * Copyright Linaro Limited, 2014
  5. *
  6. * Authors:
  7. * Kim Phillips <kim.phillips@linaro.org>
  8. * Eric Auger <eric.auger@linaro.org>
  9. *
  10. * This work is licensed under the terms of the GNU GPL, version 2. See
  11. * the COPYING file in the top-level directory.
  12. *
  13. * Based on vfio based PCI device assignment support:
  14. * Copyright Red Hat, Inc. 2012
  15. */
  16. #include "qemu/osdep.h"
  17. #include "qapi/error.h"
  18. #include <sys/ioctl.h>
  19. #include <linux/vfio.h>
  20. #include "hw/vfio/vfio-platform.h"
  21. #include "migration/vmstate.h"
  22. #include "qemu/error-report.h"
  23. #include "qemu/main-loop.h"
  24. #include "qemu/module.h"
  25. #include "qemu/range.h"
  26. #include "exec/memory.h"
  27. #include "exec/address-spaces.h"
  28. #include "qemu/queue.h"
  29. #include "hw/sysbus.h"
  30. #include "trace.h"
  31. #include "hw/irq.h"
  32. #include "hw/platform-bus.h"
  33. #include "hw/qdev-properties.h"
  34. #include "sysemu/kvm.h"
  35. /*
  36. * Functions used whatever the injection method
  37. */
  38. static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
  39. {
  40. return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
  41. }
  42. /**
  43. * vfio_init_intp - allocate, initialize the IRQ struct pointer
  44. * and add it into the list of IRQs
  45. * @vbasedev: the VFIO device handle
  46. * @info: irq info struct retrieved from VFIO driver
  47. * @errp: error object
  48. */
  49. static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
  50. struct vfio_irq_info info, Error **errp)
  51. {
  52. int ret;
  53. VFIOPlatformDevice *vdev =
  54. container_of(vbasedev, VFIOPlatformDevice, vbasedev);
  55. SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
  56. VFIOINTp *intp;
  57. intp = g_malloc0(sizeof(*intp));
  58. intp->vdev = vdev;
  59. intp->pin = info.index;
  60. intp->flags = info.flags;
  61. intp->state = VFIO_IRQ_INACTIVE;
  62. intp->kvm_accel = false;
  63. sysbus_init_irq(sbdev, &intp->qemuirq);
  64. /* Get an eventfd for trigger */
  65. intp->interrupt = g_malloc0(sizeof(EventNotifier));
  66. ret = event_notifier_init(intp->interrupt, 0);
  67. if (ret) {
  68. g_free(intp->interrupt);
  69. g_free(intp);
  70. error_setg_errno(errp, -ret,
  71. "failed to initialize trigger eventfd notifier");
  72. return NULL;
  73. }
  74. if (vfio_irq_is_automasked(intp)) {
  75. /* Get an eventfd for resample/unmask */
  76. intp->unmask = g_malloc0(sizeof(EventNotifier));
  77. ret = event_notifier_init(intp->unmask, 0);
  78. if (ret) {
  79. g_free(intp->interrupt);
  80. g_free(intp->unmask);
  81. g_free(intp);
  82. error_setg_errno(errp, -ret,
  83. "failed to initialize resample eventfd notifier");
  84. return NULL;
  85. }
  86. }
  87. QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
  88. return intp;
  89. }
  90. /**
  91. * vfio_set_trigger_eventfd - set VFIO eventfd handling
  92. *
  93. * @intp: IRQ struct handle
  94. * @handler: handler to be called on eventfd signaling
  95. *
  96. * Setup VFIO signaling and attach an optional user-side handler
  97. * to the eventfd
  98. */
  99. static int vfio_set_trigger_eventfd(VFIOINTp *intp,
  100. eventfd_user_side_handler_t handler)
  101. {
  102. VFIODevice *vbasedev = &intp->vdev->vbasedev;
  103. int32_t fd = event_notifier_get_fd(intp->interrupt);
  104. Error *err = NULL;
  105. int ret;
  106. qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);
  107. ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
  108. VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
  109. if (ret) {
  110. error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
  111. qemu_set_fd_handler(fd, NULL, NULL, NULL);
  112. }
  113. return ret;
  114. }
  115. /*
  116. * Functions only used when eventfds are handled on user-side
  117. * ie. without irqfd
  118. */
  119. /**
  120. * vfio_mmap_set_enabled - enable/disable the fast path mode
  121. * @vdev: the VFIO platform device
  122. * @enabled: the target mmap state
  123. *
  124. * enabled = true ~ fast path = MMIO region is mmaped (no KVM TRAP);
  125. * enabled = false ~ slow path = MMIO region is trapped and region callbacks
  126. * are called; slow path enables to trap the device IRQ status register reset
  127. */
  128. static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
  129. {
  130. int i;
  131. for (i = 0; i < vdev->vbasedev.num_regions; i++) {
  132. vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
  133. }
  134. }
  135. /**
  136. * vfio_intp_mmap_enable - timer function, restores the fast path
  137. * if there is no more active IRQ
  138. * @opaque: actually points to the VFIO platform device
  139. *
  140. * Called on mmap timer timout, this function checks whether the
  141. * IRQ is still active and if not, restores the fast path.
  142. * by construction a single eventfd is handled at a time.
  143. * if the IRQ is still active, the timer is re-programmed.
  144. */
  145. static void vfio_intp_mmap_enable(void *opaque)
  146. {
  147. VFIOINTp *tmp;
  148. VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;
  149. qemu_mutex_lock(&vdev->intp_mutex);
  150. QLIST_FOREACH(tmp, &vdev->intp_list, next) {
  151. if (tmp->state == VFIO_IRQ_ACTIVE) {
  152. trace_vfio_platform_intp_mmap_enable(tmp->pin);
  153. /* re-program the timer to check active status later */
  154. timer_mod(vdev->mmap_timer,
  155. qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
  156. vdev->mmap_timeout);
  157. qemu_mutex_unlock(&vdev->intp_mutex);
  158. return;
  159. }
  160. }
  161. vfio_mmap_set_enabled(vdev, true);
  162. qemu_mutex_unlock(&vdev->intp_mutex);
  163. }
  164. /**
  165. * vfio_intp_inject_pending_lockheld - Injects a pending IRQ
  166. * @opaque: opaque pointer, in practice the VFIOINTp handle
  167. *
  168. * The function is called on a previous IRQ completion, from
  169. * vfio_platform_eoi, while the intp_mutex is locked.
  170. * Also in such situation, the slow path already is set and
  171. * the mmap timer was already programmed.
  172. */
  173. static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
  174. {
  175. trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
  176. event_notifier_get_fd(intp->interrupt));
  177. intp->state = VFIO_IRQ_ACTIVE;
  178. /* trigger the virtual IRQ */
  179. qemu_set_irq(intp->qemuirq, 1);
  180. }
  181. /**
  182. * vfio_intp_interrupt - The user-side eventfd handler
  183. * @opaque: opaque pointer which in practice is the VFIOINTp handle
  184. *
  185. * the function is entered in event handler context:
  186. * the vIRQ is injected into the guest if there is no other active
  187. * or pending IRQ.
  188. */
  189. static void vfio_intp_interrupt(VFIOINTp *intp)
  190. {
  191. int ret;
  192. VFIOINTp *tmp;
  193. VFIOPlatformDevice *vdev = intp->vdev;
  194. bool delay_handling = false;
  195. qemu_mutex_lock(&vdev->intp_mutex);
  196. if (intp->state == VFIO_IRQ_INACTIVE) {
  197. QLIST_FOREACH(tmp, &vdev->intp_list, next) {
  198. if (tmp->state == VFIO_IRQ_ACTIVE ||
  199. tmp->state == VFIO_IRQ_PENDING) {
  200. delay_handling = true;
  201. break;
  202. }
  203. }
  204. }
  205. if (delay_handling) {
  206. /*
  207. * the new IRQ gets a pending status and is pushed in
  208. * the pending queue
  209. */
  210. intp->state = VFIO_IRQ_PENDING;
  211. trace_vfio_intp_interrupt_set_pending(intp->pin);
  212. QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
  213. intp, pqnext);
  214. ret = event_notifier_test_and_clear(intp->interrupt);
  215. qemu_mutex_unlock(&vdev->intp_mutex);
  216. return;
  217. }
  218. trace_vfio_platform_intp_interrupt(intp->pin,
  219. event_notifier_get_fd(intp->interrupt));
  220. ret = event_notifier_test_and_clear(intp->interrupt);
  221. if (!ret) {
  222. error_report("Error when clearing fd=%d (ret = %d)",
  223. event_notifier_get_fd(intp->interrupt), ret);
  224. }
  225. intp->state = VFIO_IRQ_ACTIVE;
  226. /* sets slow path */
  227. vfio_mmap_set_enabled(vdev, false);
  228. /* trigger the virtual IRQ */
  229. qemu_set_irq(intp->qemuirq, 1);
  230. /*
  231. * Schedule the mmap timer which will restore fastpath when no IRQ
  232. * is active anymore
  233. */
  234. if (vdev->mmap_timeout) {
  235. timer_mod(vdev->mmap_timer,
  236. qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
  237. vdev->mmap_timeout);
  238. }
  239. qemu_mutex_unlock(&vdev->intp_mutex);
  240. }
/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level sensitive IRQ auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered, trapped since the slow path was set.
 * It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    qemu_mutex_lock(&vdev->intp_mutex);
    /* find the (at most one) active IRQ and complete it */
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmasks the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, inject the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        /* intp_mutex is held here, as the helper's name requires */
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}
  285. /**
  286. * vfio_start_eventfd_injection - starts the virtual IRQ injection using
  287. * user-side handled eventfds
  288. * @sbdev: the sysbus device handle
  289. * @irq: the qemu irq handle
  290. */
  291. static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
  292. {
  293. VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
  294. VFIOINTp *intp;
  295. QLIST_FOREACH(intp, &vdev->intp_list, next) {
  296. if (intp->qemuirq == irq) {
  297. break;
  298. }
  299. }
  300. assert(intp);
  301. if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) {
  302. abort();
  303. }
  304. }
  305. /*
  306. * Functions used for irqfd
  307. */
  308. /**
  309. * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
  310. * @intp: the IRQ struct handle
  311. * programs the VFIO driver to unmask this IRQ when the
  312. * intp->unmask eventfd is triggered
  313. */
  314. static int vfio_set_resample_eventfd(VFIOINTp *intp)
  315. {
  316. int32_t fd = event_notifier_get_fd(intp->unmask);
  317. VFIODevice *vbasedev = &intp->vdev->vbasedev;
  318. Error *err = NULL;
  319. int ret;
  320. qemu_set_fd_handler(fd, NULL, NULL, NULL);
  321. ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
  322. VFIO_IRQ_SET_ACTION_UNMASK, fd, &err);
  323. if (ret) {
  324. error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
  325. }
  326. return ret;
  327. }
/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * In case the irqfd setup fails, we fallback to userspace handled eventfd
 * injection. Aborts if VFIO signaling setup fails after the irqfd was
 * already registered with KVM.
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    /* irqfd requires kvm support and must not be disabled by x-irqfd */
    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    /* find the VFIOINTp backing @irq */
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    /* route the trigger/resample eventfds through the in-kernel irqchip */
    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    /* NULL handler: eventfds are consumed in-kernel, not in QEMU */
    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* level-sensitive: also wire the resample (unmask) eventfd */
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt),
                                    event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    /* irqfd was registered but VFIO signaling failed: unrecoverable */
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    abort();
fail_irqfd:
    /* fall back to user-side eventfd handling */
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}
/* VFIO skeleton */

/* VFIODeviceOps hook: flag the device as needing a reset */
static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}
/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    /* multi-device hot reset is unsupported: always report failure */
    return -1;
}
  388. /**
  389. * vfio_populate_device - Allocate and populate MMIO region
  390. * and IRQ structs according to driver returned information
  391. * @vbasedev: the VFIO device handle
  392. * @errp: error object
  393. *
  394. */
  395. static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
  396. {
  397. VFIOINTp *intp, *tmp;
  398. int i, ret = -1;
  399. VFIOPlatformDevice *vdev =
  400. container_of(vbasedev, VFIOPlatformDevice, vbasedev);
  401. if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
  402. error_setg(errp, "this isn't a platform device");
  403. return ret;
  404. }
  405. vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);
  406. for (i = 0; i < vbasedev->num_regions; i++) {
  407. char *name = g_strdup_printf("VFIO %s region %d\n", vbasedev->name, i);
  408. vdev->regions[i] = g_new0(VFIORegion, 1);
  409. ret = vfio_region_setup(OBJECT(vdev), vbasedev,
  410. vdev->regions[i], i, name);
  411. g_free(name);
  412. if (ret) {
  413. error_setg_errno(errp, -ret, "failed to get region %d info", i);
  414. goto reg_error;
  415. }
  416. }
  417. vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
  418. vfio_intp_mmap_enable, vdev);
  419. QSIMPLEQ_INIT(&vdev->pending_intp_queue);
  420. for (i = 0; i < vbasedev->num_irqs; i++) {
  421. struct vfio_irq_info irq = { .argsz = sizeof(irq) };
  422. irq.index = i;
  423. ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
  424. if (ret) {
  425. error_setg_errno(errp, -ret, "failed to get device irq info");
  426. goto irq_err;
  427. } else {
  428. trace_vfio_platform_populate_interrupts(irq.index,
  429. irq.count,
  430. irq.flags);
  431. intp = vfio_init_intp(vbasedev, irq, errp);
  432. if (!intp) {
  433. ret = -1;
  434. goto irq_err;
  435. }
  436. }
  437. }
  438. return 0;
  439. irq_err:
  440. timer_del(vdev->mmap_timer);
  441. QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
  442. QLIST_REMOVE(intp, next);
  443. g_free(intp);
  444. }
  445. reg_error:
  446. for (i = 0; i < vbasedev->num_regions; i++) {
  447. if (vdev->regions[i]) {
  448. vfio_region_finalize(vdev->regions[i]);
  449. }
  450. g_free(vdev->regions[i]);
  451. }
  452. g_free(vdev->regions);
  453. return ret;
  454. }
/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    /* IRQ completion hook, see vfio_platform_eoi() */
    .vfio_eoi = vfio_platform_eoi,
};
/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 * Implement the VFIO command sequence that allows to discover
 * assigned device resources: group extraction, device
 * fd retrieval, resource query.
 * Precondition: the device name must be initialized.
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
    VFIOGroup *group;
    VFIODevice *vbasedev_iter;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* @sysfsdev takes precedence over @host */
    if (vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
    } else {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            error_setg(errp, "wrong host device name");
            return -EINVAL;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (stat(vbasedev->sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno,
                         "failed to get the sysfs host device file status");
        return -errno;
    }

    /* resolve the iommu_group symlink to find the group number */
    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len < 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    /* readlink() does not NUL-terminate; len < sizeof(group_path) here */
    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }

    trace_vfio_platform_base_device_init(vbasedev->name, groupid);

    group = vfio_get_group(groupid, &address_space_memory, errp);
    if (!group) {
        return -ENOENT;
    }

    /* refuse to attach the same device twice */
    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
    }

    return ret;
}
/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error
 *
 * Initialize the device, its memory regions and IRQ structures.
 * IRQs are started separately, on IRQ connection (see
 * vfio_start_irqfd_injection).
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->dev = dev;
    vbasedev->ops = &vfio_platform_ops;

    qemu_mutex_init(&vdev->intp_mutex);

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    ret = vfio_base_device_init(vbasedev, errp);
    if (ret) {
        goto out;
    }

    if (!vdev->compat) {
        GError *gerr = NULL;
        gchar *contents;
        gsize length;
        char *path;

        /* read the compatible strings from the device's sysfs of_node */
        path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev);
        if (!g_file_get_contents(path, &contents, &length, &gerr)) {
            /*
             * NOTE(review): this early return bypasses the VFIO_MSG_PREFIX
             * prepend done at "out:" below — confirm this is intended
             */
            error_setg(errp, "%s", gerr->message);
            g_error_free(gerr);
            g_free(path);
            return;
        }
        g_free(path);
        vdev->compat = contents;
        /*
         * count the NUL-separated compatible strings; vdev->compat keeps
         * the buffer start while the local @contents walks through it
         */
        for (vdev->num_compat = 0; length; vdev->num_compat++) {
            size_t skip = strlen(contents) + 1;

            contents += skip;
            length -= skip;
        }
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            /* mmap failure is not fatal: access stays trapped (slow) */
            warn_report("%s mmap unsupported, performance may be slow",
                        memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
out:
    if (!ret) {
        return;
    }
    /* prefix the error with the device identity for context */
    if (vdev->vbasedev.name) {
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    } else {
        error_prepend(errp, "vfio error: ");
    }
}
/* no state is migrated: the device blocks migration */
static const VMStateDescription vfio_platform_vmstate = {
    .name = "vfio-platform",
    .unmigratable = 1,
};
static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    /* delay (ms) before the mmap fast path is restored after an IRQ */
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    /* allow kvm irqfd acceleration (falls back to eventfds when off) */
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};
/* class init: hook realize, properties, vmstate and IRQ connection */
static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    dc->props = vfio_platform_dev_properties;
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    /* called when the platform bus wires up the device IRQs */
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    /* Supported by TYPE_VIRT_MACHINE */
    dc->user_creatable = true;
}
/* QOM type registration data for the vfio-platform sysbus device */
static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
};
/* register the vfio-platform device type with QOM at startup */
static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)