
/*
 * iommufd container backend
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "hw/vfio/vfio-common.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "qapi/error.h"
#include "system/iommufd.h"
#include "hw/qdev-core.h"
#include "system/reset.h"
#include "qemu/cutils.h"
#include "qemu/chardev_open.h"
#include "pci.h"

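/*
 * VFIOIOMMUClass dma_map/dma_unmap callbacks: translate container-level
 * map/unmap requests into IOMMUFD backend calls on the container's IOAS.
 */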
static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                            ram_addr_t size, void *vaddr, bool readonly)
{
    const VFIOIOMMUFDContainer *container =
        container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);

    return iommufd_backend_map_dma(container->be,
                                   container->ioas_id,
                                   iova, size, vaddr, readonly);
}

static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer,
                              hwaddr iova, ram_addr_t size,
                              IOMMUTLBEntry *iotlb)
{
    const VFIOIOMMUFDContainer *container =
        container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);

    /* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */
    return iommufd_backend_unmap_dma(container->be,
                                     container->ioas_id, iova, size);
}

static bool iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp)
{
    return !vfio_kvm_device_add_fd(vbasedev->fd, errp);
}

static void iommufd_cdev_kvm_device_del(VFIODevice *vbasedev)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(vbasedev->fd, &err)) {
        error_report_err(err);
    }
}

static bool iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp)
{
    IOMMUFDBackend *iommufd = vbasedev->iommufd;
    struct vfio_device_bind_iommufd bind = {
        .argsz = sizeof(bind),
        .flags = 0,
    };

    if (!iommufd_backend_connect(iommufd, errp)) {
        return false;
    }

    /*
     * Add device to kvm-vfio to be prepared for the tracking
     * in KVM. Especially for some emulated devices, it requires
     * to have kvm information in the device open.
     */
    if (!iommufd_cdev_kvm_device_add(vbasedev, errp)) {
        goto err_kvm_device_add;
    }

    /* Bind device to iommufd */
    bind.iommufd = iommufd->fd;
    if (ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
        error_setg_errno(errp, errno, "error bind device fd=%d to iommufd=%d",
                         vbasedev->fd, bind.iommufd);
        goto err_bind;
    }

    vbasedev->devid = bind.out_devid;
    trace_iommufd_cdev_connect_and_bind(bind.iommufd, vbasedev->name,
                                        vbasedev->fd, vbasedev->devid);
    return true;

err_bind:
    iommufd_cdev_kvm_device_del(vbasedev);
err_kvm_device_add:
    iommufd_backend_disconnect(iommufd);
    return false;
}

static void iommufd_cdev_unbind_and_disconnect(VFIODevice *vbasedev)
{
    /* Unbind is automatically conducted when device fd is closed */
    iommufd_cdev_kvm_device_del(vbasedev);
    iommufd_backend_disconnect(vbasedev->iommufd);
}

static bool iommufd_hwpt_dirty_tracking(VFIOIOASHwpt *hwpt)
{
    return hwpt && hwpt->hwpt_flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
}

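/*
 * Toggle dirty tracking on every HWPT that was allocated with
 * IOMMU_HWPT_ALLOC_DIRTY_TRACKING; on failure, roll the already updated
 * HWPTs back to their previous state.
 */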
static int iommufd_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
                                           bool start, Error **errp)
{
    const VFIOIOMMUFDContainer *container =
        container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
    VFIOIOASHwpt *hwpt;

    QLIST_FOREACH(hwpt, &container->hwpt_list, next) {
        if (!iommufd_hwpt_dirty_tracking(hwpt)) {
            continue;
        }

        if (!iommufd_backend_set_dirty_tracking(container->be,
                                                hwpt->hwpt_id, start, errp)) {
            goto err;
        }
    }

    return 0;

err:
    QLIST_FOREACH(hwpt, &container->hwpt_list, next) {
        if (!iommufd_hwpt_dirty_tracking(hwpt)) {
            continue;
        }
        iommufd_backend_set_dirty_tracking(container->be,
                                           hwpt->hwpt_id, !start, NULL);
    }
    return -EINVAL;
}

static int iommufd_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                      VFIOBitmap *vbmap, hwaddr iova,
                                      hwaddr size, Error **errp)
{
    VFIOIOMMUFDContainer *container = container_of(bcontainer,
                                                   VFIOIOMMUFDContainer,
                                                   bcontainer);
    unsigned long page_size = qemu_real_host_page_size();
    VFIOIOASHwpt *hwpt;

    QLIST_FOREACH(hwpt, &container->hwpt_list, next) {
        if (!iommufd_hwpt_dirty_tracking(hwpt)) {
            continue;
        }

        if (!iommufd_backend_get_dirty_bitmap(container->be, hwpt->hwpt_id,
                                              iova, size, page_size,
                                              (uint64_t *)vbmap->bitmap,
                                              errp)) {
            return -EINVAL;
        }
    }

    return 0;
}

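/*
 * Resolve the VFIO cdev node for a device: read major:minor from
 * <sysfs_path>/vfio-dev/vfioX/dev and open the matching
 * /dev/vfio/devices/vfioX character device. Returns the open fd on
 * success, a negative value on failure.
 */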
static int iommufd_cdev_getfd(const char *sysfs_path, Error **errp)
{
    ERRP_GUARD();
    long int ret = -ENOTTY;
    g_autofree char *path = NULL;
    g_autofree char *vfio_dev_path = NULL;
    g_autofree char *vfio_path = NULL;
    DIR *dir = NULL;
    struct dirent *dent;
    g_autofree gchar *contents = NULL;
    gsize length;
    int major, minor;
    dev_t vfio_devt;

    path = g_strdup_printf("%s/vfio-dev", sysfs_path);
    dir = opendir(path);
    if (!dir) {
        error_setg_errno(errp, errno, "couldn't open directory %s", path);
        goto out;
    }

    while ((dent = readdir(dir))) {
        if (!strncmp(dent->d_name, "vfio", 4)) {
            vfio_dev_path = g_strdup_printf("%s/%s/dev", path, dent->d_name);
            break;
        }
    }

    if (!vfio_dev_path) {
        error_setg(errp, "failed to find vfio-dev/vfioX/dev");
        goto out_close_dir;
    }

    if (!g_file_get_contents(vfio_dev_path, &contents, &length, NULL)) {
        error_setg(errp, "failed to load \"%s\"", vfio_dev_path);
        goto out_close_dir;
    }

    if (sscanf(contents, "%d:%d", &major, &minor) != 2) {
        error_setg(errp, "failed to get major:minor for \"%s\"", vfio_dev_path);
        goto out_close_dir;
    }

    vfio_devt = makedev(major, minor);
    vfio_path = g_strdup_printf("/dev/vfio/devices/%s", dent->d_name);
    ret = open_cdev(vfio_path, vfio_devt);
    if (ret < 0) {
        error_setg(errp, "Failed to open %s", vfio_path);
    }

    trace_iommufd_cdev_getfd(vfio_path, ret);

out_close_dir:
    closedir(dir);
out:
    if (*errp) {
        error_prepend(errp, VFIO_MSG_PREFIX, path);
    }

    return ret;
}

static int iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
                                         Error **errp)
{
    int iommufd = vbasedev->iommufd->fd;
    struct vfio_device_attach_iommufd_pt attach_data = {
        .argsz = sizeof(attach_data),
        .flags = 0,
        .pt_id = id,
    };

    /* Attach device to an IOAS or hwpt within iommufd */
    if (ioctl(vbasedev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
        error_setg_errno(errp, errno,
                         "[iommufd=%d] error attach %s (%d) to id=%d",
                         iommufd, vbasedev->name, vbasedev->fd, id);
        return -errno;
    }

    trace_iommufd_cdev_attach_ioas_hwpt(iommufd, vbasedev->name,
                                        vbasedev->fd, id);
    return 0;
}

static bool iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp)
{
    int iommufd = vbasedev->iommufd->fd;
    struct vfio_device_detach_iommufd_pt detach_data = {
        .argsz = sizeof(detach_data),
        .flags = 0,
    };

    if (ioctl(vbasedev->fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach_data)) {
        error_setg_errno(errp, errno, "detach %s failed", vbasedev->name);
        return false;
    }

    trace_iommufd_cdev_detach_ioas_hwpt(iommufd, vbasedev->name);
    return true;
}

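/*
 * Attach the device to an existing HWPT in the container if a compatible
 * one is found, otherwise allocate a new HWPT (with dirty tracking enabled
 * when the IOMMU advertises IOMMU_HW_CAP_DIRTY_TRACKING) and attach to it.
 */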
static bool iommufd_cdev_autodomains_get(VFIODevice *vbasedev,
                                         VFIOIOMMUFDContainer *container,
                                         Error **errp)
{
    ERRP_GUARD();
    IOMMUFDBackend *iommufd = vbasedev->iommufd;
    uint32_t flags = 0;
    VFIOIOASHwpt *hwpt;
    uint32_t hwpt_id;
    int ret;

    /* Try to find a domain */
    QLIST_FOREACH(hwpt, &container->hwpt_list, next) {
        ret = iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt->hwpt_id, errp);
        if (ret) {
            /* -EINVAL means the domain is incompatible with the device. */
            if (ret == -EINVAL) {
                /*
                 * It is an expected failure and it just means we will try
                 * another domain, or create one if no existing compatible
                 * domain is found. Hence why the error is discarded below.
                 */
                error_free(*errp);
                *errp = NULL;
                continue;
            }

            return false;
        } else {
            vbasedev->hwpt = hwpt;
            QLIST_INSERT_HEAD(&hwpt->device_list, vbasedev, hwpt_next);
            vbasedev->iommu_dirty_tracking = iommufd_hwpt_dirty_tracking(hwpt);
            return true;
        }
    }

    /*
     * This is quite early and VFIO Migration state isn't yet fully
     * initialized, thus rely only on IOMMU hardware capabilities as to
     * whether IOMMU dirty tracking is going to be requested. Later
     * vfio_migration_realize() may decide to use VF dirty tracking
     * instead.
     */
    if (vbasedev->hiod->caps.hw_caps & IOMMU_HW_CAP_DIRTY_TRACKING) {
        flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
    }

    if (!iommufd_backend_alloc_hwpt(iommufd, vbasedev->devid,
                                    container->ioas_id, flags,
                                    IOMMU_HWPT_DATA_NONE, 0, NULL,
                                    &hwpt_id, errp)) {
        return false;
    }

    hwpt = g_malloc0(sizeof(*hwpt));
    hwpt->hwpt_id = hwpt_id;
    hwpt->hwpt_flags = flags;
    QLIST_INIT(&hwpt->device_list);

    ret = iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt->hwpt_id, errp);
    if (ret) {
        iommufd_backend_free_id(container->be, hwpt->hwpt_id);
        g_free(hwpt);
        return false;
    }

    vbasedev->hwpt = hwpt;
    vbasedev->iommu_dirty_tracking = iommufd_hwpt_dirty_tracking(hwpt);
    QLIST_INSERT_HEAD(&hwpt->device_list, vbasedev, hwpt_next);
    QLIST_INSERT_HEAD(&container->hwpt_list, hwpt, next);
    container->bcontainer.dirty_pages_supported |=
                                vbasedev->iommu_dirty_tracking;
    if (container->bcontainer.dirty_pages_supported &&
        !vbasedev->iommu_dirty_tracking) {
        warn_report("IOMMU instance for device %s doesn't support dirty tracking",
                    vbasedev->name);
    }
    return true;
}

static void iommufd_cdev_autodomains_put(VFIODevice *vbasedev,
                                         VFIOIOMMUFDContainer *container)
{
    VFIOIOASHwpt *hwpt = vbasedev->hwpt;

    QLIST_REMOVE(vbasedev, hwpt_next);
    vbasedev->hwpt = NULL;

    if (QLIST_EMPTY(&hwpt->device_list)) {
        QLIST_REMOVE(hwpt, next);
        iommufd_backend_free_id(container->be, hwpt->hwpt_id);
        g_free(hwpt);
    }
}

static bool iommufd_cdev_attach_container(VFIODevice *vbasedev,
                                          VFIOIOMMUFDContainer *container,
                                          Error **errp)
{
    /* mdevs aren't physical devices and will fail with auto domains */
    if (!vbasedev->mdev) {
        return iommufd_cdev_autodomains_get(vbasedev, container, errp);
    }

    return !iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp);
}

static void iommufd_cdev_detach_container(VFIODevice *vbasedev,
                                          VFIOIOMMUFDContainer *container)
{
    Error *err = NULL;

    if (!iommufd_cdev_detach_ioas_hwpt(vbasedev, &err)) {
        error_report_err(err);
    }

    if (vbasedev->hwpt) {
        iommufd_cdev_autodomains_put(vbasedev, container);
    }
}

static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container)
{
    VFIOContainerBase *bcontainer = &container->bcontainer;

    if (!QLIST_EMPTY(&bcontainer->device_list)) {
        return;
    }
    memory_listener_unregister(&bcontainer->listener);
    iommufd_backend_free_id(container->be, container->ioas_id);
    object_unref(container);
}

static int iommufd_cdev_ram_block_discard_disable(bool state)
{
    /*
     * We support coordinated discarding of RAM via the RamDiscardManager.
     */
    return ram_block_uncoordinated_discard_disable(state);
}

static bool iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container,
                                             uint32_t ioas_id, Error **errp)
{
    VFIOContainerBase *bcontainer = &container->bcontainer;
    g_autofree struct iommu_ioas_iova_ranges *info = NULL;
    struct iommu_iova_range *iova_ranges;
    int sz, fd = container->be->fd;

    info = g_malloc0(sizeof(*info));
    info->size = sizeof(*info);
    info->ioas_id = ioas_id;

    if (ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info) && errno != EMSGSIZE) {
        goto error;
    }

    sz = info->num_iovas * sizeof(struct iommu_iova_range);
    info = g_realloc(info, sizeof(*info) + sz);
    info->allowed_iovas = (uintptr_t)(info + 1);

    if (ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info)) {
        goto error;
    }

    iova_ranges = (struct iommu_iova_range *)(uintptr_t)info->allowed_iovas;

    for (int i = 0; i < info->num_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, iova_ranges[i].start, iova_ranges[i].last);
        bcontainer->iova_ranges =
            range_list_insert(bcontainer->iova_ranges, range);
    }
    bcontainer->pgsizes = info->out_iova_alignment;
    return true;

error:
    error_setg_errno(errp, errno, "Cannot get IOVA ranges");
    return false;
}

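/*
 * VFIOIOMMUClass attach_device callback: open the cdev fd if needed, bind
 * it to the iommufd backend, then reuse a compatible container in this
 * address space or allocate a new IOAS-backed container, register the
 * memory listener and fill in the device info.
 */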
static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
                                AddressSpace *as, Error **errp)
{
    VFIOContainerBase *bcontainer;
    VFIOIOMMUFDContainer *container;
    VFIOAddressSpace *space;
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, devfd;
    uint32_t ioas_id;
    Error *err = NULL;
    const VFIOIOMMUClass *iommufd_vioc =
        VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD));

    if (vbasedev->fd < 0) {
        devfd = iommufd_cdev_getfd(vbasedev->sysfsdev, errp);
        if (devfd < 0) {
            return false;
        }
        vbasedev->fd = devfd;
    } else {
        devfd = vbasedev->fd;
    }

    if (!iommufd_cdev_connect_and_bind(vbasedev, errp)) {
        goto err_connect_bind;
    }

    space = vfio_get_address_space(as);

    /*
     * The HostIOMMUDevice data from legacy backend is static and doesn't need
     * any information from the (type1-iommu) backend to be initialized. In
     * contrast however, the IOMMUFD HostIOMMUDevice data requires the iommufd
     * FD to be connected and having a devid to be able to successfully call
     * iommufd_backend_get_device_info().
     */
    if (!vfio_device_hiod_realize(vbasedev, errp)) {
        goto err_alloc_ioas;
    }

    /* try to attach to an existing container in this space */
    QLIST_FOREACH(bcontainer, &space->containers, next) {
        container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
        if (VFIO_IOMMU_GET_CLASS(bcontainer) != iommufd_vioc ||
            vbasedev->iommufd != container->be) {
            continue;
        }
        if (!iommufd_cdev_attach_container(vbasedev, container, &err)) {
            const char *msg = error_get_pretty(err);

            trace_iommufd_cdev_fail_attach_existing_container(msg);
            error_free(err);
            err = NULL;
        } else {
            ret = iommufd_cdev_ram_block_discard_disable(true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                goto err_discard_disable;
            }
            goto found_container;
        }
    }

    /* Need to allocate a new dedicated container */
    if (!iommufd_backend_alloc_ioas(vbasedev->iommufd, &ioas_id, errp)) {
        goto err_alloc_ioas;
    }

    trace_iommufd_cdev_alloc_ioas(vbasedev->iommufd->fd, ioas_id);

    container = VFIO_IOMMU_IOMMUFD(object_new(TYPE_VFIO_IOMMU_IOMMUFD));
    container->be = vbasedev->iommufd;
    container->ioas_id = ioas_id;
    QLIST_INIT(&container->hwpt_list);

    bcontainer = &container->bcontainer;
    vfio_address_space_insert(space, bcontainer);

    if (!iommufd_cdev_attach_container(vbasedev, container, errp)) {
        goto err_attach_container;
    }

    ret = iommufd_cdev_ram_block_discard_disable(true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto err_discard_disable;
    }

    if (!iommufd_cdev_get_info_iova_range(container, ioas_id, &err)) {
        error_append_hint(&err,
                   "Fallback to default 64bit IOVA range and 4K page size\n");
        warn_report_err(err);
        err = NULL;
        bcontainer->pgsizes = qemu_real_host_page_size();
    }

    bcontainer->listener = vfio_memory_listener;
    memory_listener_register(&bcontainer->listener, bcontainer->space->as);

    if (bcontainer->error) {
        error_propagate_prepend(errp, bcontainer->error,
                                "memory listener initialization failed: ");
        goto err_listener_register;
    }

    bcontainer->initialized = true;

found_container:
    ret = ioctl(devfd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        goto err_listener_register;
    }

    if (!vfio_cpr_register_container(bcontainer, errp)) {
        goto err_listener_register;
    }

    /*
     * TODO: examine RAM_BLOCK_DISCARD stuff, should we do group level
     * for discarding incompatibility check as well?
     */
    if (vbasedev->ram_block_discard_allowed) {
        iommufd_cdev_ram_block_discard_disable(false);
    }

    vbasedev->group = 0;
    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;
    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs,
                                   vbasedev->num_regions, vbasedev->flags);
    return true;

err_listener_register:
    iommufd_cdev_ram_block_discard_disable(false);
err_discard_disable:
    iommufd_cdev_detach_container(vbasedev, container);
err_attach_container:
    iommufd_cdev_container_destroy(container);
err_alloc_ioas:
    vfio_put_address_space(space);
    iommufd_cdev_unbind_and_disconnect(vbasedev);
err_connect_bind:
    close(vbasedev->fd);
    return false;
}

static void iommufd_cdev_detach(VFIODevice *vbasedev)
{
    VFIOContainerBase *bcontainer = vbasedev->bcontainer;
    VFIOAddressSpace *space = bcontainer->space;
    VFIOIOMMUFDContainer *container = container_of(bcontainer,
                                                   VFIOIOMMUFDContainer,
                                                   bcontainer);
    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->bcontainer = NULL;

    if (!vbasedev->ram_block_discard_allowed) {
        iommufd_cdev_ram_block_discard_disable(false);
    }

    vfio_cpr_unregister_container(bcontainer);
    iommufd_cdev_detach_container(vbasedev, container);
    iommufd_cdev_container_destroy(container);
    vfio_put_address_space(space);

    iommufd_cdev_unbind_and_disconnect(vbasedev);
    close(vbasedev->fd);
}

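/* Look up an iommufd-backed VFIO device in the global device list by devid */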
static VFIODevice *iommufd_cdev_pci_find_by_devid(__u32 devid)
{
    VFIODevice *vbasedev_iter;
    const VFIOIOMMUClass *iommufd_vioc =
        VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD));

    QLIST_FOREACH(vbasedev_iter, &vfio_device_list, global_next) {
        if (VFIO_IOMMU_GET_CLASS(vbasedev_iter->bcontainer) != iommufd_vioc) {
            continue;
        }
        if (devid == vbasedev_iter->devid) {
            return vbasedev_iter;
        }
    }
    return NULL;
}

static VFIOPCIDevice *
iommufd_cdev_dep_get_realized_vpdev(struct vfio_pci_dependent_device *dep_dev,
                                    VFIODevice *reset_dev)
{
    VFIODevice *vbasedev_tmp;

    if (dep_dev->devid == reset_dev->devid ||
        dep_dev->devid == VFIO_PCI_DEVID_OWNED) {
        return NULL;
    }

    vbasedev_tmp = iommufd_cdev_pci_find_by_devid(dep_dev->devid);
    if (!vbasedev_tmp || !vbasedev_tmp->dev->realized ||
        vbasedev_tmp->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    return container_of(vbasedev_tmp, VFIOPCIDevice, vbasedev);
}

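/*
 * VFIOIOMMUClass pci_hot_reset callback: use the devid-based hot reset info
 * to check ownership of all dependent devices, quiesce the affected realized
 * VFIO PCI devices, then issue the bus reset with a zero-length device array
 * as expected for the iommufd backend.
 */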
static int iommufd_cdev_pci_hot_reset(VFIODevice *vbasedev, bool single)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    struct vfio_pci_hot_reset_info *info = NULL;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int ret, i;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);
    if (ret) {
        goto out_single;
    }

    assert(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID);

    devices = &info->devices[0];

    if (!(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED)) {
        if (!vdev->has_pm_reset) {
            for (i = 0; i < info->count; i++) {
                if (devices[i].devid == VFIO_PCI_DEVID_NOT_OWNED) {
                    error_report("vfio: Cannot reset device %s, "
                                 "depends on device %04x:%02x:%02x.%x "
                                 "which is not owned.",
                                 vdev->vbasedev.name, devices[i].segment,
                                 devices[i].bus, PCI_SLOT(devices[i].devfn),
                                 PCI_FUNC(devices[i].devfn));
                }
            }
        }
        ret = -EPERM;
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    for (i = 0; i < info->count; i++) {
        VFIOPCIDevice *tmp;

        trace_iommufd_cdev_pci_hot_reset_dep_devices(devices[i].segment,
                                                     devices[i].bus,
                                                     PCI_SLOT(devices[i].devfn),
                                                     PCI_FUNC(devices[i].devfn),
                                                     devices[i].devid);

        /*
         * If a VFIO cdev device is resettable, all the dependent devices
         * are either bound to same iommufd or within same iommu_groups as
         * one of the iommufd bound devices.
         */
        assert(devices[i].devid != VFIO_PCI_DEVID_NOT_OWNED);

        tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev);
        if (!tmp) {
            continue;
        }

        if (single) {
            ret = -EINVAL;
            goto out_single;
        }
        vfio_pci_pre_reset(tmp);
        tmp->vbasedev.needs_reset = false;
        multi = true;
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Use zero length array for hot reset with iommufd backend */
    reset = g_malloc0(sizeof(*reset));
    reset->argsz = sizeof(*reset);

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);
    if (ret) {
        ret = -errno;
    }

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? strerror(errno) : "Success");

    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        VFIOPCIDevice *tmp;

        tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev);
        if (!tmp) {
            continue;
        }
        vfio_pci_post_reset(tmp);
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}

static void vfio_iommu_iommufd_class_init(ObjectClass *klass, void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->hiod_typename = TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO;

    vioc->dma_map = iommufd_cdev_map;
    vioc->dma_unmap = iommufd_cdev_unmap;
    vioc->attach_device = iommufd_cdev_attach;
    vioc->detach_device = iommufd_cdev_detach;
    vioc->pci_hot_reset = iommufd_cdev_pci_hot_reset;
    vioc->set_dirty_page_tracking = iommufd_set_dirty_page_tracking;
    vioc->query_dirty_bitmap = iommufd_query_dirty_bitmap;
};

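/*
 * HostIOMMUDevice realize for the IOMMUFD VFIO backend: query the host IOMMU
 * hardware info through iommufd and record the IOMMU type and capability bits
 * in hiod->caps.
 */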
static bool hiod_iommufd_vfio_realize(HostIOMMUDevice *hiod, void *opaque,
                                      Error **errp)
{
    VFIODevice *vdev = opaque;
    HostIOMMUDeviceCaps *caps = &hiod->caps;
    enum iommu_hw_info_type type;
    union {
        struct iommu_hw_info_vtd vtd;
    } data;
    uint64_t hw_caps;

    hiod->agent = opaque;

    if (!iommufd_backend_get_device_info(vdev->iommufd, vdev->devid,
                                         &type, &data, sizeof(data),
                                         &hw_caps, errp)) {
        return false;
    }

    hiod->name = g_strdup(vdev->name);
    caps->type = type;
    caps->hw_caps = hw_caps;

    return true;
}

static GList *
hiod_iommufd_vfio_get_iova_ranges(HostIOMMUDevice *hiod)
{
    VFIODevice *vdev = hiod->agent;

    g_assert(vdev);
    return vfio_container_get_iova_ranges(vdev->bcontainer);
}

static uint64_t
hiod_iommufd_vfio_get_page_size_mask(HostIOMMUDevice *hiod)
{
    VFIODevice *vdev = hiod->agent;

    g_assert(vdev);
    return vfio_container_get_page_size_mask(vdev->bcontainer);
}

static void hiod_iommufd_vfio_class_init(ObjectClass *oc, void *data)
{
    HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_CLASS(oc);

    hiodc->realize = hiod_iommufd_vfio_realize;
    hiodc->get_iova_ranges = hiod_iommufd_vfio_get_iova_ranges;
    hiodc->get_page_size_mask = hiod_iommufd_vfio_get_page_size_mask;
};

static const TypeInfo types[] = {
    {
        .name = TYPE_VFIO_IOMMU_IOMMUFD,
        .parent = TYPE_VFIO_IOMMU,
        .instance_size = sizeof(VFIOIOMMUFDContainer),
        .class_init = vfio_iommu_iommufd_class_init,
    }, {
        .name = TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO,
        .parent = TYPE_HOST_IOMMU_DEVICE_IOMMUFD,
        .class_init = hiod_iommufd_vfio_class_init,
    }
};

DEFINE_TYPES(types)