/*
 * DMA memory preregistration
 *
 * Authors:
 *  Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "system/kvm.h"
#include "system/hostmem.h"
#include "exec/address-spaces.h"

#include "hw/vfio/vfio-common.h"
#include "hw/hw.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "trace.h"

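/*
 * Container state specific to the sPAPR TCE IOMMU backends: the base
 * legacy container plus the memory listener used for RAM preregistration
 * (v2 only) and the list of host DMA windows created so far.
 */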
typedef struct VFIOSpaprContainer {
    VFIOContainer container;
    MemoryListener prereg_listener;
    QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
} VFIOSpaprContainer;

OBJECT_DECLARE_SIMPLE_TYPE(VFIOSpaprContainer, VFIO_IOMMU_SPAPR);

static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
{
    if (memory_region_is_iommu(section->mr)) {
        hw_error("Cannot possibly preregister IOMMU memory");
    }

    return !memory_region_is_ram(section->mr) ||
           memory_region_is_ram_device(section->mr);
}

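/*
 * Translate a guest physical address within @section into the host
 * userspace virtual address backing it, which is what the kernel expects
 * in vfio_iommu_spapr_register_memory.vaddr.
 */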
static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
{
    return memory_region_get_ram_ptr(section->mr) +
           section->offset_within_region +
           (gpa - section->offset_within_address_space);
}

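/*
 * With the sPAPR TCE IOMMU v2, RAM that the guest may later map for DMA
 * has to be preregistered with the host kernel first
 * (VFIO_IOMMU_SPAPR_REGISTER_MEMORY) so it can be pinned and accounted
 * up front rather than on every TCE update. This listener walks guest
 * RAM sections and performs that registration.
 */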
static void vfio_prereg_listener_region_add(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer,
                                                  prereg_listener);
    VFIOContainer *container = &scontainer->container;
    VFIOContainerBase *bcontainer = &container->bcontainer;
    const hwaddr gpa = section->offset_within_address_space;
    hwaddr end;
    int ret;
    hwaddr page_mask = qemu_real_host_page_mask();
    struct vfio_iommu_spapr_register_memory reg = {
        .argsz = sizeof(reg),
        .flags = 0,
    };

    if (vfio_prereg_listener_skipped_section(section)) {
        trace_vfio_prereg_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) ||
                 (section->offset_within_region & ~page_mask) ||
                 (int128_get64(section->size) & ~page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    end = section->offset_within_address_space + int128_get64(section->size);
    if (gpa >= end) {
        return;
    }

    memory_region_ref(section->mr);

    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
    reg.size = end - gpa;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
    trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0);
    if (ret) {
        /*
         * On the initfn path, store the first error in the container so we
         * can gracefully fail. At runtime, there's not much we can do other
         * than throw a hardware error.
         */
        if (!bcontainer->initialized) {
            if (!bcontainer->error) {
                error_setg_errno(&bcontainer->error, -ret,
                                 "Memory registering failed");
            }
        } else {
            hw_error("vfio: Memory registering failed, unable to continue");
        }
    }
}

static void vfio_prereg_listener_region_del(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer,
                                                  prereg_listener);
    VFIOContainer *container = &scontainer->container;
    const hwaddr gpa = section->offset_within_address_space;
    hwaddr end;
    int ret;
    hwaddr page_mask = qemu_real_host_page_mask();
    struct vfio_iommu_spapr_register_memory reg = {
        .argsz = sizeof(reg),
        .flags = 0,
    };

    if (vfio_prereg_listener_skipped_section(section)) {
        trace_vfio_prereg_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) ||
                 (section->offset_within_region & ~page_mask) ||
                 (int128_get64(section->size) & ~page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    end = section->offset_within_address_space + int128_get64(section->size);
    if (gpa >= end) {
        return;
    }

    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
    reg.size = end - gpa;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
    trace_vfio_prereg_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
}

static const MemoryListener vfio_prereg_listener = {
    .name = "vfio-pre-reg",
    .region_add = vfio_prereg_listener_region_add,
    .region_del = vfio_prereg_listener_region_del,
};

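/*
 * Host DMA windows describe the IOVA ranges the host IOMMU backend has
 * made available for mapping, along with the IOMMU page sizes each
 * window supports. Guest DMA windows must fit entirely inside one of
 * them.
 */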
static void vfio_host_win_add(VFIOSpaprContainer *scontainer, hwaddr min_iova,
                              hwaddr max_iova, uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapping IOMMU windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&scontainer->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOSpaprContainer *scontainer,
                             hwaddr min_iova, hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            g_free(hostwin);
            return 0;
        }
    }

    return -1;
}

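/* Return the host window fully covering [iova, end], or NULL if none does. */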
static VFIOHostDMAWindow *vfio_find_hostwin(VFIOSpaprContainer *container,
                                            hwaddr iova, hwaddr end)
{
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found = false;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    return hostwin_found ? hostwin : NULL;
}

static int vfio_spapr_remove_window(VFIOContainer *container,
                                    hwaddr offset_within_address_space)
{
    struct vfio_iommu_spapr_tce_remove remove = {
        .argsz = sizeof(remove),
        .start_addr = offset_within_address_space,
    };
    int ret;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
    if (ret) {
        error_report("Failed to remove window at %"PRIx64,
                     (uint64_t)remove.start_addr);
        return -errno;
    }

    trace_vfio_spapr_remove_window(offset_within_address_space);

    return 0;
}

static int vfio_spapr_create_window(VFIOContainer *container,
                                    MemoryRegionSection *section,
                                    hwaddr *pgsize)
{
    int ret = 0;
    VFIOContainerBase *bcontainer = &container->bcontainer;
    IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
    uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask;
    unsigned entries, bits_total, bits_per_level, max_levels;
    struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
    long rampagesize = qemu_minrampagesize();

    /*
     * The host might not support the guest supported IOMMU page size,
     * so we will use smaller physical IOMMU pages to back them.
     */
    if (pagesize > rampagesize) {
        pagesize = rampagesize;
    }
    pgmask = bcontainer->pgsizes & (pagesize | (pagesize - 1));
    pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
    if (!pagesize) {
        error_report("Host doesn't support page size 0x%"PRIx64
                     ", the supported mask is 0x%lx",
                     memory_region_iommu_get_min_page_size(iommu_mr),
                     bcontainer->pgsizes);
        return -EINVAL;
    }

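    /*
     * Worked example (illustrative values, not from the original code):
     * with a guest minimum IOMMU page size of 64 KiB (0x10000) and host
     * pgsizes of 4 KiB | 64 KiB (0x11000), pgmask above is
     * 0x11000 & 0x1ffff = 0x11000, and taking the highest set bit
     * selects 64 KiB. If the host offered only 4 KiB pages, pgmask
     * would be 0x1000 and we would back the window with 4 KiB pages.
     */
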
    /*
     * FIXME: For VFIO iommu types which have KVM acceleration to
     * avoid bouncing all map/unmaps through qemu this way, this
     * would be the right place to wire that up (tell the KVM
     * device emulation the VFIO iommu handles to use).
     */
    create.window_size = int128_get64(section->size);
    create.page_shift = ctz64(pagesize);
    /*
     * The SPAPR host supports multilevel TCE tables. We try to guess the
     * optimal number of levels, and if that fails (for example due to
     * host memory fragmentation), we increase the levels. The DMA address
     * structure is:
     * rrrrrrrr rxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx iiiiiiii
     * where:
     *   r = reserved (bits >= 55 are reserved in the existing hardware)
     *   i = IOMMU page offset (64K in this example)
     *   x = bits to index a TCE which can be split into equal chunks to
     *       index within the level.
     * The aim is to split "x" into the smallest possible number of levels.
     */
    entries = create.window_size >> create.page_shift;
    /* bits_total is the number of "x" bits needed */
    bits_total = ctz64(entries * sizeof(uint64_t));
    /*
     * bits_per_level is a safe guess of how much we can allocate per level:
     * 8 is the current minimum for CONFIG_FORCE_MAX_ZONEORDER and MAX_ORDER
     * is usually bigger than that.
     * Below we look at qemu_real_host_page_size as TCEs are allocated from
     * system pages.
     */
    bits_per_level = ctz64(qemu_real_host_page_size()) + 8;
    create.levels = bits_total / bits_per_level;
    if (bits_total % bits_per_level) {
        ++create.levels;
    }

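    /*
     * Worked example (illustrative values, not from the original code):
     * a 1 TiB window with 64 KiB IOMMU pages needs 2^24 TCEs, i.e. a
     * 2^27 byte table, so bits_total = 27. With 64 KiB host pages,
     * bits_per_level = 16 + 8 = 24, giving create.levels = 2.
     */
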
    max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size());
    for ( ; create.levels <= max_levels; ++create.levels) {
        ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
        if (!ret) {
            break;
        }
    }
    if (ret) {
        error_report("Failed to create a window, ret = %d (%m)", ret);
        return -errno;
    }

    if (create.start_addr != section->offset_within_address_space) {
        vfio_spapr_remove_window(container, create.start_addr);

        error_report("Host doesn't support DMA window at %"HWADDR_PRIx", "
                     "must be %"PRIx64,
                     section->offset_within_address_space,
                     (uint64_t)create.start_addr);
        return -EINVAL;
    }
    trace_vfio_spapr_create_window(create.page_shift,
                                   create.levels,
                                   create.window_size,
                                   create.start_addr);
    *pgsize = pagesize;

    return 0;
}

static bool
vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer,
                                        MemoryRegionSection *section,
                                        Error **errp)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
                                                  container);
    VFIOHostDMAWindow *hostwin;
    hwaddr pgsize = 0;
    int ret;

    /*
     * VFIO_SPAPR_TCE_IOMMU supports a single host window between
     * [dma32_window_start, dma32_window_size), so we need to ensure
     * the section falls within this range.
     */
    if (container->iommu_type == VFIO_SPAPR_TCE_IOMMU) {
        hwaddr iova, end;

        iova = section->offset_within_address_space;
        end = iova + int128_get64(section->size) - 1;

        if (!vfio_find_hostwin(scontainer, iova, end)) {
            error_setg(errp, "Container %p can't map guest IOVA region"
                       " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container,
                       iova, end);
            return false;
        }
        return true;
    }

    if (container->iommu_type != VFIO_SPAPR_TCE_v2_IOMMU) {
        return true;
    }

    /* For now intersections are not allowed, we may relax this later */
    QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           section->offset_within_address_space,
                           int128_get64(section->size))) {
            error_setg(errp,
                "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing "
                "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(section->size) - 1,
                hostwin->min_iova, hostwin->max_iova);
            return false;
        }
    }

    ret = vfio_spapr_create_window(container, section, &pgsize);
    if (ret) {
        error_setg_errno(errp, -ret, "Failed to create SPAPR window");
        return false;
    }

    vfio_host_win_add(scontainer, section->offset_within_address_space,
                      section->offset_within_address_space +
                      int128_get64(section->size) - 1, pgsize);

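    /*
     * Let in-kernel acceleration know about the new TCE table so that
     * H_PUT_TCE and friends can be handled without exiting to QEMU:
     * hand the table fd to the KVM VFIO device for every group in the
     * container.
     */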
#ifdef CONFIG_KVM
    if (kvm_enabled()) {
        VFIOGroup *group;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        struct kvm_vfio_spapr_tce param;
        struct kvm_device_attr attr = {
            .group = KVM_DEV_VFIO_GROUP,
            .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
            .addr = (uint64_t)(unsigned long)&param,
        };

        if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                          &param.tablefd)) {
            QLIST_FOREACH(group, &container->group_list, container_next) {
                param.groupfd = group->fd;
                if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                    error_setg_errno(errp, errno,
                                     "vfio: failed GROUP_SET_SPAPR_TCE for "
                                     "KVM VFIO device %d and group fd %d",
                                     param.tablefd, param.groupfd);
                    return false;
                }
                trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
            }
        }
    }
#endif
    return true;
}

static void
vfio_spapr_container_del_section_window(VFIOContainerBase *bcontainer,
                                        MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
                                                  container);

    if (container->iommu_type != VFIO_SPAPR_TCE_v2_IOMMU) {
        return;
    }

    vfio_spapr_remove_window(container,
                             section->offset_within_address_space);
    if (vfio_host_win_del(scontainer,
                          section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1) < 0) {
        hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                 __func__, section->offset_within_address_space);
    }
}

static void vfio_spapr_container_release(VFIOContainerBase *bcontainer)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
                                                  container);
    VFIOHostDMAWindow *hostwin, *next;

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&scontainer->prereg_listener);
    }
    QLIST_FOREACH_SAFE(hostwin, &scontainer->hostwin_list, hostwin_next,
                       next) {
        QLIST_REMOVE(hostwin, hostwin_next);
        g_free(hostwin);
    }
}

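/*
 * Container setup: for the v1 sPAPR TCE IOMMU, just enable the container;
 * for v2, register the RAM preregistration listener and remove the
 * kernel's default 32-bit window so the window callbacks above can
 * create and remove windows on demand.
 */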
static bool vfio_spapr_container_setup(VFIOContainerBase *bcontainer,
                                       Error **errp)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
                                                  container);
    struct vfio_iommu_spapr_tce_info info;
    bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
    int ret, fd = container->fd;

    QLIST_INIT(&scontainer->hostwin_list);

    /*
     * The host kernel code implementing VFIO_IOMMU_DISABLE is called
     * when the container fd is closed, so we do not call it explicitly
     * in this file.
     */
    if (!v2) {
        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
        if (ret) {
            error_setg_errno(errp, errno, "failed to enable container");
            return false;
        }
    } else {
        scontainer->prereg_listener = vfio_prereg_listener;

        memory_listener_register(&scontainer->prereg_listener,
                                 &address_space_memory);
        if (bcontainer->error) {
            error_propagate_prepend(errp, bcontainer->error,
                    "RAM memory listener initialization failed: ");
            goto listener_unregister_exit;
        }
    }

    info.argsz = sizeof(info);
    ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
    if (ret) {
        error_setg_errno(errp, errno,
                         "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
        goto listener_unregister_exit;
    }

    if (v2) {
        bcontainer->pgsizes = info.ddw.pgsizes;
        /*
         * A just-created container comes with a default window.
         * To keep region_add/del simple, remove this window now and
         * let the iommu_listener callbacks create/remove windows as
         * needed.
         */
        ret = vfio_spapr_remove_window(container, info.dma32_window_start);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "failed to remove existing window");
            goto listener_unregister_exit;
        }
    } else {
        /* The default table uses 4K pages */
        bcontainer->pgsizes = 0x1000;
        vfio_host_win_add(scontainer, info.dma32_window_start,
                          info.dma32_window_start +
                          info.dma32_window_size - 1,
                          0x1000);
    }

    return true;

listener_unregister_exit:
    if (v2) {
        memory_listener_unregister(&scontainer->prereg_listener);
    }
    return false;
}

static void vfio_iommu_spapr_class_init(ObjectClass *klass, void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->add_window = vfio_spapr_container_add_section_window;
    vioc->del_window = vfio_spapr_container_del_section_window;
    vioc->release = vfio_spapr_container_release;
    vioc->setup = vfio_spapr_container_setup;
}

static const TypeInfo types[] = {
    {
        .name = TYPE_VFIO_IOMMU_SPAPR,
        .parent = TYPE_VFIO_IOMMU_LEGACY,
        .instance_size = sizeof(VFIOSpaprContainer),
        .class_init = vfio_iommu_spapr_class_init,
    },
};

DEFINE_TYPES(types)