/* smmu-common.c */

/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Prem Mallappa <pmallapp@broadcom.com>
 *
 */

#include "qemu/osdep.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "exec/target_page.h"
#include "hw/core/cpu.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/jhash.h"
#include "qemu/module.h"
#include "qemu/error-report.h"

#include "hw/arm/smmu-common.h"
#include "smmu-internal.h"

/* IOTLB Management */

inline void smmu_iotlb_inv_all(SMMUState *s)
{
    trace_smmu_iotlb_inv_all();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
                                         gpointer user_data)
{
    uint16_t asid = *(uint16_t *)user_data;
    SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;

    return iotlb_key->asid == asid;
}

inline void smmu_iotlb_inv_iova(SMMUState *s, uint16_t asid, dma_addr_t iova)
{
    SMMUIOTLBKey key = {.asid = asid, .iova = iova};

    trace_smmu_iotlb_inv_iova(asid, iova);
    g_hash_table_remove(s->iotlb, &key);
}

inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
{
    trace_smmu_iotlb_inv_asid(asid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid);
}
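
/*
 * Illustrative usage: the helpers above are meant to be called by a
 * derived SMMU model when it decodes guest TLB invalidation commands.
 * A rough sketch of such a dispatcher (the command and variable names
 * below are placeholders, not symbols defined in this file):
 *
 *     switch (cmd_type) {
 *     case CMD_INV_ALL:  smmu_iotlb_inv_all(s);              break;
 *     case CMD_INV_ASID: smmu_iotlb_inv_asid(s, asid);       break;
 *     case CMD_INV_VA:   smmu_iotlb_inv_iova(s, asid, iova); break;
 *     }
 */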

/* VMSAv8-64 Translation */

/**
 * get_pte - Get the content of a page table entry located at
 * @baseaddr[@index]
 */
static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
                   SMMUPTWEventInfo *info)
{
    int ret;
    dma_addr_t addr = baseaddr + index * sizeof(*pte);

    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte));

    if (ret != MEMTX_OK) {
        info->type = SMMU_PTW_ERR_WALK_EABT;
        info->addr = addr;
        return -EINVAL;
    }
    trace_smmu_get_pte(baseaddr, index, addr, *pte);
    return 0;
}

/* VMSAv8-64 Translation Table Format Descriptor Decoding */

/**
 * get_page_pte_address - returns the L3 descriptor output address,
 * i.e. the page frame
 * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format
 */
static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}

/**
 * get_table_pte_address - return table descriptor output address,
 * i.e. address of next level table
 * ARM ARM Figure D4-16 VMSAv8-64 level 0, level 1, and level 2 descriptor formats
 */
static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}

/**
 * get_block_pte_address - return block descriptor output address and block size
 * ARM ARM Figure D4-16 VMSAv8-64 level 0, level 1, and level 2 descriptor formats
 */
static inline hwaddr get_block_pte_address(uint64_t pte, int level,
                                           int granule_sz, uint64_t *bsz)
{
    int n = level_shift(level, granule_sz);

    *bsz = 1ULL << n;
    return PTE_ADDRESS(pte, n);
}
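
/*
 * Worked example (assuming the usual definition of level_shift() in
 * smmu-internal.h, i.e. granule_sz + (3 - level) * (granule_sz - 3)):
 * with a 4KB granule (granule_sz = 12), a level 3 page maps
 * 1 << 12 = 4KB, a level 2 block maps 1 << 21 = 2MB and a level 1 block
 * maps 1 << 30 = 1GB, matching the VMSAv8-64 block sizes.
 */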

SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
{
    bool tbi = extract64(iova, 55, 1) ? TBI1(cfg->tbi) : TBI0(cfg->tbi);
    uint8_t tbi_byte = tbi * 8;

    if (cfg->tt[0].tsz &&
        !extract64(iova, 64 - cfg->tt[0].tsz, cfg->tt[0].tsz - tbi_byte)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        return &cfg->tt[0];
    } else if (cfg->tt[1].tsz &&
               sextract64(iova, 64 - cfg->tt[1].tsz,
                          cfg->tt[1].tsz - tbi_byte) == -1) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        return &cfg->tt[1];
    } else if (!cfg->tt[0].tsz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        return &cfg->tt[0];
    } else if (!cfg->tt[1].tsz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        return &cfg->tt[1];
    }
    /* in the gap between the two regions, this is a Translation fault */
    return NULL;
}
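
/*
 * Worked example: with TBI disabled and tt[0].tsz = tt[1].tsz = 16
 * (48-bit regions), an IOVA such as 0x0000_0000_4000_0000 has bits
 * [63:48] all zero and selects tt[0], 0xffff_8000_0000_0000 has bits
 * [63:48] all one and selects tt[1], and 0x0001_0000_0000_0000 lies in
 * neither region, so NULL is returned and the caller reports a
 * Translation fault.
 */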

/**
 * smmu_ptw_64 - VMSAv8-64 Walk of the page tables for a given IOVA
 * @cfg: translation config
 * @iova: iova to translate
 * @perm: access type
 * @tlbe: IOMMUTLBEntry (out)
 * @info: handle to an error info
 *
 * Return 0 on success, < 0 on error. In case of error, @info is filled
 * and tlbe->perm is set to IOMMU_NONE.
 * Upon success, @tlbe is filled with translated_addr and entry
 * permission rights.
 */
static int smmu_ptw_64(SMMUTransCfg *cfg,
                       dma_addr_t iova, IOMMUAccessFlags perm,
                       IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    dma_addr_t baseaddr, indexmask;
    int stage = cfg->stage;
    SMMUTransTableInfo *tt = select_tt(cfg, iova);
    uint8_t level, granule_sz, inputsize, stride;

    if (!tt || tt->disabled) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    granule_sz = tt->granule_sz;
    stride = granule_sz - 3;
    inputsize = 64 - tt->tsz;
    level = 4 - (inputsize - 4) / stride;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
    baseaddr = extract64(tt->ttb, 0, 48);
    baseaddr &= ~indexmask;

    tlbe->iova = iova;
    tlbe->addr_mask = (1 << granule_sz) - 1;

    while (level <= 3) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
        uint64_t pte;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
            goto error;
        }
        trace_smmu_ptw_level(level, iova, subpage_size,
                             baseaddr, offset, pte);

        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            info->type = SMMU_PTW_ERR_TRANSLATION;
            goto error;
        }

        if (is_page_pte(pte, level)) {
            uint64_t gpa = get_page_pte_address(pte, granule_sz);

            ap = PTE_AP(pte);
            if (is_permission_fault(ap, perm)) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }

            tlbe->translated_addr = gpa + (iova & mask);
            tlbe->perm = PTE_AP_TO_PERM(ap);
            trace_smmu_ptw_page_pte(stage, level, iova,
                                    baseaddr, pte_addr, pte, gpa);
            return 0;
        }
        if (is_block_pte(pte, level)) {
            uint64_t block_size;
            hwaddr gpa = get_block_pte_address(pte, level, granule_sz,
                                               &block_size);

            ap = PTE_AP(pte);
            if (is_permission_fault(ap, perm)) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }

            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, iova, gpa,
                                     block_size >> 20);

            tlbe->translated_addr = gpa + (iova & mask);
            tlbe->perm = PTE_AP_TO_PERM(ap);
            return 0;
        }

        /* table pte */
        ap = PTE_APTABLE(pte);

        if (is_permission_fault(ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }
        baseaddr = get_table_pte_address(pte, granule_sz);
        level++;
    }

    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    tlbe->perm = IOMMU_NONE;
    return -EINVAL;
}
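
/*
 * Worked example: with a 4KB granule (granule_sz = 12) and tt->tsz = 16,
 * inputsize = 48 and stride = 9, so the walk above starts at
 * level = 4 - (48 - 4) / 9 = 0; each level then resolves 9 bits of the
 * IOVA and a level 3 entry maps a 4KB page.
 */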

/**
 * smmu_ptw - Walk the page tables for an IOVA, according to @cfg
 *
 * @cfg: translation configuration
 * @iova: iova to translate
 * @perm: tentative access type
 * @tlbe: returned entry
 * @info: ptw event handle
 *
 * return 0 on success
 */
inline int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
                    IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    if (!cfg->aa64) {
        /*
         * This code path is not entered as we check this while decoding
         * the configuration data in the derived SMMU model.
         */
        g_assert_not_reached();
    }

    return smmu_ptw_64(cfg, iova, perm, tlbe, info);
}
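
/*
 * A derived model's IOMMU translate callback is the expected caller of
 * smmu_ptw(). A minimal sketch (variable names are placeholders and
 * fault reporting is model specific, nothing in this file does it):
 *
 *     IOMMUTLBEntry entry = { .target_as = &address_space_memory,
 *                             .perm = IOMMU_NONE };
 *     SMMUPTWEventInfo info = {};
 *
 *     if (smmu_ptw(cfg, addr, flag, &entry, &info)) {
 *         ... report info.type / info.addr as a translation fault ...
 *     }
 *     return entry;
 */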

/**
 * The bus number is used for lookup when SID based invalidation occurs.
 * In that case we lazily populate the SMMUPciBus array from the bus hash
 * table. At the time the SMMUPciBus is created (smmu_find_add_as), the bus
 * number may not yet be initialized.
 */
SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num)
{
    SMMUPciBus *smmu_pci_bus = s->smmu_pcibus_by_bus_num[bus_num];
    GHashTableIter iter;

    if (smmu_pci_bus) {
        return smmu_pci_bus;
    }

    g_hash_table_iter_init(&iter, s->smmu_pcibus_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&smmu_pci_bus)) {
        if (pci_bus_num(smmu_pci_bus->bus) == bus_num) {
            s->smmu_pcibus_by_bus_num[bus_num] = smmu_pci_bus;
            return smmu_pci_bus;
        }
    }

    return NULL;
}
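
/*
 * smmu_find_add_as - pci_setup_iommu() callback returning the address
 * space a given PCI device uses for DMA.
 *
 * The SMMUPciBus wrapper for @bus and the SMMUDevice for @devfn are
 * allocated lazily on first use; each SMMUDevice gets its own IOMMU
 * memory region (of the derived model's mrtypename type) and a
 * dedicated address space spanning the SMMU input address range.
 */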

static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn)
{
    SMMUState *s = opaque;
    SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus);
    SMMUDevice *sdev;
    static unsigned int index;

    if (!sbus) {
        sbus = g_malloc0(sizeof(SMMUPciBus) +
                         sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, index++);

        sdev = sbus->pbdev[devfn] = g_new0(SMMUDevice, 1);

        sdev->smmu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu),
                                 s->mrtypename,
                                 OBJECT(s), name, 1ULL << SMMU_MAX_VA_BITS);
        address_space_init(&sdev->as,
                           MEMORY_REGION(&sdev->iommu), name);
        trace_smmu_add_mr(name);
        g_free(name);
    }

    return &sdev->as;
}

IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    SMMUPciBus *smmu_bus;
    SMMUDevice *smmu;

    bus_n = PCI_BUS_NUM(sid);
    smmu_bus = smmu_find_smmu_pcibus(s, bus_n);
    if (smmu_bus) {
        devfn = SMMU_PCI_DEVFN(sid);
        smmu = smmu_bus->pbdev[devfn];
        if (smmu) {
            return &smmu->iommu;
        }
    }
    return NULL;
}
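
/*
 * IOTLB keys are (asid, iova) pairs. smmu_iotlb_key_hash() and
 * smmu_iotlb_key_equal() are registered together on s->iotlb in
 * smmu_base_realize(); GLib requires that keys comparing equal hash to
 * the same value, which holds here since both functions only look at
 * the asid and iova fields.
 */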

static guint smmu_iotlb_key_hash(gconstpointer v)
{
    SMMUIOTLBKey *key = (SMMUIOTLBKey *)v;
    uint32_t a, b, c;

    /* Jenkins hash */
    a = b = c = JHASH_INITVAL + sizeof(*key);
    a += key->asid;
    b += extract64(key->iova, 0, 32);
    c += extract64(key->iova, 32, 32);

    __jhash_mix(a, b, c);
    __jhash_final(a, b, c);

    return c;
}

static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
{
    const SMMUIOTLBKey *k1 = v1;
    const SMMUIOTLBKey *k2 = v2;

    return (k1->asid == k2->asid) && (k1->iova == k2->iova);
}

/* Unmap the whole notifier's range */
static void smmu_unmap_notifier_range(IOMMUNotifier *n)
{
    IOMMUTLBEntry entry;

    entry.target_as = &address_space_memory;
    entry.iova = n->start;
    entry.perm = IOMMU_NONE;
    entry.addr_mask = n->end - n->start;

    memory_region_notify_one(n, &entry);
}

/* Unmap all notifiers attached to @mr */
inline void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr)
{
    IOMMUNotifier *n;

    trace_smmu_inv_notifiers_mr(mr->parent_obj.name);
    IOMMU_NOTIFIER_FOREACH(n, mr) {
        smmu_unmap_notifier_range(n);
    }
}

/* Unmap all notifiers of all mr's */
void smmu_inv_notifiers_all(SMMUState *s)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        smmu_inv_notifiers_mr(&sdev->iommu);
    }
}

static void smmu_base_realize(DeviceState *dev, Error **errp)
{
    SMMUState *s = ARM_SMMU(dev);
    SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev);
    Error *local_err = NULL;

    sbc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    s->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
    s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal,
                                     g_free, g_free);
    s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, smmu_find_add_as, s);
    } else {
        error_setg(errp, "SMMU is not attached to any PCI bus!");
    }
}

static void smmu_base_reset(DeviceState *dev)
{
    SMMUState *s = ARM_SMMU(dev);

    g_hash_table_remove_all(s->configs);
    g_hash_table_remove_all(s->iotlb);
}

static Property smmu_dev_properties[] = {
    DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0),
    DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus, "PCI", PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};

static void smmu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass);

    device_class_set_props(dc, smmu_dev_properties);
    device_class_set_parent_realize(dc, smmu_base_realize,
                                    &sbc->parent_realize);
    dc->reset = smmu_base_reset;
}

static const TypeInfo smmu_base_info = {
    .name          = TYPE_ARM_SMMU,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SMMUState),
    .class_data    = NULL,
    .class_size    = sizeof(SMMUBaseClass),
    .class_init    = smmu_base_class_init,
    .abstract      = true,
};

static void smmu_base_register_types(void)
{
    type_register_static(&smmu_base_info);
}

type_init(smmu_base_register_types)
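
/*
 * TYPE_ARM_SMMU is abstract: a concrete model registers a subtype of it
 * and lets smmu_base_realize() run as its parent realize. An
 * illustrative sketch (the names below are placeholders, not defined
 * here):
 *
 *     static const TypeInfo my_smmu_info = {
 *         .name          = "my-smmu",
 *         .parent        = TYPE_ARM_SMMU,
 *         .instance_size = sizeof(MySMMUState),
 *         .class_size    = sizeof(SMMUBaseClass),
 *         .class_init    = my_smmu_class_init,
 *     };
 *
 * my_smmu_class_init() would typically call device_class_set_parent_realize()
 * again so that its own realize can chain up to smmu_base_realize(),
 * which sets up the config cache, the IOTLB and the pci_setup_iommu()
 * hook.
 */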