/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Prem Mallappa <pmallapp@broadcom.com>
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "exec/target_page.h"
#include "hw/core/cpu.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/jhash.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "hw/arm/smmu-common.h"
#include "smmu-internal.h"

/* IOTLB Management */

static guint smmu_iotlb_key_hash(gconstpointer v)
{
    SMMUIOTLBKey *key = (SMMUIOTLBKey *)v;
    uint32_t a, b, c;

    /* Jenkins hash */
    a = b = c = JHASH_INITVAL + sizeof(*key);
    a += key->asid + key->vmid + key->level + key->tg;
    b += extract64(key->iova, 0, 32);
    c += extract64(key->iova, 32, 32);

    __jhash_mix(a, b, c);
    __jhash_final(a, b, c);

    return c;
}

static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
{
    SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2;

    return (k1->asid == k2->asid) && (k1->iova == k2->iova) &&
           (k1->level == k2->level) && (k1->tg == k2->tg) &&
           (k1->vmid == k2->vmid);
}

SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova,
                                uint8_t tg, uint8_t level)
{
    SMMUIOTLBKey key = {.asid = asid, .vmid = vmid, .iova = iova,
                        .tg = tg, .level = level};

    return key;
}
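
/*
 * Look for a cached translation covering @iova.
 *
 * The cache key includes the translation granule (tg, encoded as 1/2/3
 * for 4K/16K/64K) and the level at which the translation terminated, so
 * a single IOVA may be cached under entries of several block sizes.
 * The probe below therefore starts at the coarsest level the configured
 * input size allows and retries with the IOVA masked to each finer block
 * size until a match is found or level 3 (page granularity) has been tried.
 */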
SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
                                SMMUTransTableInfo *tt, hwaddr iova)
{
    uint8_t tg = (tt->granule_sz - 10) / 2;
    uint8_t inputsize = 64 - tt->tsz;
    uint8_t stride = tt->granule_sz - 3;
    uint8_t level = 4 - (inputsize - 4) / stride;
    SMMUTLBEntry *entry = NULL;

    while (level <= 3) {
        uint64_t subpage_size = 1ULL << level_shift(level, tt->granule_sz);
        uint64_t mask = subpage_size - 1;
        SMMUIOTLBKey key;

        key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid,
                                 iova & ~mask, tg, level);
        entry = g_hash_table_lookup(bs->iotlb, &key);
        if (entry) {
            break;
        }
        level++;
    }

    if (entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova,
                                    cfg->iotlb_hits, cfg->iotlb_misses,
                                    100 * cfg->iotlb_hits /
                                    (cfg->iotlb_hits + cfg->iotlb_misses));
    } else {
        cfg->iotlb_misses++;
        trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova,
                                     cfg->iotlb_hits, cfg->iotlb_misses,
                                     100 * cfg->iotlb_hits /
                                     (cfg->iotlb_hits + cfg->iotlb_misses));
    }

    return entry;
}

void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
{
    SMMUIOTLBKey *key = g_new0(SMMUIOTLBKey, 1);
    uint8_t tg = (new->granule - 10) / 2;

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    *key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
                              tg, new->level);
    trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
                            tg, new->level);
    g_hash_table_insert(bs->iotlb, key, new);
}

void smmu_iotlb_inv_all(SMMUState *s)
{
    trace_smmu_iotlb_inv_all();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
                                         gpointer user_data)
{
    uint16_t asid = *(uint16_t *)user_data;
    SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;

    return SMMU_IOTLB_ASID(*iotlb_key) == asid;
}

static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value,
                                         gpointer user_data)
{
    uint16_t vmid = *(uint16_t *)user_data;
    SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;

    return SMMU_IOTLB_VMID(*iotlb_key) == vmid;
}

static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value,
                                                   gpointer user_data)
{
    SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
    IOMMUTLBEntry *entry = &iter->entry;
    SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
    SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;

    if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) {
        return false;
    }
    if (info->vmid >= 0 && info->vmid != SMMU_IOTLB_VMID(iotlb_key)) {
        return false;
    }
    return ((info->iova & ~entry->addr_mask) == entry->iova) ||
           ((entry->iova & ~info->mask) == info->iova);
}
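
/*
 * smmu_iotlb_inv_iova - invalidate cached entries matching an IOVA range.
 *
 * Worked example (illustrative, not part of the original source): with
 * tg = 1 (4KB granule, so granule = 12) and num_pages = 16, the mask
 * computed below is 0xffff; entries that cover @iova, or whose base
 * address falls within [iova, iova + 64KB), are removed.
 */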
void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
                         uint8_t tg, uint64_t num_pages, uint8_t ttl)
{
    /* If tg is not set, use 4KB range invalidation */
    uint8_t granule = tg ? tg * 2 + 10 : 12;

    if (ttl && (num_pages == 1) && (asid >= 0)) {
        SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, iova, tg, ttl);

        if (g_hash_table_remove(s->iotlb, &key)) {
            return;
        }
        /*
         * If the entry is not found, check whether the IOVA is covered
         * by a larger (coarser-level) IOTLB entry.
         */
    }

    SMMUIOTLBPageInvInfo info = {
        .asid = asid, .iova = iova,
        .vmid = vmid,
        .mask = (num_pages << granule) - 1};

    g_hash_table_foreach_remove(s->iotlb,
                                smmu_hash_remove_by_asid_vmid_iova,
                                &info);
}

void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
{
    trace_smmu_iotlb_inv_asid(asid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid);
}

void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid)
{
    trace_smmu_iotlb_inv_vmid(vmid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid);
}

/* VMSAv8-64 Translation */

/**
 * get_pte - Get the content of a page table entry located at
 * @baseaddr[@index]
 */
static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
                   SMMUPTWEventInfo *info)
{
    int ret;
    dma_addr_t addr = baseaddr + index * sizeof(*pte);

    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED);

    if (ret != MEMTX_OK) {
        info->type = SMMU_PTW_ERR_WALK_EABT;
        info->addr = addr;
        return -EINVAL;
    }
    trace_smmu_get_pte(baseaddr, index, addr, *pte);
    return 0;
}

/* VMSAv8-64 Translation Table Format Descriptor Decoding */

/**
 * get_page_pte_address - returns the L3 descriptor output address,
 * i.e. the page frame
 * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format
 */
static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}

/**
 * get_table_pte_address - return table descriptor output address,
 * i.e. the address of the next level table
 * ARM ARM Figure D4-16 VMSAv8-64 level 0, level 1, and level 2 descriptor formats
 */
static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}

/**
 * get_block_pte_address - return block descriptor output address and block size
 * ARM ARM Figure D4-16 VMSAv8-64 level 0, level 1, and level 2 descriptor formats
 */
static inline hwaddr get_block_pte_address(uint64_t pte, int level,
                                           int granule_sz, uint64_t *bsz)
{
    int n = level_shift(level, granule_sz);

    *bsz = 1ULL << n;
    return PTE_ADDRESS(pte, n);
}
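
/*
 * Note (illustrative, following the VMSAv8-64 translation scheme): with a
 * 4KB granule (granule_sz == 12), a block descriptor found at level 2
 * yields *bsz == 2MB above, and one found at level 1 yields *bsz == 1GB.
 */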

SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
{
    bool tbi = extract64(iova, 55, 1) ? TBI1(cfg->tbi) : TBI0(cfg->tbi);
    uint8_t tbi_byte = tbi * 8;

    if (cfg->tt[0].tsz &&
        !extract64(iova, 64 - cfg->tt[0].tsz, cfg->tt[0].tsz - tbi_byte)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        return &cfg->tt[0];
    } else if (cfg->tt[1].tsz &&
        sextract64(iova, 64 - cfg->tt[1].tsz, cfg->tt[1].tsz - tbi_byte) == -1) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        return &cfg->tt[1];
    } else if (!cfg->tt[0].tsz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        return &cfg->tt[0];
    } else if (!cfg->tt[1].tsz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        return &cfg->tt[1];
    }
    /* in the gap between the two regions, this is a Translation fault */
    return NULL;
}
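
/*
 * Illustrative example (not from the original source): with
 * tt[0].tsz == tt[1].tsz == 16 and TBI disabled, IOVAs whose bits [63:48]
 * are all zero select tt[0] (the TTBR0 region), IOVAs whose bits [63:48]
 * are all one select tt[1] (the TTBR1 region), and anything in between
 * makes select_tt() return NULL, i.e. a Translation fault.
 */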

/**
 * smmu_ptw_64_s1 - VMSAv8-64 Walk of the page tables for a given IOVA
 * @cfg: translation config
 * @iova: iova to translate
 * @perm: access type
 * @tlbe: SMMUTLBEntry (out)
 * @info: handle to an error info
 *
 * Return 0 on success, < 0 on error. In case of error, @info is filled
 * and tlbe->perm is set to IOMMU_NONE.
 * Upon success, @tlbe is filled with translated_addr and entry
 * permission rights.
 */
static int smmu_ptw_64_s1(SMMUTransCfg *cfg,
                          dma_addr_t iova, IOMMUAccessFlags perm,
                          SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    dma_addr_t baseaddr, indexmask;
    int stage = cfg->stage;
    SMMUTransTableInfo *tt = select_tt(cfg, iova);
    uint8_t level, granule_sz, inputsize, stride;

    if (!tt || tt->disabled) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    granule_sz = tt->granule_sz;
    stride = VMSA_STRIDE(granule_sz);
    inputsize = 64 - tt->tsz;
    level = 4 - (inputsize - 4) / stride;
    indexmask = VMSA_IDXMSK(inputsize, stride, level);

    baseaddr = extract64(tt->ttb, 0, 48);
    baseaddr &= ~indexmask;

    while (level < VMSA_LEVELS) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
        uint64_t pte, gpa;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
            goto error;
        }
        trace_smmu_ptw_level(stage, level, iova, subpage_size,
                             baseaddr, offset, pte);

        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            break;
        }

        if (is_table_pte(pte, level)) {
            ap = PTE_APTABLE(pte);

            if (is_permission_fault(ap, perm) && !tt->had) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }
            baseaddr = get_table_pte_address(pte, granule_sz);
            level++;
            continue;
        } else if (is_page_pte(pte, level)) {
            gpa = get_page_pte_address(pte, granule_sz);
            trace_smmu_ptw_page_pte(stage, level, iova,
                                    baseaddr, pte_addr, pte, gpa);
        } else {
            uint64_t block_size;

            gpa = get_block_pte_address(pte, level, granule_sz,
                                        &block_size);
            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, iova, gpa,
                                     block_size >> 20);
        }

        /*
         * QEMU does not currently implement HTTU, so if AFFD and PTE.AF
         * are 0 we take an Access flag fault. (5.4. Context Descriptor)
         * An Access flag fault takes priority over a Permission fault.
         */
        if (!PTE_AF(pte) && !cfg->affd) {
            info->type = SMMU_PTW_ERR_ACCESS;
            goto error;
        }

        ap = PTE_AP(pte);
        if (is_permission_fault(ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }

        tlbe->entry.translated_addr = gpa;
        tlbe->entry.iova = iova & ~mask;
        tlbe->entry.addr_mask = mask;
        tlbe->entry.perm = PTE_AP_TO_PERM(ap);
        tlbe->level = level;
        tlbe->granule = granule_sz;
        return 0;
    }
    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    info->stage = 1;
    tlbe->entry.perm = IOMMU_NONE;
    return -EINVAL;
}

/**
 * smmu_ptw_64_s2 - VMSAv8-64 Walk of the stage-2 page tables for a given IPA
 * @cfg: translation config
 * @ipa: ipa to translate
 * @perm: access type
 * @tlbe: SMMUTLBEntry (out)
 * @info: handle to an error info
 *
 * Return 0 on success, < 0 on error. In case of error, @info is filled
 * and tlbe->perm is set to IOMMU_NONE.
 * Upon success, @tlbe is filled with translated_addr and entry
 * permission rights.
 */
static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
                          dma_addr_t ipa, IOMMUAccessFlags perm,
                          SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    const int stage = 2;
    int granule_sz = cfg->s2cfg.granule_sz;
    /* ARM DDI0487I.a: Table D8-7. */
    int inputsize = 64 - cfg->s2cfg.tsz;
    int level = get_start_level(cfg->s2cfg.sl0, granule_sz);
    int stride = VMSA_STRIDE(granule_sz);
    int idx = pgd_concat_idx(level, granule_sz, ipa);
    /*
     * Get the ttb from the concatenated structure.
     * The offset is idx * (size of each ttb), where the size of each
     * ttb is the number of PTEs it holds times sizeof(pte).
     */
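    /*
     * Illustrative arithmetic (not from the original source): with a
     * 4KB granule, stride is 9, so each concatenated table holds
     * (1 << 9) = 512 PTEs of 8 bytes, i.e. 4KB, and the selected table
     * starts at vttb + idx * 4KB.
     */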
    uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, 48) + (1 << stride) *
                        idx * sizeof(uint64_t);
    dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level);

    baseaddr &= ~indexmask;

    /*
     * On input, a stage 2 Translation fault occurs if the IPA is outside the
     * range configured by the relevant S2T0SZ field of the STE.
     */
    if (ipa >= (1ULL << inputsize)) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    while (level < VMSA_LEVELS) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz);
        uint64_t pte, gpa;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t s2ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
            goto error;
        }
        trace_smmu_ptw_level(stage, level, ipa, subpage_size,
                             baseaddr, offset, pte);
        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            break;
        }

        if (is_table_pte(pte, level)) {
            baseaddr = get_table_pte_address(pte, granule_sz);
            level++;
            continue;
        } else if (is_page_pte(pte, level)) {
            gpa = get_page_pte_address(pte, granule_sz);
            trace_smmu_ptw_page_pte(stage, level, ipa,
                                    baseaddr, pte_addr, pte, gpa);
        } else {
            uint64_t block_size;

            gpa = get_block_pte_address(pte, level, granule_sz,
                                        &block_size);
            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, ipa, gpa,
                                     block_size >> 20);
        }

        /*
         * If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry)
         * An Access fault takes priority over a Permission fault.
         */
        if (!PTE_AF(pte) && !cfg->s2cfg.affd) {
            info->type = SMMU_PTW_ERR_ACCESS;
            goto error;
        }

        s2ap = PTE_AP(pte);
        if (is_permission_fault_s2(s2ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }

        /*
         * The address output from the translation causes a stage 2 Address
         * Size fault if it exceeds the effective PA output range.
         */
        if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) {
            info->type = SMMU_PTW_ERR_ADDR_SIZE;
            goto error;
        }

        tlbe->entry.translated_addr = gpa;
        tlbe->entry.iova = ipa & ~mask;
        tlbe->entry.addr_mask = mask;
        tlbe->entry.perm = s2ap;
        tlbe->level = level;
        tlbe->granule = granule_sz;
        return 0;
    }
    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    info->stage = 2;
    tlbe->entry.perm = IOMMU_NONE;
    return -EINVAL;
}

/**
 * smmu_ptw - Walk the page tables for an IOVA, according to @cfg
 *
 * @cfg: translation configuration
 * @iova: iova to translate
 * @perm: tentative access type
 * @tlbe: returned entry
 * @info: ptw event handle
 *
 * Return 0 on success.
 */
int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
             SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    if (cfg->stage == 1) {
        return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info);
    } else if (cfg->stage == 2) {
        /*
         * If stage 1 is bypassed (or unimplemented), the input address is
         * passed directly to stage 2 as an IPA. If the input address of a
         * transaction exceeds the size of the IAS, a stage 1 Address Size
         * fault occurs.
         * For AA64, IAS = OAS according to (IHI 0070.E.a) "3.4 Address sizes"
         */
        if (iova >= (1ULL << cfg->oas)) {
            info->type = SMMU_PTW_ERR_ADDR_SIZE;
            info->stage = 1;
            tlbe->entry.perm = IOMMU_NONE;
            return -EINVAL;
        }

        return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info);
    }

    g_assert_not_reached();
}
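
/*
 * Hedged usage sketch (illustrative only, not part of the original file):
 * a caller that has already decoded a translation config would typically
 * invoke the walker roughly as follows, reporting @info on failure:
 *
 *     SMMUTLBEntry tlbe;
 *     SMMUPTWEventInfo info = {};
 *
 *     if (smmu_ptw(cfg, addr, IOMMU_RO, &tlbe, &info)) {
 *         // raise an event based on info.type / info.stage
 *     } else {
 *         // use tlbe.entry.translated_addr and tlbe.entry.perm
 *     }
 */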

/**
 * The bus number is used for lookup when SID-based invalidation occurs.
 * In that case we lazily populate the SMMUPciBus array from the bus hash
 * table. At the time the SMMUPciBus is created (smmu_find_add_as), the bus
 * numbers may not always be initialized yet.
 */
SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num)
{
    SMMUPciBus *smmu_pci_bus = s->smmu_pcibus_by_bus_num[bus_num];
    GHashTableIter iter;

    if (smmu_pci_bus) {
        return smmu_pci_bus;
    }

    g_hash_table_iter_init(&iter, s->smmu_pcibus_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&smmu_pci_bus)) {
        if (pci_bus_num(smmu_pci_bus->bus) == bus_num) {
            s->smmu_pcibus_by_bus_num[bus_num] = smmu_pci_bus;
            return smmu_pci_bus;
        }
    }

    return NULL;
}

static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn)
{
    SMMUState *s = opaque;
    SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus);
    SMMUDevice *sdev;
    static unsigned int index;

    if (!sbus) {
        sbus = g_malloc0(sizeof(SMMUPciBus) +
                         sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, index++);

        sdev = sbus->pbdev[devfn] = g_new0(SMMUDevice, 1);

        sdev->smmu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu),
                                 s->mrtypename,
                                 OBJECT(s), name, UINT64_MAX);
        address_space_init(&sdev->as,
                           MEMORY_REGION(&sdev->iommu), name);
        trace_smmu_add_mr(name);
        g_free(name);
    }

    return &sdev->as;
}

static const PCIIOMMUOps smmu_ops = {
    .get_address_space = smmu_find_add_as,
};

IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    SMMUPciBus *smmu_bus;
    SMMUDevice *smmu;

    bus_n = PCI_BUS_NUM(sid);
    smmu_bus = smmu_find_smmu_pcibus(s, bus_n);
    if (smmu_bus) {
        devfn = SMMU_PCI_DEVFN(sid);
        smmu = smmu_bus->pbdev[devfn];
        if (smmu) {
            return &smmu->iommu;
        }
    }
    return NULL;
}

/* Unmap all notifiers attached to @mr */
static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr)
{
    IOMMUNotifier *n;

    trace_smmu_inv_notifiers_mr(mr->parent_obj.name);
    IOMMU_NOTIFIER_FOREACH(n, mr) {
        memory_region_unmap_iommu_notifier_range(n);
    }
}

/* Unmap all notifiers of all memory regions */
void smmu_inv_notifiers_all(SMMUState *s)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        smmu_inv_notifiers_mr(&sdev->iommu);
    }
}

static void smmu_base_realize(DeviceState *dev, Error **errp)
{
    SMMUState *s = ARM_SMMU(dev);
    SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev);
    Error *local_err = NULL;

    sbc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    s->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
    s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal,
                                     g_free, g_free);
    s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, &smmu_ops, s);
    } else {
        error_setg(errp, "SMMU is not attached to any PCI bus!");
    }
}

static void smmu_base_reset_hold(Object *obj, ResetType type)
{
    SMMUState *s = ARM_SMMU(obj);

    memset(s->smmu_pcibus_by_bus_num, 0, sizeof(s->smmu_pcibus_by_bus_num));

    g_hash_table_remove_all(s->configs);
    g_hash_table_remove_all(s->iotlb);
}

static Property smmu_dev_properties[] = {
    DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0),
    DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus,
                     TYPE_PCI_BUS, PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};

static void smmu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass);

    device_class_set_props(dc, smmu_dev_properties);
    device_class_set_parent_realize(dc, smmu_base_realize,
                                    &sbc->parent_realize);
    rc->phases.hold = smmu_base_reset_hold;
}

static const TypeInfo smmu_base_info = {
    .name = TYPE_ARM_SMMU,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SMMUState),
    .class_data = NULL,
    .class_size = sizeof(SMMUBaseClass),
    .class_init = smmu_base_class_init,
    .abstract = true,
};

static void smmu_base_register_types(void)
{
    type_register_static(&smmu_base_info);
}

type_init(smmu_base_register_types)