/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }

        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }

    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}
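
/*
 * GERROR/GERRORN implement a toggle-based acknowledge protocol: an error
 * bit is "active" while the corresponding bits of GERROR and GERRORN
 * differ. The guest acknowledges an error by toggling the matching
 * GERRORN bit back to the GERROR value.
 */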
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}
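
/*
 * Push one event record to the event queue. Returns MEMTX_ERROR if the
 * queue is disabled or already full; once the record has been written,
 * the EVTQ interrupt is triggered.
 */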
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        /* fill the CD fetch record, not the STE fetch one */
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range: valid stream IDs are 0 .. 2^SIDSIZE - 1 */
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = s->strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
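
/*
 * Decode the Context Descriptor. Only AArch64, stage-1, non-stalling
 * configurations with HTTU disabled are accepted; the TT0/TT1 parameters
 * (TSZ, TG, TTB) are extracted for each enabled translation table.
 */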
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * Return < 0 in case of config decoding error (@event is filled
 * accordingly), 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev; on a cache miss, decode the configuration structures from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding in the form of an SMMUTransCfg struct. The hash table is
 * indexed by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}
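
/*
 * Translation flow: look up (or decode) the per-device config, honour
 * abort/bypass, then search the IOTLB with an (asid, page-aligned iova)
 * key. On a miss, perform the page table walk and cache the resulting
 * entry. The epilogue turns the translation status into the returned
 * IOMMUTLBEntry and records an event on error.
 */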
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    IOMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    SMMUIOTLBKey key, *new_key;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (event.record_trans_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    key.asid = cfg->asid;
    key.iova = aligned_addr;

    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
    if (cached_entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
                                   cfg->iotlb_hits, cfg->iotlb_misses,
                                   100 * cfg->iotlb_hits /
                                   (cfg->iotlb_hits + cfg->iotlb_misses));
        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cfg->iotlb_misses++;
    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
                                cfg->iotlb_hits, cfg->iotlb_misses,
                                100 * cfg->iotlb_hits /
                                (cfg->iotlb_hits + cfg->iotlb_misses));

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    cached_entry = g_new0(IOMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        new_key = g_new0(SMMUIOTLBKey, 1);
        new_key->asid = cfg->asid;
        new_key->iova = aligned_addr;
        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        entry.translated_addr = cached_entry->translated_addr +
                                (addr & page_mask);
        entry.addr_mask = cached_entry->addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64"(%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid,
                               dma_addr_t iova)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUEventInfo event = {.inval_ste_allowed = true};
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg;
    IOMMUTLBEntry entry;

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        return;
    }

    if (asid >= 0 && cfg->asid != asid) {
        return;
    }

    tt = select_tt(cfg, iova);
    if (!tt) {
        return;
    }

    entry.target_as = &address_space_memory;
    entry.iova = iova;
    entry.addr_mask = (1 << tt->granule_sz) - 1;
    entry.perm = IOMMU_NONE;

    memory_region_notify_one(n, &entry);
}

/* invalidate an asid/iova tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova);
        }
    }
}
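
/*
 * Consume commands from the command queue until it becomes empty or an
 * error occurs. On error the loop stops, the error code is reported in
 * the CMDQ consumer register and a CMDQ_ERR GERROR interrupt is raised;
 * the consumer index is only advanced once a command has completed.
 */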
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t start = CMD_SID(&cmd), end, i;
            uint8_t range = CMD_STE_RANGE(&cmd);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            end = start + (1 << (range + 1)) - 1;
            trace_smmuv3_cmdq_cfgi_ste_range(start, end);

            for (i = start; i <= end; i++) {
                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
                SMMUDevice *sdev;

                if (!mr) {
                    continue;
                }
                sdev = container_of(mr, SMMUDevice, iommu);
                smmuv3_flush_config(sdev);
            }
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        {
            dma_addr_t addr = CMD_ADDR(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
            smmuv3_inv_notifiers_iova(bs, -1, addr);
            smmu_iotlb_inv_all(bs);
            break;
        }
        case SMMU_CMD_TLBI_NH_VA:
        {
            uint16_t asid = CMD_ASID(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);
            dma_addr_t addr = CMD_ADDR(&cmd);
            bool leaf = CMD_LEAF(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
            smmuv3_inv_notifiers_iova(bs, asid, addr);
            smmu_iotlb_inv_iova(bs, asid, addr);
            break;
        }
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
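
/*
 * MMIO register accessors: 64-bit and 32-bit accesses are dispatched to
 * separate handlers; 64-bit registers may also be written or read as two
 * 32-bit halves at offset and offset + 4.
 */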
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name = TYPE_ARM_SMMUV3,
    .parent = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size = sizeof(SMMUv3Class),
    .class_init = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)