/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }

        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }

    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}
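
/*
 * Note on the GERROR/GERRORN protocol: an error is "active" as long as the
 * corresponding bits of GERROR and GERRORN differ.  smmuv3_trigger_irq()
 * toggles a GERROR bit to raise an error, and the guest acknowledges it by
 * toggling the matching GERRORN bit back.  Illustrative values only: with
 * gerror = 0x1 and gerrorn = 0x0, CMDQ_ERR is pending; once the guest
 * writes 0x1 to GERRORN the two registers match again and the error is no
 * longer active.
 */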

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}
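
/*
 * Note on queue geometry: the command and event queues are circular rings
 * whose guest physical base address and log2 size are packed into the low
 * bits of the *_BASE registers (see smmuv3_init_regs() and smmu_writel()).
 * Index arithmetic, including the wrap bit above the index field, is done
 * by the Q_CONS_ENTRY()/Q_PROD_ENTRY() and queue_*_incr() helpers from
 * smmuv3-internal.h: queue_read() fetches the entry at the consumer index,
 * while queue_write() stores at the producer index and then advances it.
 */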

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_OK:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}
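
/*
 * The reset values above deliberately advertise a small feature set:
 * stage 1 only, AArch64 page tables, no stalling fault model and no PRI.
 * The queue sizes, StreamID width and output address size (SMMU_CMDQS,
 * SMMU_EVENTQS, SMMU_IDR1_SIDSIZE, SMMU_IDR5_OAS) are compile-time
 * constants defined in smmuv3-internal.h.
 */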

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns <0 if the caller has no need to continue the translation */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;
    int ret = -EINVAL;

    if (!STE_VALID(ste)) {
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true; /* abort but don't record any event */
        return ret;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return ret;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range */
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "invalid sid=%d (L1STD span=0)\n", sid);
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = s->strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
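
/*
 * Stream table layout handled above (illustrative):
 *
 *  - linear table:  STE address = STRTAB_BASE + sid * sizeof(STE)
 *  - 2-level table: the StreamID is split according to s->sid_split:
 *        L1 index = sid >> sid_split
 *        L2 index = sid & ((1 << sid_split) - 1)
 *    STRTAB_BASE points to an array of L1 descriptors (STEDesc); a valid
 *    descriptor provides an L2 pointer plus a SPAN field bounding how many
 *    STEs are reachable through it.  For example, with sid_split == 8,
 *    sid 0x0123 selects L1 descriptor 0x01 and STE 0x23 of its L2 table.
 */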

static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 if the translation needs to be aborted (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret = -EINVAL;
    STE ste;
    CD cd;

    if (smmu_find_ste(s, sid, &ste, event)) {
        return ret;
    }

    if (decode_ste(s, cfg, &ste, event)) {
        return ret;
    }

    if (smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event)) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}
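
/*
 * Configuration decoding therefore proceeds in three steps: locate the STE
 * for the device's StreamID, decode it (which may mark the transaction as
 * aborted or bypassed rather than fully configured), then fetch and decode
 * the context descriptor it points to so that the stage-1 translation
 * parameters in @cfg are filled for the page table walk.
 */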

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_OK, .sid = sid};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTransCfg cfg = {};
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    int ret = 0;

    if (!smmu_enabled(s)) {
        goto out;
    }

    ret = smmuv3_decode_config(mr, &cfg, &event);
    if (ret) {
        goto out;
    }

    if (cfg.aborted) {
        goto out;
    }

    ret = smmu_ptw(&cfg, addr, flag, &entry, &ptw_info);
    if (ret) {
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
out:
    if (ret) {
        if (cfg.bypassed) {
            /* STE requested bypass: let the access through untranslated */
            entry.perm = flag;
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s translation failed for iova=0x%"PRIx64"(%d)\n",
                          mr->parent_obj.name, addr, ret);
            entry.perm = IOMMU_NONE;
            smmuv3_record_event(s, &event);
        }
    } else if (!cfg.aborted) {
        entry.perm = flag;
        trace_smmuv3_translate(mr->parent_obj.name, sid, addr,
                               entry.translated_addr, entry.perm);
    }

    return entry;
}
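
/*
 * Summary of the translate path: when the SMMU is disabled or the STE
 * selects bypass, the access is passed through untranslated with the
 * requested permission.  When the STE selects abort, or when config
 * decoding or the page table walk fails, the entry is returned with
 * IOMMU_NONE permission and, where the configuration allows it, an event
 * is pushed to the event queue.
 */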

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
        case SMMU_CMD_CFGI_STE:
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NH_ASID:
        case SMMU_CMD_TLBI_NH_VA:
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_TLBI_NSNH_ALL:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
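
/*
 * Typical guest interaction with the command queue (illustrative only):
 * the driver programs CMDQ_BASE with the ring address and log2 size,
 * enables CMDQEN in CR0, writes commands into the ring and then updates
 * CMDQ_PROD.  The MMIO write handlers below call smmuv3_cmdq_consume() so
 * commands are processed synchronously; a CMD_SYNC whose CS field requests
 * SIG_IRQ additionally pulses the CMD_SYNC interrupt.
 */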

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}
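
/*
 * The register file spans two 64KB pages (the MMIO region is 0x20000
 * bytes, see smmu_realize()).  Accesses to the second page are folded onto
 * the first by clearing bit 16 of the offset, the CONSTRAINED UNPREDICTABLE
 * choice noted above.  Only 32-bit and 64-bit accesses are implemented;
 * other sizes fail with MEMTX_ERROR.
 */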

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                       IOMMUNotifierFlag old,
                                       IOMMUNotifierFlag new)
{
    if (old == IOMMU_NOTIFIER_NONE) {
        warn_report("SMMUV3 does not support vhost/vfio integration yet: "
                    "devices of those types will not function properly");
    }
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)