tz-mpc.c

/*
 * ARM AHB5 TrustZone Memory Protection Controller emulation
 *
 * Copyright (c) 2018 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 or
 * (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/registerfields.h"
#include "hw/irq.h"
#include "hw/misc/tz-mpc.h"
#include "hw/qdev-properties.h"

/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
 * non-secure transactions.
 */
enum {
    IOMMU_IDX_S,
    IOMMU_IDX_NS,
    IOMMU_NUM_INDEXES,
};

/* Config registers */
REG32(CTRL, 0x00)
    FIELD(CTRL, SEC_RESP, 4, 1)
    FIELD(CTRL, AUTOINC, 8, 1)
    FIELD(CTRL, LOCKDOWN, 31, 1)
REG32(BLK_MAX, 0x10)
REG32(BLK_CFG, 0x14)
REG32(BLK_IDX, 0x18)
REG32(BLK_LUT, 0x1c)
REG32(INT_STAT, 0x20)
    FIELD(INT_STAT, IRQ, 0, 1)
REG32(INT_CLEAR, 0x24)
    FIELD(INT_CLEAR, IRQ, 0, 1)
REG32(INT_EN, 0x28)
    FIELD(INT_EN, IRQ, 0, 1)
REG32(INT_INFO1, 0x2c)
REG32(INT_INFO2, 0x30)
    FIELD(INT_INFO2, HMASTER, 0, 16)
    FIELD(INT_INFO2, HNONSEC, 16, 1)
    FIELD(INT_INFO2, CFG_NS, 17, 1)
REG32(INT_SET, 0x34)
    FIELD(INT_SET, IRQ, 0, 1)
REG32(PIDR4, 0xfd0)
REG32(PIDR5, 0xfd4)
REG32(PIDR6, 0xfd8)
REG32(PIDR7, 0xfdc)
REG32(PIDR0, 0xfe0)
REG32(PIDR1, 0xfe4)
REG32(PIDR2, 0xfe8)
REG32(PIDR3, 0xfec)
REG32(CIDR0, 0xff0)
REG32(CIDR1, 0xff4)
REG32(CIDR2, 0xff8)
REG32(CIDR3, 0xffc)
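
/* Byte values returned for reads of the PIDR and CIDR ID registers,
 * one byte per register, in offset order starting at PIDR4.
 */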
static const uint8_t tz_mpc_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x60, 0xb8, 0x1b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};
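
/* Drive the IRQ output line: it is asserted when an interrupt is both
 * pending (INT_STAT) and enabled (INT_EN).
 */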
static void tz_mpc_irq_update(TZMPC *s)
{
    qemu_set_irq(s->irq, s->int_stat && s->int_en);
}

static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
                                uint32_t oldlut, uint32_t newlut)
{
    /* Called when the LUT word at lutidx has changed from oldlut to newlut;
     * must call the IOMMU notifiers for the changed blocks.
     */
    IOMMUTLBEvent event = {
        .entry = {
            .addr_mask = s->blocksize - 1,
        }
    };
    hwaddr addr = lutidx * s->blocksize * 32;
    int i;

    for (i = 0; i < 32; i++, addr += s->blocksize) {
        bool block_is_ns;

        if (!((oldlut ^ newlut) & (1 << i))) {
            continue;
        }
        /* This changes the mappings for both the S and the NS space,
         * so we need to do four notifies: an UNMAP then a MAP for each.
         */
        block_is_ns = newlut & (1 << i);

        trace_tz_mpc_iommu_notify(addr);
        event.entry.iova = addr;
        event.entry.translated_addr = addr;

        event.type = IOMMU_NOTIFIER_UNMAP;
        event.entry.perm = IOMMU_NONE;
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event);
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event);

        event.type = IOMMU_NOTIFIER_MAP;
        event.entry.perm = IOMMU_RW;
        if (block_is_ns) {
            event.entry.target_as = &s->blocked_io_as;
        } else {
            event.entry.target_as = &s->downstream_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event);
        if (block_is_ns) {
            event.entry.target_as = &s->downstream_as;
        } else {
            event.entry.target_as = &s->blocked_io_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event);
    }
}

static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
{
    /* Auto-increment BLK_IDX if necessary */
    if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
        s->blk_idx++;
        s->blk_idx %= s->blk_max;
    }
}
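
/* Read handler for the config register block. Non-secure accesses may
 * only read the ID registers; all other offsets are secure-only.
 */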
static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
                                   uint64_t *pdata,
                                   unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint64_t r;
    uint32_t offset = addr & ~0x3;

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: NS access to offset 0x%x\n",
                      offset);
        r = 0;
        goto read_out;
    }

    switch (offset) {
    case A_CTRL:
        r = s->ctrl;
        break;
    case A_BLK_MAX:
        r = s->blk_max - 1;
        break;
    case A_BLK_CFG:
        /* We are never in "init in progress state", so this just indicates
         * the block size. s->blocksize == (1 << BLK_CFG + 5), so
         * BLK_CFG == ctz32(s->blocksize) - 5
         */
        r = ctz32(s->blocksize) - 5;
        break;
    case A_BLK_IDX:
        r = s->blk_idx;
        break;
    case A_BLK_LUT:
        r = s->blk_lut[s->blk_idx];
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_STAT:
        r = s->int_stat;
        break;
    case A_INT_EN:
        r = s->int_en;
        break;
    case A_INT_INFO1:
        r = s->int_info1;
        break;
    case A_INT_INFO2:
        r = s->int_info2;
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
        break;
    case A_INT_CLEAR:
    case A_INT_SET:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: write-only offset 0x%x\n",
                      offset);
        r = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: bad offset 0x%x\n", offset);
        r = 0;
        break;
    }

    if (size != 4) {
        /* None of our registers are read-sensitive (except BLK_LUT,
         * which can special case the "size not 4" case), so just
         * pull the right bytes out of the word read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }

read_out:
    trace_tz_mpc_reg_read(addr, r, size);
    *pdata = r;
    return MEMTX_OK;
}
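
/* Write handler for the config register block. Sub-word writes are
 * widened to a full word, and CTRL.LOCKDOWN makes CTRL, BLK_LUT and
 * INT_EN read-only until the device is reset.
 */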
static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
                                    uint64_t value,
                                    unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint32_t offset = addr & ~0x3;

    trace_tz_mpc_reg_write(addr, value, size);

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: NS access to offset 0x%x\n",
                      offset);
        return MEMTX_OK;
    }

    if (size != 4) {
        /* Expand the byte or halfword write to a full word size.
         * In most cases we can do this with zeroes; the exceptions
         * are CTRL, BLK_IDX and BLK_LUT.
         */
        uint32_t oldval;

        switch (offset) {
        case A_CTRL:
            oldval = s->ctrl;
            break;
        case A_BLK_IDX:
            oldval = s->blk_idx;
            break;
        case A_BLK_LUT:
            oldval = s->blk_lut[s->blk_idx];
            break;
        default:
            oldval = 0;
            break;
        }
        value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
    }

    if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
        (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
        /* Lockdown mode makes these three registers read-only, and
         * the only way out of it is to reset the device.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
                      "while MPC is in lockdown mode\n", offset);
        return MEMTX_OK;
    }

    switch (offset) {
    case A_CTRL:
        /* We don't implement the 'data gating' feature so all other bits
         * are reserved and we make them RAZ/WI.
         */
        s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
                           R_CTRL_AUTOINC_MASK |
                           R_CTRL_LOCKDOWN_MASK);
        break;
    case A_BLK_IDX:
        s->blk_idx = value % s->blk_max;
        break;
    case A_BLK_LUT:
        tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
        s->blk_lut[s->blk_idx] = value;
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_CLEAR:
        if (value & R_INT_CLEAR_IRQ_MASK) {
            s->int_stat = 0;
            tz_mpc_irq_update(s);
        }
        break;
    case A_INT_EN:
        s->int_en = value & R_INT_EN_IRQ_MASK;
        tz_mpc_irq_update(s);
        break;
    case A_INT_SET:
        if (value & R_INT_SET_IRQ_MASK) {
            s->int_stat = R_INT_STAT_IRQ_MASK;
            tz_mpc_irq_update(s);
        }
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: bad offset 0x%x\n", offset);
        break;
    }

    return MEMTX_OK;
}
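
/* The register read/write handlers above deal with 1-, 2- and 4-byte
 * accesses themselves, so .impl allows the same range as .valid.
 */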
static const MemoryRegionOps tz_mpc_reg_ops = {
    .read_with_attrs = tz_mpc_reg_read,
    .write_with_attrs = tz_mpc_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};

static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
{
    /* Return the cfg_ns bit from the LUT for the specified address */
    hwaddr blknum = addr / s->blocksize;
    hwaddr blkword = blknum / 32;
    uint32_t blkbit = 1U << (blknum % 32);

    /* This would imply the address was larger than the size we
     * defined this memory region to be, so it can't happen.
     */
    assert(blkword < s->blk_max);
    return s->blk_lut[blkword] & blkbit;
}

static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
{
    /* Handle a blocked transaction: raise IRQ, capture info, etc */
    if (!s->int_stat) {
        /* First blocked transfer: capture information into INT_INFO1 and
         * INT_INFO2. Subsequent transfers are still blocked but don't
         * capture information until the guest clears the interrupt.
         */
        s->int_info1 = addr;
        s->int_info2 = 0;
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
                                  attrs.requester_id & 0xffff);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
                                  ~attrs.secure);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
                                  tz_mpc_cfg_ns(s, addr));
        s->int_stat |= R_INT_STAT_IRQ_MASK;
        tz_mpc_irq_update(s);
    }

    /* Generate bus error if desired; otherwise RAZ/WI */
    return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
}

/* Accesses only reach these read and write functions if the MPC is
 * blocking them; non-blocked accesses go directly to the downstream
 * memory region without passing through this code.
 */
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
                                           uint64_t *pdata,
                                           unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);

    *pdata = 0;
    return tz_mpc_handle_block(s, addr, attrs);
}

static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
                                            uint64_t value,
                                            unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);

    return tz_mpc_handle_block(s, addr, attrs);
}

static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
    .read_with_attrs = tz_mpc_mem_blocked_read,
    .write_with_attrs = tz_mpc_mem_blocked_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
};

static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
                                      hwaddr addr, IOMMUAccessFlags flags,
                                      int iommu_idx)
{
    TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
    bool ok;

    IOMMUTLBEntry ret = {
        .iova = addr & ~(s->blocksize - 1),
        .translated_addr = addr & ~(s->blocksize - 1),
        .addr_mask = s->blocksize - 1,
        .perm = IOMMU_RW,
    };

    /* Look at the per-block configuration for this address, and
     * return a TLB entry directing the transaction at either
     * downstream_as or blocked_io_as, as appropriate.
     * If the LUT cfg_ns bit is 1, only non-secure transactions
     * may pass. If the bit is 0, only secure transactions may pass.
     */
    ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);

    trace_tz_mpc_translate(addr, flags,
                           iommu_idx == IOMMU_IDX_S ? "S" : "NS",
                           ok ? "pass" : "block");

    ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
    return ret;
}

static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
    /* We treat unspecified attributes like secure. Transactions with
     * unspecified attributes come from places like
     * rom_reset() for initial image load, and we want
     * those to pass through the from-reset "everything is secure" config.
     * All the real during-emulation transactions from the CPU will
     * specify attributes.
     */
    return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
}

static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
{
    return IOMMU_NUM_INDEXES;
}
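
/* Reset state: AUTOINC is set in CTRL, the interrupt is enabled but not
 * pending, and the LUT is cleared so every block passes only secure
 * transactions.
 */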
static void tz_mpc_reset(DeviceState *dev)
{
    TZMPC *s = TZ_MPC(dev);

    s->ctrl = 0x00000100;
    s->blk_idx = 0;
    s->int_stat = 0;
    s->int_en = 1;
    s->int_info1 = 0;
    s->int_info2 = 0;

    memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
}
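
/* Instance init only creates the "irq" output line; the memory regions
 * are created at realize time, once the downstream region is known.
 */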
static void tz_mpc_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    TZMPC *s = TZ_MPC(obj);

    qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
}

static void tz_mpc_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    TZMPC *s = TZ_MPC(dev);
    uint64_t size;

    /* We can't create the upstream end of the port until realize,
     * as we don't know the size of the MR used as the downstream until then.
     * We insist on having a downstream, to avoid complicating the code
     * with handling the "don't know how big this is" case. It's easy
     * enough for the user to create an unimplemented_device as downstream
     * if they have nothing else to plug into this.
     */
    if (!s->downstream) {
        error_setg(errp, "MPC 'downstream' link not set");
        return;
    }

    size = memory_region_size(s->downstream);

    memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
                             TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
                             obj, "tz-mpc-upstream", size);

    /* In real hardware the block size is configurable. In QEMU we could
     * make it configurable but will need it to be at least as big as the
     * target page size so we can execute out of the resulting MRs. Guest
     * software is supposed to check the block size using the BLK_CFG
     * register, so make it fixed at the page size.
     */
    s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
    if (size % s->blocksize != 0) {
        error_setg(errp,
                   "MPC 'downstream' size %" PRId64
                   " is not a multiple of %" HWADDR_PRIx " bytes",
                   size, s->blocksize);
        object_unref(OBJECT(&s->upstream));
        return;
    }

    /* BLK_MAX is the max value of BLK_IDX, which indexes an array of 32-bit
     * words, each bit of which indicates one block.
     */
    s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);

    memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
                          s, "tz-mpc-regs", 0x1000);
    sysbus_init_mmio(sbd, &s->regmr);

    sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));

    /* This memory region is not exposed to users of this device as a
     * sysbus MMIO region, but is instead used internally as something
     * that our IOMMU translate function might direct accesses to.
     */
    memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
                          s, "tz-mpc-blocked-io", size);

    address_space_init(&s->downstream_as, s->downstream,
                       "tz-mpc-downstream");
    address_space_init(&s->blocked_io_as, &s->blocked_io,
                       "tz-mpc-blocked-io");

    s->blk_lut = g_new0(uint32_t, s->blk_max);
}
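
/* A minimal wiring sketch for a board model, assuming a hypothetical
 * board state 'mms' with a TZMPC member 'mpc', an existing RAM region
 * 'sram', a base address 'base' and a qemu_irq 'irq' to connect to.
 * Sysbus MMIO region 1 is the upstream end created above (region 0 is
 * the register block):
 *
 *     object_initialize_child(OBJECT(mms), "mpc", &mms->mpc, TYPE_TZ_MPC);
 *     object_property_set_link(OBJECT(&mms->mpc), "downstream",
 *                              OBJECT(sram), &error_fatal);
 *     sysbus_realize(SYS_BUS_DEVICE(&mms->mpc), &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), base,
 *         sysbus_mmio_get_region(SYS_BUS_DEVICE(&mms->mpc), 1));
 *     qdev_connect_gpio_out_named(DEVICE(&mms->mpc), "irq", 0, irq);
 */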
static int tz_mpc_post_load(void *opaque, int version_id)
{
    TZMPC *s = TZ_MPC(opaque);

    /* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
    if (s->blk_idx >= s->blk_max) {
        return -1;
    }
    return 0;
}
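
/* Migration state: blk_lut is sent as a variable-length array of blk_max
 * words, and post_load sanity-checks the incoming blk_idx against it.
 */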
static const VMStateDescription tz_mpc_vmstate = {
    .name = "tz-mpc",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = tz_mpc_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(ctrl, TZMPC),
        VMSTATE_UINT32(blk_idx, TZMPC),
        VMSTATE_UINT32(int_stat, TZMPC),
        VMSTATE_UINT32(int_en, TZMPC),
        VMSTATE_UINT32(int_info1, TZMPC),
        VMSTATE_UINT32(int_info2, TZMPC),
        VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};

static const Property tz_mpc_properties[] = {
    DEFINE_PROP_LINK("downstream", TZMPC, downstream,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};

static void tz_mpc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = tz_mpc_realize;
    dc->vmsd = &tz_mpc_vmstate;
    device_class_set_legacy_reset(dc, tz_mpc_reset);
    device_class_set_props(dc, tz_mpc_properties);
}

static const TypeInfo tz_mpc_info = {
    .name = TYPE_TZ_MPC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(TZMPC),
    .instance_init = tz_mpc_init,
    .class_init = tz_mpc_class_init,
};

static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = tz_mpc_translate;
    imrc->attrs_to_index = tz_mpc_attrs_to_index;
    imrc->num_indexes = tz_mpc_num_indexes;
}

static const TypeInfo tz_mpc_iommu_memory_region_info = {
    .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .class_init = tz_mpc_iommu_memory_region_class_init,
};

static void tz_mpc_register_types(void)
{
    type_register_static(&tz_mpc_info);
    type_register_static(&tz_mpc_iommu_memory_region_info);
}

type_init(tz_mpc_register_types);