tz-mpc.c
/*
 * ARM AHB5 TrustZone Memory Protection Controller emulation
 *
 * Copyright (c) 2018 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 or
 * (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/registerfields.h"
#include "hw/irq.h"
#include "hw/misc/tz-mpc.h"
#include "hw/qdev-properties.h"

/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
 * non-secure transactions.
 */
enum {
    IOMMU_IDX_S,
    IOMMU_IDX_NS,
    IOMMU_NUM_INDEXES,
};

/* Config registers */
REG32(CTRL, 0x00)
    FIELD(CTRL, SEC_RESP, 4, 1)
    FIELD(CTRL, AUTOINC, 8, 1)
    FIELD(CTRL, LOCKDOWN, 31, 1)
REG32(BLK_MAX, 0x10)
REG32(BLK_CFG, 0x14)
REG32(BLK_IDX, 0x18)
REG32(BLK_LUT, 0x1c)
REG32(INT_STAT, 0x20)
    FIELD(INT_STAT, IRQ, 0, 1)
REG32(INT_CLEAR, 0x24)
    FIELD(INT_CLEAR, IRQ, 0, 1)
REG32(INT_EN, 0x28)
    FIELD(INT_EN, IRQ, 0, 1)
REG32(INT_INFO1, 0x2c)
REG32(INT_INFO2, 0x30)
    FIELD(INT_INFO2, HMASTER, 0, 16)
    FIELD(INT_INFO2, HNONSEC, 16, 1)
    FIELD(INT_INFO2, CFG_NS, 17, 1)
REG32(INT_SET, 0x34)
    FIELD(INT_SET, IRQ, 0, 1)
REG32(PIDR4, 0xfd0)
REG32(PIDR5, 0xfd4)
REG32(PIDR6, 0xfd8)
REG32(PIDR7, 0xfdc)
REG32(PIDR0, 0xfe0)
REG32(PIDR1, 0xfe4)
REG32(PIDR2, 0xfe8)
REG32(PIDR3, 0xfec)
REG32(CIDR0, 0xff0)
REG32(CIDR1, 0xff4)
REG32(CIDR2, 0xff8)
REG32(CIDR3, 0xffc)
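
/* Values returned by the read-only PIDR4..7, PIDR0..3 and CIDR0..3 ID
 * registers, one byte per register in ascending offset order (the register
 * read handler below indexes this array as (offset - A_PIDR4) / 4).
 */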
static const uint8_t tz_mpc_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x60, 0xb8, 0x1b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};

static void tz_mpc_irq_update(TZMPC *s)
{
    qemu_set_irq(s->irq, s->int_stat && s->int_en);
}

static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
                                uint32_t oldlut, uint32_t newlut)
{
    /* Called when the LUT word at lutidx has changed from oldlut to newlut;
     * must call the IOMMU notifiers for the changed blocks.
     */
    IOMMUTLBEntry entry = {
        .addr_mask = s->blocksize - 1,
    };
    hwaddr addr = lutidx * s->blocksize * 32;
    int i;

    for (i = 0; i < 32; i++, addr += s->blocksize) {
        bool block_is_ns;

        if (!((oldlut ^ newlut) & (1 << i))) {
            continue;
        }

        /* This changes the mappings for both the S and the NS space,
         * so we need to do four notifies: an UNMAP then a MAP for each.
         */
        block_is_ns = newlut & (1 << i);

        trace_tz_mpc_iommu_notify(addr);
        entry.iova = addr;
        entry.translated_addr = addr;

        entry.perm = IOMMU_NONE;
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);

        entry.perm = IOMMU_RW;
        if (block_is_ns) {
            entry.target_as = &s->blocked_io_as;
        } else {
            entry.target_as = &s->downstream_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        if (block_is_ns) {
            entry.target_as = &s->downstream_as;
        } else {
            entry.target_as = &s->blocked_io_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
    }
}

static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
{
    /* Auto-increment BLK_IDX if necessary */
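    /* For example, with AUTOINC set and blk_max == 4, successive word-sized
     * BLK_LUT accesses step BLK_IDX through 0, 1, 2, 3, 0, ...
     */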
    if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
        s->blk_idx++;
        s->blk_idx %= s->blk_max;
    }
}

static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
                                   uint64_t *pdata,
                                   unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint64_t r;
    uint32_t offset = addr & ~0x3;

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: NS access to offset 0x%x\n",
                      offset);
        r = 0;
        goto read_out;
    }

    switch (offset) {
    case A_CTRL:
        r = s->ctrl;
        break;
    case A_BLK_MAX:
        r = s->blk_max - 1;
        break;
    case A_BLK_CFG:
        /* We are never in "init in progress state", so this just indicates
         * the block size. s->blocksize == (1 << BLK_CFG + 5), so
         * BLK_CFG == ctz32(s->blocksize) - 5
         */
        r = ctz32(s->blocksize) - 5;
        break;
    case A_BLK_IDX:
        r = s->blk_idx;
        break;
    case A_BLK_LUT:
        r = s->blk_lut[s->blk_idx];
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_STAT:
        r = s->int_stat;
        break;
    case A_INT_EN:
        r = s->int_en;
        break;
    case A_INT_INFO1:
        r = s->int_info1;
        break;
    case A_INT_INFO2:
        r = s->int_info2;
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
        break;
    case A_INT_CLEAR:
    case A_INT_SET:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: write-only offset 0x%x\n",
                      offset);
        r = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: bad offset 0x%x\n", offset);
        r = 0;
        break;
    }

    if (size != 4) {
        /* None of our registers are read-sensitive (except BLK_LUT,
         * which can special case the "size not 4" case), so just
         * pull the right bytes out of the word read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }

read_out:
    trace_tz_mpc_reg_read(addr, r, size);
    *pdata = r;
    return MEMTX_OK;
}

static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
                                    uint64_t value,
                                    unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint32_t offset = addr & ~0x3;

    trace_tz_mpc_reg_write(addr, value, size);

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: NS access to offset 0x%x\n",
                      offset);
        return MEMTX_OK;
    }

    if (size != 4) {
        /* Expand the byte or halfword write to a full word size.
         * In most cases we can do this with zeroes; the exceptions
         * are CTRL, BLK_IDX and BLK_LUT.
         */
        uint32_t oldval;

        switch (offset) {
        case A_CTRL:
            oldval = s->ctrl;
            break;
        case A_BLK_IDX:
            oldval = s->blk_idx;
            break;
        case A_BLK_LUT:
            oldval = s->blk_lut[s->blk_idx];
            break;
        default:
            oldval = 0;
            break;
        }
        value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
    }

    if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
        (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
        /* Lockdown mode makes these three registers read-only, and
         * the only way out of it is to reset the device.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
                      "while MPC is in lockdown mode\n", offset);
        return MEMTX_OK;
    }

    switch (offset) {
    case A_CTRL:
        /* We don't implement the 'data gating' feature so all other bits
         * are reserved and we make them RAZ/WI.
         */
        s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
                           R_CTRL_AUTOINC_MASK |
                           R_CTRL_LOCKDOWN_MASK);
        break;
    case A_BLK_IDX:
        s->blk_idx = value % s->blk_max;
        break;
    case A_BLK_LUT:
        tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
        s->blk_lut[s->blk_idx] = value;
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_CLEAR:
        if (value & R_INT_CLEAR_IRQ_MASK) {
            s->int_stat = 0;
            tz_mpc_irq_update(s);
        }
        break;
    case A_INT_EN:
        s->int_en = value & R_INT_EN_IRQ_MASK;
        tz_mpc_irq_update(s);
        break;
    case A_INT_SET:
        if (value & R_INT_SET_IRQ_MASK) {
            s->int_stat = R_INT_STAT_IRQ_MASK;
            tz_mpc_irq_update(s);
        }
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: bad offset 0x%x\n", offset);
        break;
    }

    return MEMTX_OK;
}

static const MemoryRegionOps tz_mpc_reg_ops = {
    .read_with_attrs = tz_mpc_reg_read,
    .write_with_attrs = tz_mpc_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};

static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
{
    /* Return the cfg_ns bit from the LUT for the specified address */
    hwaddr blknum = addr / s->blocksize;
    hwaddr blkword = blknum / 32;
    uint32_t blkbit = 1U << (blknum % 32);
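
    /* Worked example: with a 4KB blocksize, address 0x23000 falls in block
     * 0x23, i.e. LUT word 1, bit 3.
     */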
    /* This would imply the address was larger than the size we
     * defined this memory region to be, so it can't happen.
     */
    assert(blkword < s->blk_max);
    return s->blk_lut[blkword] & blkbit;
}

static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
{
    /* Handle a blocked transaction: raise IRQ, capture info, etc */
    if (!s->int_stat) {
        /* First blocked transfer: capture information into INT_INFO1 and
         * INT_INFO2. Subsequent transfers are still blocked but don't
         * capture information until the guest clears the interrupt.
         */
        s->int_info1 = addr;
        s->int_info2 = 0;
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
                                  attrs.requester_id & 0xffff);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
                                  ~attrs.secure);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
                                  tz_mpc_cfg_ns(s, addr));
        s->int_stat |= R_INT_STAT_IRQ_MASK;
        tz_mpc_irq_update(s);
    }

    /* Generate bus error if desired; otherwise RAZ/WI */
    return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
}

/* Accesses only reach these read and write functions if the MPC is
 * blocking them; non-blocked accesses go directly to the downstream
 * memory region without passing through this code.
 */
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
                                           uint64_t *pdata,
                                           unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);

    *pdata = 0;
    return tz_mpc_handle_block(s, addr, attrs);
}

static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
                                            uint64_t value,
                                            unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);

    return tz_mpc_handle_block(s, addr, attrs);
}

static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
    .read_with_attrs = tz_mpc_mem_blocked_read,
    .write_with_attrs = tz_mpc_mem_blocked_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
};

static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
                                      hwaddr addr, IOMMUAccessFlags flags,
                                      int iommu_idx)
{
    TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
    bool ok;

    IOMMUTLBEntry ret = {
        .iova = addr & ~(s->blocksize - 1),
        .translated_addr = addr & ~(s->blocksize - 1),
        .addr_mask = s->blocksize - 1,
        .perm = IOMMU_RW,
    };

    /* Look at the per-block configuration for this address, and
     * return a TLB entry directing the transaction at either
     * downstream_as or blocked_io_as, as appropriate.
     * If the LUT cfg_ns bit is 1, only non-secure transactions
     * may pass. If the bit is 0, only secure transactions may pass.
     */
    ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);

    trace_tz_mpc_translate(addr, flags,
                           iommu_idx == IOMMU_IDX_S ? "S" : "NS",
                           ok ? "pass" : "block");

    ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
    return ret;
}

static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
    /* We treat unspecified attributes like secure. Transactions with
     * unspecified attributes come from places like
     * rom_reset() for initial image load, and we want
     * those to pass through the from-reset "everything is secure" config.
     * All the real during-emulation transactions from the CPU will
     * specify attributes.
     */
    return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
}

static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
{
    return IOMMU_NUM_INDEXES;
}

static void tz_mpc_reset(DeviceState *dev)
{
    TZMPC *s = TZ_MPC(dev);

    s->ctrl = 0x00000100;
    s->blk_idx = 0;
    s->int_stat = 0;
    s->int_en = 1;
    s->int_info1 = 0;
    s->int_info2 = 0;

    memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
}

static void tz_mpc_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    TZMPC *s = TZ_MPC(obj);

    qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
}

static void tz_mpc_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    TZMPC *s = TZ_MPC(dev);
    uint64_t size;

    /* We can't create the upstream end of the port until realize,
     * as we don't know the size of the MR used as the downstream until then.
     * We insist on having a downstream, to avoid complicating the code
     * with handling the "don't know how big this is" case. It's easy
     * enough for the user to create an unimplemented_device as downstream
     * if they have nothing else to plug into this.
     */
    if (!s->downstream) {
        error_setg(errp, "MPC 'downstream' link not set");
        return;
    }

    size = memory_region_size(s->downstream);

    memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
                             TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
                             obj, "tz-mpc-upstream", size);

    /* In real hardware the block size is configurable. In QEMU we could
     * make it configurable but will need it to be at least as big as the
     * target page size so we can execute out of the resulting MRs. Guest
     * software is supposed to check the block size using the BLK_CFG
     * register, so make it fixed at the page size.
     */
    s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
    if (size % s->blocksize != 0) {
        error_setg(errp,
                   "MPC 'downstream' size %" PRId64
                   " is not a multiple of %" HWADDR_PRIx " bytes",
                   size, s->blocksize);
        object_unref(OBJECT(&s->upstream));
        return;
    }

    /* BLK_MAX is the max value of BLK_IDX, which indexes an array of 32-bit
     * words, each bit of which indicates one block.
     */
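    /* For example, a 16MB downstream region with a 4KB block size has
     * 4096 blocks, giving s->blk_max == 4096 / 32 == 128 LUT words.
     */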
    s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);

    memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
                          s, "tz-mpc-regs", 0x1000);
    sysbus_init_mmio(sbd, &s->regmr);

    sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));

    /* This memory region is not exposed to users of this device as a
     * sysbus MMIO region, but is instead used internally as something
     * that our IOMMU translate function might direct accesses to.
     */
    memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
                          s, "tz-mpc-blocked-io", size);

    address_space_init(&s->downstream_as, s->downstream,
                       "tz-mpc-downstream");
    address_space_init(&s->blocked_io_as, &s->blocked_io,
                       "tz-mpc-blocked-io");

    s->blk_lut = g_new0(uint32_t, s->blk_max);
}

static int tz_mpc_post_load(void *opaque, int version_id)
{
    TZMPC *s = TZ_MPC(opaque);

    /* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
    if (s->blk_idx >= s->blk_max) {
        return -1;
    }
    return 0;
}

static const VMStateDescription tz_mpc_vmstate = {
    .name = "tz-mpc",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = tz_mpc_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctrl, TZMPC),
        VMSTATE_UINT32(blk_idx, TZMPC),
        VMSTATE_UINT32(int_stat, TZMPC),
        VMSTATE_UINT32(int_en, TZMPC),
        VMSTATE_UINT32(int_info1, TZMPC),
        VMSTATE_UINT32(int_info2, TZMPC),
        VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};

static Property tz_mpc_properties[] = {
    DEFINE_PROP_LINK("downstream", TZMPC, downstream,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void tz_mpc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = tz_mpc_realize;
    dc->vmsd = &tz_mpc_vmstate;
    dc->reset = tz_mpc_reset;
    device_class_set_props(dc, tz_mpc_properties);
}

static const TypeInfo tz_mpc_info = {
    .name = TYPE_TZ_MPC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(TZMPC),
    .instance_init = tz_mpc_init,
    .class_init = tz_mpc_class_init,
};

static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = tz_mpc_translate;
    imrc->attrs_to_index = tz_mpc_attrs_to_index;
    imrc->num_indexes = tz_mpc_num_indexes;
}

static const TypeInfo tz_mpc_iommu_memory_region_info = {
    .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .class_init = tz_mpc_iommu_memory_region_class_init,
};

static void tz_mpc_register_types(void)
{
    type_register_static(&tz_mpc_info);
    type_register_static(&tz_mpc_iommu_memory_region_info);
}

type_init(tz_mpc_register_types);
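
/* Illustrative sketch only (not part of this file): how a board model might
 * wire the MPC up. The function name, the addresses and the SRAM region are
 * assumptions for the example, and the exact QOM helper names and argument
 * order vary between QEMU versions, so this is kept disabled.
 */
#if 0
static void board_wire_up_mpc(MemoryRegion *sysmem, MemoryRegion *sram)
{
    DeviceState *mpcdev = qdev_new(TYPE_TZ_MPC);

    /* Realize fails unless the 'downstream' link property has been set */
    object_property_set_link(OBJECT(mpcdev), "downstream", OBJECT(sram),
                             &error_fatal);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(mpcdev), &error_fatal);

    /* Sysbus MMIO 0 is the MPC's own register block ... */
    sysbus_mmio_map(SYS_BUS_DEVICE(mpcdev), 0, 0x58007000);
    /* ... and MMIO 1 is the upstream port, mapped where the guest expects
     * to see the memory that the MPC is guarding.
     */
    memory_region_add_subregion(sysmem, 0x10000000,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(mpcdev),
                                                       1));
}
#endif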