/* xlnx-versal-efuse-ctrl.c */
  1. /*
  2. * QEMU model of the Versal eFuse controller
  3. *
  4. * Copyright (c) 2020 Xilinx Inc.
  5. * Copyright (c) 2023 Advanced Micro Devices, Inc.
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a copy
  8. * of this software and associated documentation files (the "Software"), to deal
  9. * in the Software without restriction, including without limitation the rights
  10. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11. * copies of the Software, and to permit persons to whom the Software is
  12. * furnished to do so, subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in
  15. * all copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  23. * THE SOFTWARE.
  24. */
  25. #include "qemu/osdep.h"
  26. #include "hw/nvram/xlnx-versal-efuse.h"
  27. #include "qemu/log.h"
  28. #include "qapi/error.h"
  29. #include "migration/vmstate.h"
  30. #include "hw/qdev-properties.h"
/* Compile-time debug-logging level for the register API (0 = quiet). */
#ifndef XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG
#define XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG 0
#endif
/*
 * Register map, defined with the QEMU REG32/FIELD helpers
 * (hw/registerfields.h).
 */
REG32(WR_LOCK, 0x0)
    FIELD(WR_LOCK, LOCK, 0, 16)
REG32(CFG, 0x4)
    FIELD(CFG, SLVERR_ENABLE, 5, 1)
    FIELD(CFG, MARGIN_RD, 2, 1)
    FIELD(CFG, PGM_EN, 1, 1)
REG32(STATUS, 0x8)
    FIELD(STATUS, AES_USER_KEY_1_CRC_PASS, 11, 1)
    FIELD(STATUS, AES_USER_KEY_1_CRC_DONE, 10, 1)
    FIELD(STATUS, AES_USER_KEY_0_CRC_PASS, 9, 1)
    FIELD(STATUS, AES_USER_KEY_0_CRC_DONE, 8, 1)
    FIELD(STATUS, AES_CRC_PASS, 7, 1)
    FIELD(STATUS, AES_CRC_DONE, 6, 1)
    FIELD(STATUS, CACHE_DONE, 5, 1)
    FIELD(STATUS, CACHE_LOAD, 4, 1)
    FIELD(STATUS, EFUSE_2_TBIT, 2, 1)
    FIELD(STATUS, EFUSE_1_TBIT, 1, 1)
    FIELD(STATUS, EFUSE_0_TBIT, 0, 1)
/* Bit address to program: <page, row, column>; write-only. */
REG32(EFUSE_PGM_ADDR, 0xc)
    FIELD(EFUSE_PGM_ADDR, PAGE, 13, 4)
    FIELD(EFUSE_PGM_ADDR, ROW, 5, 8)
    FIELD(EFUSE_PGM_ADDR, COLUMN, 0, 5)
/* Row address to read (result lands in EFUSE_RD_DATA); write-only. */
REG32(EFUSE_RD_ADDR, 0x10)
    FIELD(EFUSE_RD_ADDR, PAGE, 13, 4)
    FIELD(EFUSE_RD_ADDR, ROW, 5, 8)
REG32(EFUSE_RD_DATA, 0x14)
/* Timing registers; values are not modeled, only stored. */
REG32(TPGM, 0x18)
    FIELD(TPGM, VALUE, 0, 16)
REG32(TRD, 0x1c)
    FIELD(TRD, VALUE, 0, 8)
REG32(TSU_H_PS, 0x20)
    FIELD(TSU_H_PS, VALUE, 0, 8)
REG32(TSU_H_PS_CS, 0x24)
    FIELD(TSU_H_PS_CS, VALUE, 0, 8)
REG32(TRDM, 0x28)
    FIELD(TRDM, VALUE, 0, 8)
REG32(TSU_H_CS, 0x2c)
    FIELD(TSU_H_CS, VALUE, 0, 8)
/* Interrupt status (w1c) / mask / enable / disable block. */
REG32(EFUSE_ISR, 0x30)
    FIELD(EFUSE_ISR, APB_SLVERR, 31, 1)
    FIELD(EFUSE_ISR, CACHE_PARITY_E2, 14, 1)
    FIELD(EFUSE_ISR, CACHE_PARITY_E1, 13, 1)
    FIELD(EFUSE_ISR, CACHE_PARITY_E0S, 12, 1)
    FIELD(EFUSE_ISR, CACHE_PARITY_E0R, 11, 1)
    FIELD(EFUSE_ISR, CACHE_APB_SLVERR, 10, 1)
    FIELD(EFUSE_ISR, CACHE_REQ_ERROR, 9, 1)
    FIELD(EFUSE_ISR, MAIN_REQ_ERROR, 8, 1)
    FIELD(EFUSE_ISR, READ_ON_CACHE_LD, 7, 1)
    FIELD(EFUSE_ISR, CACHE_FSM_ERROR, 6, 1)
    FIELD(EFUSE_ISR, MAIN_FSM_ERROR, 5, 1)
    FIELD(EFUSE_ISR, CACHE_ERROR, 4, 1)
    FIELD(EFUSE_ISR, RD_ERROR, 3, 1)
    FIELD(EFUSE_ISR, RD_DONE, 2, 1)
    FIELD(EFUSE_ISR, PGM_ERROR, 1, 1)
    FIELD(EFUSE_ISR, PGM_DONE, 0, 1)
REG32(EFUSE_IMR, 0x34)
    FIELD(EFUSE_IMR, APB_SLVERR, 31, 1)
    FIELD(EFUSE_IMR, CACHE_PARITY_E2, 14, 1)
    FIELD(EFUSE_IMR, CACHE_PARITY_E1, 13, 1)
    FIELD(EFUSE_IMR, CACHE_PARITY_E0S, 12, 1)
    FIELD(EFUSE_IMR, CACHE_PARITY_E0R, 11, 1)
    FIELD(EFUSE_IMR, CACHE_APB_SLVERR, 10, 1)
    FIELD(EFUSE_IMR, CACHE_REQ_ERROR, 9, 1)
    FIELD(EFUSE_IMR, MAIN_REQ_ERROR, 8, 1)
    FIELD(EFUSE_IMR, READ_ON_CACHE_LD, 7, 1)
    FIELD(EFUSE_IMR, CACHE_FSM_ERROR, 6, 1)
    FIELD(EFUSE_IMR, MAIN_FSM_ERROR, 5, 1)
    FIELD(EFUSE_IMR, CACHE_ERROR, 4, 1)
    FIELD(EFUSE_IMR, RD_ERROR, 3, 1)
    FIELD(EFUSE_IMR, RD_DONE, 2, 1)
    FIELD(EFUSE_IMR, PGM_ERROR, 1, 1)
    FIELD(EFUSE_IMR, PGM_DONE, 0, 1)
REG32(EFUSE_IER, 0x38)
    FIELD(EFUSE_IER, APB_SLVERR, 31, 1)
    FIELD(EFUSE_IER, CACHE_PARITY_E2, 14, 1)
    FIELD(EFUSE_IER, CACHE_PARITY_E1, 13, 1)
    FIELD(EFUSE_IER, CACHE_PARITY_E0S, 12, 1)
    FIELD(EFUSE_IER, CACHE_PARITY_E0R, 11, 1)
    FIELD(EFUSE_IER, CACHE_APB_SLVERR, 10, 1)
    FIELD(EFUSE_IER, CACHE_REQ_ERROR, 9, 1)
    FIELD(EFUSE_IER, MAIN_REQ_ERROR, 8, 1)
    FIELD(EFUSE_IER, READ_ON_CACHE_LD, 7, 1)
    FIELD(EFUSE_IER, CACHE_FSM_ERROR, 6, 1)
    FIELD(EFUSE_IER, MAIN_FSM_ERROR, 5, 1)
    FIELD(EFUSE_IER, CACHE_ERROR, 4, 1)
    FIELD(EFUSE_IER, RD_ERROR, 3, 1)
    FIELD(EFUSE_IER, RD_DONE, 2, 1)
    FIELD(EFUSE_IER, PGM_ERROR, 1, 1)
    FIELD(EFUSE_IER, PGM_DONE, 0, 1)
REG32(EFUSE_IDR, 0x3c)
    FIELD(EFUSE_IDR, APB_SLVERR, 31, 1)
    FIELD(EFUSE_IDR, CACHE_PARITY_E2, 14, 1)
    FIELD(EFUSE_IDR, CACHE_PARITY_E1, 13, 1)
    FIELD(EFUSE_IDR, CACHE_PARITY_E0S, 12, 1)
    FIELD(EFUSE_IDR, CACHE_PARITY_E0R, 11, 1)
    FIELD(EFUSE_IDR, CACHE_APB_SLVERR, 10, 1)
    FIELD(EFUSE_IDR, CACHE_REQ_ERROR, 9, 1)
    FIELD(EFUSE_IDR, MAIN_REQ_ERROR, 8, 1)
    FIELD(EFUSE_IDR, READ_ON_CACHE_LD, 7, 1)
    FIELD(EFUSE_IDR, CACHE_FSM_ERROR, 6, 1)
    FIELD(EFUSE_IDR, MAIN_FSM_ERROR, 5, 1)
    FIELD(EFUSE_IDR, CACHE_ERROR, 4, 1)
    FIELD(EFUSE_IDR, RD_ERROR, 3, 1)
    FIELD(EFUSE_IDR, RD_DONE, 2, 1)
    FIELD(EFUSE_IDR, PGM_ERROR, 1, 1)
    FIELD(EFUSE_IDR, PGM_DONE, 0, 1)
REG32(EFUSE_CACHE_LOAD, 0x40)
    FIELD(EFUSE_CACHE_LOAD, LOAD, 0, 1)
REG32(EFUSE_PGM_LOCK, 0x44)
    FIELD(EFUSE_PGM_LOCK, SPK_ID_LOCK, 0, 1)
/* Writing a CRC here triggers the corresponding key-CRC check. */
REG32(EFUSE_AES_CRC, 0x48)
REG32(EFUSE_AES_USR_KEY0_CRC, 0x4c)
REG32(EFUSE_AES_USR_KEY1_CRC, 0x50)
REG32(EFUSE_PD, 0x54)
REG32(EFUSE_ANLG_OSC_SW_1LP, 0x60)
REG32(EFUSE_TEST_CTRL, 0x100)

/* Number of 32-bit registers in the map. */
#define R_MAX (R_EFUSE_TEST_CTRL + 1)

/* Value that must be written to WR_LOCK to unlock register writes. */
#define R_WR_LOCK_UNLOCK_PASSCODE   (0xDF0D)

/*
 * eFuse layout references:
 * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilnvm/src/xnvm_efuse_hw.h
 */

/* Extract the <row, column> portion of a PGM_ADDR-encoded bit position. */
#define BIT_POS_OF(A_) \
    ((uint32_t)((A_) & (R_EFUSE_PGM_ADDR_ROW_MASK | \
                        R_EFUSE_PGM_ADDR_COLUMN_MASK)))

/* Encode <row R_, column C_> in the PGM_ADDR bit-position format. */
#define BIT_POS(R_, C_) \
    ((uint32_t)((R_EFUSE_PGM_ADDR_ROW_MASK \
                 & ((R_) << R_EFUSE_PGM_ADDR_ROW_SHIFT)) \
                | \
                (R_EFUSE_PGM_ADDR_COLUMN_MASK \
                 & ((C_) << R_EFUSE_PGM_ADDR_COLUMN_SHIFT))))

/* True if the bit is in the tbit region (row 0, columns 28 and above). */
#define EFUSE_TBIT_POS(A_)          (BIT_POS_OF(A_) >= BIT_POS(0, 28))

/* Per-page anchor bits, auto-programmed by efuse_anchor_bits_check(). */
#define EFUSE_ANCHOR_ROW            (0)
#define EFUSE_ANCHOR_3_COL          (27)
#define EFUSE_ANCHOR_1_COL          (1)

/* Key storage ranges (page 0). */
#define EFUSE_AES_KEY_START         BIT_POS(12, 0)
#define EFUSE_AES_KEY_END           BIT_POS(19, 31)
#define EFUSE_USER_KEY_0_START      BIT_POS(20, 0)
#define EFUSE_USER_KEY_0_END        BIT_POS(27, 31)
#define EFUSE_USER_KEY_1_START      BIT_POS(28, 0)
#define EFUSE_USER_KEY_1_END        BIT_POS(35, 31)

/* Rows that may never be read back (the key material above). */
#define EFUSE_RD_BLOCKED_START      EFUSE_AES_KEY_START
#define EFUSE_RD_BLOCKED_END        EFUSE_USER_KEY_1_END

/* Positions of the write-lock fuses guarding individual regions. */
#define EFUSE_GLITCH_DET_WR_LK      BIT_POS(4, 31)
#define EFUSE_PPK0_WR_LK            BIT_POS(43, 6)
#define EFUSE_PPK1_WR_LK            BIT_POS(43, 7)
#define EFUSE_PPK2_WR_LK            BIT_POS(43, 8)
#define EFUSE_AES_WR_LK             BIT_POS(43, 11)
#define EFUSE_USER_KEY_0_WR_LK      BIT_POS(43, 13)
#define EFUSE_USER_KEY_1_WR_LK      BIT_POS(43, 15)
#define EFUSE_PUF_SYN_LK            BIT_POS(43, 16)
#define EFUSE_DNA_WR_LK             BIT_POS(43, 27)
#define EFUSE_BOOT_ENV_WR_LK        BIT_POS(43, 28)

/* Range locked by EFUSE_PGM_LOCK.SPK_ID_LOCK. */
#define EFUSE_PGM_LOCKED_START      BIT_POS(44, 0)
#define EFUSE_PGM_LOCKED_END        BIT_POS(51, 31)

/* PUF syndrome data page and range. */
#define EFUSE_PUF_PAGE              (2)
#define EFUSE_PUF_SYN_START         BIT_POS(129, 0)
#define EFUSE_PUF_SYN_END           BIT_POS(255, 27)

/* Lock fuses (in row 43) that disable the CRC checks of each key. */
#define EFUSE_KEY_CRC_LK_ROW           (43)
#define EFUSE_AES_KEY_CRC_LK_MASK      ((1U << 9) | (1U << 10))
#define EFUSE_USER_KEY_0_CRC_LK_MASK   (1U << 12)
#define EFUSE_USER_KEY_1_CRC_LK_MASK   (1U << 14)

/*
 * A handy macro to return value of an array element,
 * or a specific default if given index is out of bound.
 */
#define ARRAY_GET(A_, I_, D_) \
    ((unsigned int)(I_) < ARRAY_SIZE(A_) ? (A_)[I_] : (D_))
/* The regs[] array in the state struct must cover the full register map. */
QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxVersalEFuseCtrl *)0)->regs));

/*
 * One extra page-0 program-lock entry, as supplied through the
 * "pg0-lock" array property: 'row' is write-protected when the efuse
 * bit at position 'lk_bit' is programmed.
 */
typedef struct XlnxEFuseLkSpec {
    uint16_t row;       /* page-0 row number being protected */
    uint16_t lk_bit;    /* bit position of the protecting lock fuse */
} XlnxEFuseLkSpec;
  207. static void efuse_imr_update_irq(XlnxVersalEFuseCtrl *s)
  208. {
  209. bool pending = s->regs[R_EFUSE_ISR] & ~s->regs[R_EFUSE_IMR];
  210. qemu_set_irq(s->irq_efuse_imr, pending);
  211. }
  212. static void efuse_isr_postw(RegisterInfo *reg, uint64_t val64)
  213. {
  214. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque);
  215. efuse_imr_update_irq(s);
  216. }
  217. static uint64_t efuse_ier_prew(RegisterInfo *reg, uint64_t val64)
  218. {
  219. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque);
  220. uint32_t val = val64;
  221. s->regs[R_EFUSE_IMR] &= ~val;
  222. efuse_imr_update_irq(s);
  223. return 0;
  224. }
  225. static uint64_t efuse_idr_prew(RegisterInfo *reg, uint64_t val64)
  226. {
  227. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque);
  228. uint32_t val = val64;
  229. s->regs[R_EFUSE_IMR] |= val;
  230. efuse_imr_update_irq(s);
  231. return 0;
  232. }
  233. static void efuse_status_tbits_sync(XlnxVersalEFuseCtrl *s)
  234. {
  235. uint32_t check = xlnx_efuse_tbits_check(s->efuse);
  236. uint32_t val = s->regs[R_STATUS];
  237. val = FIELD_DP32(val, STATUS, EFUSE_0_TBIT, !!(check & (1 << 0)));
  238. val = FIELD_DP32(val, STATUS, EFUSE_1_TBIT, !!(check & (1 << 1)));
  239. val = FIELD_DP32(val, STATUS, EFUSE_2_TBIT, !!(check & (1 << 2)));
  240. s->regs[R_STATUS] = val;
  241. }
  242. static void efuse_anchor_bits_check(XlnxVersalEFuseCtrl *s)
  243. {
  244. unsigned page;
  245. if (!s->efuse || !s->efuse->init_tbits) {
  246. return;
  247. }
  248. for (page = 0; page < s->efuse->efuse_nr; page++) {
  249. uint32_t row = 0, bit;
  250. row = FIELD_DP32(row, EFUSE_PGM_ADDR, PAGE, page);
  251. row = FIELD_DP32(row, EFUSE_PGM_ADDR, ROW, EFUSE_ANCHOR_ROW);
  252. bit = FIELD_DP32(row, EFUSE_PGM_ADDR, COLUMN, EFUSE_ANCHOR_3_COL);
  253. if (!xlnx_efuse_get_bit(s->efuse, bit)) {
  254. xlnx_efuse_set_bit(s->efuse, bit);
  255. }
  256. bit = FIELD_DP32(row, EFUSE_PGM_ADDR, COLUMN, EFUSE_ANCHOR_1_COL);
  257. if (!xlnx_efuse_get_bit(s->efuse, bit)) {
  258. xlnx_efuse_set_bit(s->efuse, bit);
  259. }
  260. }
  261. }
/*
 * Perform a key-CRC check and publish the result in STATUS.
 *
 * @reg: the CRC register being written (its opaque is the controller)
 * @crc: guest-supplied CRC to compare against the stored 256-bit key
 * @pass_mask/@done_mask: STATUS bits of this key's PASS/DONE flags
 * @first: efuse bit position where the key starts
 * @lk_mask: bits in row EFUSE_KEY_CRC_LK_ROW that disable this check
 *
 * DONE is always set.  PASS ends up set only when the check is not
 * disabled by lock fuses and the CRC matches (assuming
 * xlnx_efuse_k256_check() returns true on a match - defined elsewhere).
 */
static void efuse_key_crc_check(RegisterInfo *reg, uint32_t crc,
                                uint32_t pass_mask, uint32_t done_mask,
                                unsigned first, uint32_t lk_mask)
{
    XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque);
    uint32_t r, lk_bits;

    /*
     * To start, assume both DONE and PASS, and clear PASS by xor
     * if CRC-check fails or CRC-check disabled by lock fuse.
     */
    r = s->regs[R_STATUS] | done_mask | pass_mask;

    lk_bits = xlnx_efuse_get_row(s->efuse, EFUSE_KEY_CRC_LK_ROW) & lk_mask;
    if (lk_bits == 0 && xlnx_efuse_k256_check(s->efuse, crc, first)) {
        /* Check allowed and passed: leave PASS set (xor with 0 below). */
        pass_mask = 0;
    }

    s->regs[R_STATUS] = r ^ pass_mask;
}
  279. static void efuse_data_sync(XlnxVersalEFuseCtrl *s)
  280. {
  281. efuse_status_tbits_sync(s);
  282. }
  283. static int efuse_lk_spec_cmp(const void *a, const void *b)
  284. {
  285. uint16_t r1 = ((const XlnxEFuseLkSpec *)a)->row;
  286. uint16_t r2 = ((const XlnxEFuseLkSpec *)b)->row;
  287. return (r1 > r2) - (r1 < r2);
  288. }
  289. static void efuse_lk_spec_sort(XlnxVersalEFuseCtrl *s)
  290. {
  291. XlnxEFuseLkSpec *ary = s->extra_pg0_lock_spec;
  292. const uint32_t n8 = s->extra_pg0_lock_n16 * 2;
  293. const uint32_t sz = sizeof(ary[0]);
  294. const uint32_t cnt = n8 / sz;
  295. if (ary && cnt) {
  296. qsort(ary, cnt, sz, efuse_lk_spec_cmp);
  297. }
  298. }
  299. static uint32_t efuse_lk_spec_find(XlnxVersalEFuseCtrl *s, uint32_t row)
  300. {
  301. const XlnxEFuseLkSpec *ary = s->extra_pg0_lock_spec;
  302. const uint32_t n8 = s->extra_pg0_lock_n16 * 2;
  303. const uint32_t sz = sizeof(ary[0]);
  304. const uint32_t cnt = n8 / sz;
  305. const XlnxEFuseLkSpec *item = NULL;
  306. if (ary && cnt) {
  307. XlnxEFuseLkSpec k = { .row = row, };
  308. item = bsearch(&k, ary, cnt, sz, efuse_lk_spec_cmp);
  309. }
  310. return item ? item->lk_bit : 0;
  311. }
/*
 * Return the position of the lock fuse guarding the page-0 row that
 * contains 'bit', or 0 when the row has no write-lock.  Hard-coded
 * locks take precedence over property-supplied extra locks.
 */
static uint32_t efuse_bit_locked(XlnxVersalEFuseCtrl *s, uint32_t bit)
{
    /* Hard-coded locks, indexed by row number (GNU range designators). */
    static const uint16_t pg0_hard_lock[] = {
        [4] = EFUSE_GLITCH_DET_WR_LK,
        [37] = EFUSE_BOOT_ENV_WR_LK,

        [8 ... 11]  = EFUSE_DNA_WR_LK,
        [12 ... 19] = EFUSE_AES_WR_LK,
        [20 ... 27] = EFUSE_USER_KEY_0_WR_LK,
        [28 ... 35] = EFUSE_USER_KEY_1_WR_LK,
        [64 ... 71] = EFUSE_PPK0_WR_LK,
        [72 ... 79] = EFUSE_PPK1_WR_LK,
        [80 ... 87] = EFUSE_PPK2_WR_LK,
    };

    uint32_t row = FIELD_EX32(bit, EFUSE_PGM_ADDR, ROW);
    /* Rows beyond the table fall back to 0 (no hard lock). */
    uint32_t lk_bit = ARRAY_GET(pg0_hard_lock, row, 0);

    return lk_bit ? lk_bit : efuse_lk_spec_find(s, row);
}
/*
 * Decide whether programming of efuse 'bit' is currently denied.
 *
 * Checks, in order: the global CFG.PGM_EN enable, the sticky
 * SPK_ID_LOCK range on page 0, per-row lock fuses (page 0 and the PUF
 * page).  Pages other than 0 and the PUF page are never locked here.
 *
 * Note: 'lock' temporarily holds a lock-fuse *position* before being
 * resolved to the fuse's programmed state; nonzero result means locked.
 */
static bool efuse_pgm_locked(XlnxVersalEFuseCtrl *s, unsigned int bit)
{
    unsigned int lock = 1;

    /* Global lock */
    if (!ARRAY_FIELD_EX32(s->regs, CFG, PGM_EN)) {
        goto ret_lock;
    }

    /* Row lock */
    switch (FIELD_EX32(bit, EFUSE_PGM_ADDR, PAGE)) {
    case 0:
        /* SPK-ID range is locked outright once SPK_ID_LOCK is set. */
        if (ARRAY_FIELD_EX32(s->regs, EFUSE_PGM_LOCK, SPK_ID_LOCK) &&
            bit >= EFUSE_PGM_LOCKED_START && bit <= EFUSE_PGM_LOCKED_END) {
            goto ret_lock;
        }

        /* Otherwise, look up the row's lock-fuse position (0 = none). */
        lock = efuse_bit_locked(s, bit);
        break;
    case EFUSE_PUF_PAGE:
        if (bit < EFUSE_PUF_SYN_START || bit > EFUSE_PUF_SYN_END) {
            lock = 0;
            goto ret_lock;
        }

        lock = EFUSE_PUF_SYN_LK;
        break;
    default:
        lock = 0;
        goto ret_lock;
    }

    /* Row lock by an efuse bit */
    if (lock) {
        lock = xlnx_efuse_get_bit(s->efuse, lock);
    }

ret_lock:
    return lock != 0;
}
  364. static void efuse_pgm_addr_postw(RegisterInfo *reg, uint64_t val64)
  365. {
  366. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque);
  367. unsigned bit = val64;
  368. bool ok = false;
  369. /* Always zero out PGM_ADDR because it is write-only */
  370. s->regs[R_EFUSE_PGM_ADDR] = 0;
  371. /*
  372. * Indicate error if bit is write-protected (or read-only
  373. * as guarded by efuse_set_bit()).
  374. *
  375. * Keep it simple by not modeling program timing.
  376. *
  377. * Note: model must NEVER clear the PGM_ERROR bit; it is
  378. * up to guest to do so (or by reset).
  379. */
  380. if (efuse_pgm_locked(s, bit)) {
  381. g_autofree char *path = object_get_canonical_path(OBJECT(s));
  382. qemu_log_mask(LOG_GUEST_ERROR,
  383. "%s: Denied setting of efuse<%u, %u, %u>\n",
  384. path,
  385. FIELD_EX32(bit, EFUSE_PGM_ADDR, PAGE),
  386. FIELD_EX32(bit, EFUSE_PGM_ADDR, ROW),
  387. FIELD_EX32(bit, EFUSE_PGM_ADDR, COLUMN));
  388. } else if (xlnx_efuse_set_bit(s->efuse, bit)) {
  389. ok = true;
  390. if (EFUSE_TBIT_POS(bit)) {
  391. efuse_status_tbits_sync(s);
  392. }
  393. }
  394. if (!ok) {
  395. ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 1);
  396. }
  397. ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_DONE, 1);
  398. efuse_imr_update_irq(s);
  399. }
  400. static void efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64)
  401. {
  402. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque);
  403. unsigned bit = val64;
  404. bool denied;
  405. /* Always zero out RD_ADDR because it is write-only */
  406. s->regs[R_EFUSE_RD_ADDR] = 0;
  407. /*
  408. * Indicate error if row is read-blocked.
  409. *
  410. * Note: model must NEVER clear the RD_ERROR bit; it is
  411. * up to guest to do so (or by reset).
  412. */
  413. s->regs[R_EFUSE_RD_DATA] = xlnx_versal_efuse_read_row(s->efuse,
  414. bit, &denied);
  415. if (denied) {
  416. g_autofree char *path = object_get_canonical_path(OBJECT(s));
  417. qemu_log_mask(LOG_GUEST_ERROR,
  418. "%s: Denied reading of efuse<%u, %u>\n",
  419. path,
  420. FIELD_EX32(bit, EFUSE_RD_ADDR, PAGE),
  421. FIELD_EX32(bit, EFUSE_RD_ADDR, ROW));
  422. ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 1);
  423. }
  424. ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1);
  425. efuse_imr_update_irq(s);
  426. return;
  427. }
  428. static uint64_t efuse_cache_load_prew(RegisterInfo *reg, uint64_t val64)
  429. {
  430. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque);
  431. if (val64 & R_EFUSE_CACHE_LOAD_LOAD_MASK) {
  432. efuse_data_sync(s);
  433. ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1);
  434. efuse_imr_update_irq(s);
  435. }
  436. return 0;
  437. }
  438. static uint64_t efuse_pgm_lock_prew(RegisterInfo *reg, uint64_t val64)
  439. {
  440. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque);
  441. /* Ignore all other bits */
  442. val64 = FIELD_EX32(val64, EFUSE_PGM_LOCK, SPK_ID_LOCK);
  443. /* Once the bit is written 1, only reset will clear it to 0 */
  444. val64 |= ARRAY_FIELD_EX32(s->regs, EFUSE_PGM_LOCK, SPK_ID_LOCK);
  445. return val64;
  446. }
/* Writing EFUSE_AES_CRC runs the CRC check of the AES key. */
static void efuse_aes_crc_postw(RegisterInfo *reg, uint64_t val64)
{
    efuse_key_crc_check(reg, val64,
                        R_STATUS_AES_CRC_PASS_MASK,
                        R_STATUS_AES_CRC_DONE_MASK,
                        EFUSE_AES_KEY_START,
                        EFUSE_AES_KEY_CRC_LK_MASK);
}
/* Writing EFUSE_AES_USR_KEY0_CRC runs the CRC check of user key 0. */
static void efuse_aes_u0_crc_postw(RegisterInfo *reg, uint64_t val64)
{
    efuse_key_crc_check(reg, val64,
                        R_STATUS_AES_USER_KEY_0_CRC_PASS_MASK,
                        R_STATUS_AES_USER_KEY_0_CRC_DONE_MASK,
                        EFUSE_USER_KEY_0_START,
                        EFUSE_USER_KEY_0_CRC_LK_MASK);
}
/* Writing EFUSE_AES_USR_KEY1_CRC runs the CRC check of user key 1. */
static void efuse_aes_u1_crc_postw(RegisterInfo *reg, uint64_t val64)
{
    efuse_key_crc_check(reg, val64,
                        R_STATUS_AES_USER_KEY_1_CRC_PASS_MASK,
                        R_STATUS_AES_USER_KEY_1_CRC_DONE_MASK,
                        EFUSE_USER_KEY_1_START,
                        EFUSE_USER_KEY_1_CRC_LK_MASK);
}
  471. static uint64_t efuse_wr_lock_prew(RegisterInfo *reg, uint64_t val)
  472. {
  473. return val != R_WR_LOCK_UNLOCK_PASSCODE;
  474. }
/*
 * Static per-register access descriptions (reset values, reserved and
 * read-only masks, w1c masks, write hooks), consumed by
 * register_init_block32() in efuse_ctrl_init().
 */
static const RegisterAccessInfo efuse_ctrl_regs_info[] = {
    {   .name = "WR_LOCK",  .addr = A_WR_LOCK,
        .reset = 0x1,       /* locked out of reset */
        .pre_write = efuse_wr_lock_prew,
    },{ .name = "CFG",  .addr = A_CFG,
        .rsvd = 0x9,
    },{ .name = "STATUS",  .addr = A_STATUS,
        .rsvd = 0x8,
        .ro = 0xfff,
    },{ .name = "EFUSE_PGM_ADDR",  .addr = A_EFUSE_PGM_ADDR,
        .post_write = efuse_pgm_addr_postw,
    },{ .name = "EFUSE_RD_ADDR",  .addr = A_EFUSE_RD_ADDR,
        .rsvd = 0x1f,
        .post_write = efuse_rd_addr_postw,
    },{ .name = "EFUSE_RD_DATA",  .addr = A_EFUSE_RD_DATA,
        .ro = 0xffffffff,
    },{ .name = "TPGM",  .addr = A_TPGM,
    },{ .name = "TRD",  .addr = A_TRD,
        .reset = 0x19,
    },{ .name = "TSU_H_PS",  .addr = A_TSU_H_PS,
        .reset = 0xff,
    },{ .name = "TSU_H_PS_CS",  .addr = A_TSU_H_PS_CS,
        .reset = 0x11,
    },{ .name = "TRDM",  .addr = A_TRDM,
        .reset = 0x3a,
    },{ .name = "TSU_H_CS",  .addr = A_TSU_H_CS,
        .reset = 0x16,
    },{ .name = "EFUSE_ISR",  .addr = A_EFUSE_ISR,
        .rsvd = 0x7fff8000,
        .w1c = 0x80007fff,      /* write-1-to-clear status bits */
        .post_write = efuse_isr_postw,
    },{ .name = "EFUSE_IMR",  .addr = A_EFUSE_IMR,
        .reset = 0x80007fff,    /* all interrupts masked out of reset */
        .rsvd = 0x7fff8000,
        .ro = 0xffffffff,       /* modified only via IER/IDR */
    },{ .name = "EFUSE_IER",  .addr = A_EFUSE_IER,
        .rsvd = 0x7fff8000,
        .pre_write = efuse_ier_prew,
    },{ .name = "EFUSE_IDR",  .addr = A_EFUSE_IDR,
        .rsvd = 0x7fff8000,
        .pre_write = efuse_idr_prew,
    },{ .name = "EFUSE_CACHE_LOAD",  .addr = A_EFUSE_CACHE_LOAD,
        .pre_write = efuse_cache_load_prew,
    },{ .name = "EFUSE_PGM_LOCK",  .addr = A_EFUSE_PGM_LOCK,
        .pre_write = efuse_pgm_lock_prew,
    },{ .name = "EFUSE_AES_CRC",  .addr = A_EFUSE_AES_CRC,
        .post_write = efuse_aes_crc_postw,
    },{ .name = "EFUSE_AES_USR_KEY0_CRC",  .addr = A_EFUSE_AES_USR_KEY0_CRC,
        .post_write = efuse_aes_u0_crc_postw,
    },{ .name = "EFUSE_AES_USR_KEY1_CRC",  .addr = A_EFUSE_AES_USR_KEY1_CRC,
        .post_write = efuse_aes_u1_crc_postw,
    },{ .name = "EFUSE_PD",  .addr = A_EFUSE_PD,
        .ro = 0xfffffffe,
    },{ .name = "EFUSE_ANLG_OSC_SW_1LP",  .addr = A_EFUSE_ANLG_OSC_SW_1LP,
    },{ .name = "EFUSE_TEST_CTRL",  .addr = A_EFUSE_TEST_CTRL,
        .reset = 0x8,
    }
};
  533. static void efuse_ctrl_reg_write(void *opaque, hwaddr addr,
  534. uint64_t data, unsigned size)
  535. {
  536. RegisterInfoArray *reg_array = opaque;
  537. XlnxVersalEFuseCtrl *s;
  538. Object *dev;
  539. assert(reg_array != NULL);
  540. dev = reg_array->mem.owner;
  541. assert(dev);
  542. s = XLNX_VERSAL_EFUSE_CTRL(dev);
  543. if (addr != A_WR_LOCK && s->regs[R_WR_LOCK]) {
  544. g_autofree char *path = object_get_canonical_path(OBJECT(s));
  545. qemu_log_mask(LOG_GUEST_ERROR,
  546. "%s[reg_0x%02lx]: Attempt to write locked register.\n",
  547. path, (long)addr);
  548. } else {
  549. register_write_memory(opaque, addr, data, size);
  550. }
  551. }
/*
 * Reset a single register.  The CRC-check registers are reset by
 * storing their reset value directly, bypassing register_reset(), so
 * that reset does not trigger their post-write CRC-check side effects.
 */
static void efuse_ctrl_register_reset(RegisterInfo *reg)
{
    if (!reg->data || !reg->access) {
        return;
    }

    /* Reset must not trigger some registers' writers */
    switch (reg->access->addr) {
    case A_EFUSE_AES_CRC:
    case A_EFUSE_AES_USR_KEY0_CRC:
    case A_EFUSE_AES_USR_KEY1_CRC:
        /* Plain store; no hooks run.  (No default: fall out to below.) */
        *(uint32_t *)reg->data = reg->access->reset;
        return;
    }

    register_reset(reg);
}
  567. static void efuse_ctrl_reset_hold(Object *obj, ResetType type)
  568. {
  569. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj);
  570. unsigned int i;
  571. for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
  572. efuse_ctrl_register_reset(&s->regs_info[i]);
  573. }
  574. efuse_anchor_bits_check(s);
  575. efuse_data_sync(s);
  576. efuse_imr_update_irq(s);
  577. }
/* MMIO ops: 32-bit little-endian accesses only; writes gated by WR_LOCK. */
static const MemoryRegionOps efuse_ctrl_ops = {
    .read = register_read_memory,
    .write = efuse_ctrl_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  587. static void efuse_ctrl_realize(DeviceState *dev, Error **errp)
  588. {
  589. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev);
  590. const uint32_t lks_sz = sizeof(XlnxEFuseLkSpec) / 2;
  591. if (!s->efuse) {
  592. g_autofree char *path = object_get_canonical_path(OBJECT(s));
  593. error_setg(errp, "%s.efuse: link property not connected to XLNX-EFUSE",
  594. path);
  595. return;
  596. }
  597. /* Sort property-defined pgm-locks for bsearch lookup */
  598. if ((s->extra_pg0_lock_n16 % lks_sz) != 0) {
  599. g_autofree char *path = object_get_canonical_path(OBJECT(s));
  600. error_setg(errp,
  601. "%s.pg0-lock: array property item-count not multiple of %u",
  602. path, lks_sz);
  603. return;
  604. }
  605. efuse_lk_spec_sort(s);
  606. }
  607. static void efuse_ctrl_init(Object *obj)
  608. {
  609. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj);
  610. SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
  611. s->reg_array =
  612. register_init_block32(DEVICE(obj), efuse_ctrl_regs_info,
  613. ARRAY_SIZE(efuse_ctrl_regs_info),
  614. s->regs_info, s->regs,
  615. &efuse_ctrl_ops,
  616. XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG,
  617. R_MAX * 4);
  618. sysbus_init_mmio(sbd, &s->reg_array->mem);
  619. sysbus_init_irq(sbd, &s->irq_efuse_imr);
  620. }
  621. static void efuse_ctrl_finalize(Object *obj)
  622. {
  623. XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj);
  624. register_finalize_block(s->reg_array);
  625. g_free(s->extra_pg0_lock_spec);
  626. }
/* Migration state: only the raw register array needs to be saved. */
static const VMStateDescription vmstate_efuse_ctrl = {
    .name = TYPE_XLNX_VERSAL_EFUSE_CTRL,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, XlnxVersalEFuseCtrl, R_MAX),
        VMSTATE_END_OF_LIST(),
    }
};
static const Property efuse_ctrl_props[] = {
    /* Backend efuse storage; mandatory (checked in realize). */
    DEFINE_PROP_LINK("efuse",
                     XlnxVersalEFuseCtrl, efuse,
                     TYPE_XLNX_EFUSE, XlnxEFuse *),
    /* Extra page-0 locks: uint16 pairs of (row, lk_bit); see XlnxEFuseLkSpec. */
    DEFINE_PROP_ARRAY("pg0-lock",
                      XlnxVersalEFuseCtrl, extra_pg0_lock_n16,
                      extra_pg0_lock_spec, qdev_prop_uint16, uint16_t),
};
  644. static void efuse_ctrl_class_init(ObjectClass *klass, void *data)
  645. {
  646. DeviceClass *dc = DEVICE_CLASS(klass);
  647. ResettableClass *rc = RESETTABLE_CLASS(klass);
  648. rc->phases.hold = efuse_ctrl_reset_hold;
  649. dc->realize = efuse_ctrl_realize;
  650. dc->vmsd = &vmstate_efuse_ctrl;
  651. device_class_set_props(dc, efuse_ctrl_props);
  652. }
/* QOM type registration data for the Versal eFuse controller. */
static const TypeInfo efuse_ctrl_info = {
    .name          = TYPE_XLNX_VERSAL_EFUSE_CTRL,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxVersalEFuseCtrl),
    .class_init    = efuse_ctrl_class_init,
    .instance_init = efuse_ctrl_init,
    .instance_finalize = efuse_ctrl_finalize,
};
/* Register the device type with QOM at module-load time. */
static void efuse_ctrl_register_types(void)
{
    type_register_static(&efuse_ctrl_info);
}

type_init(efuse_ctrl_register_types)
  666. /*
  667. * Retrieve a row, with unreadable bits returned as 0.
  668. */
  669. uint32_t xlnx_versal_efuse_read_row(XlnxEFuse *efuse,
  670. uint32_t bit, bool *denied)
  671. {
  672. bool dummy;
  673. if (!denied) {
  674. denied = &dummy;
  675. }
  676. if (bit >= EFUSE_RD_BLOCKED_START && bit <= EFUSE_RD_BLOCKED_END) {
  677. *denied = true;
  678. return 0;
  679. }
  680. *denied = false;
  681. return xlnx_efuse_get_row(efuse, bit);
  682. }