/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"
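
/*
 * Register offsets are byte offsets into the MMIO window, divided by 4 so
 * they index the 32-bit regs[] array directly.
 */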
#define R_CRYPT_CMD     (0x10 / 4)

#define R_STATUS        (0x1c / 4)
#define HASH_IRQ        BIT(9)
#define CRYPT_IRQ       BIT(12)
#define TAG_IRQ         BIT(15)

#define R_HASH_SRC      (0x20 / 4)
#define R_HASH_DEST     (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN  (0x2c / 4)

#define R_HASH_CMD      (0x30 / 4)
/* Hash algorithm selection */
#define  HASH_ALGO_MASK          (BIT(4) | BIT(5) | BIT(6))
#define  HASH_ALGO_MD5           0
#define  HASH_ALGO_SHA1          BIT(5)
#define  HASH_ALGO_SHA224        BIT(6)
#define  HASH_ALGO_SHA256        (BIT(4) | BIT(6))
#define  HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6))
/* SHA512 algorithm selection */
#define  SHA512_HASH_ALGO_MASK   (BIT(10) | BIT(11) | BIT(12))
#define  HASH_ALGO_SHA512_SHA512 0
#define  HASH_ALGO_SHA512_SHA384 BIT(10)
#define  HASH_ALGO_SHA512_SHA256 BIT(11)
#define  HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11))
/* HMAC modes */
#define  HASH_HMAC_MASK          (BIT(7) | BIT(8))
#define  HASH_DIGEST             0
#define  HASH_DIGEST_HMAC        BIT(7)
#define  HASH_DIGEST_ACCUM       BIT(8)
#define  HASH_HMAC_KEY           (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define  HASH_ONLY               0
#define  HASH_ONLY2              BIT(0)
#define  HASH_CRYPT_THEN_HASH    BIT(1)
#define  HASH_HASH_THEN_CRYPT    (BIT(0) | BIT(1))
/* Other cmd bits */
#define  HASH_IRQ_EN             BIT(9)
#define  HASH_SG_EN              BIT(18)
#define  CRYPT_IRQ_EN            BIT(12)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE         4
#define SG_LIST_LEN_MASK         0x0FFFFFFF
#define SG_LIST_LEN_LAST         BIT(31)
#define SG_LIST_ADDR_SIZE        4
#define SG_LIST_ADDR_MASK        0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE       (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
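
/*
 * Map the algorithm selection bits of the hash command register to the QEMU
 * crypto hash algorithm used to emulate the operation.
 */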
static const struct {
    uint32_t mask;
    QCryptoHashAlgo algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALGO_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALGO_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALGO_SHA256 },
};
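
/*
 * Return the QCryptoHashAlgo matching the algorithm bits of @reg, or -1 if
 * the guest selected an unsupported combination.
 */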
static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}

/**
 * Check whether the request contains a padding message.
 *
 * @param s aspeed hace state object
 * @param iov iov of the current request
 * @param req_len length of the current request
 * @param total_msg_len length of all acc_mode requests (excluding the
 *                      padding message)
 * @param pad_offset start offset of the padding message
 */
static bool has_padding(AspeedHACEState *s, struct iovec *iov,
                        hwaddr req_len, uint32_t *total_msg_len,
                        uint32_t *pad_offset)
{
    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);

    /*
     * SG_LIST_LEN_LAST asserted in the request length does not mean that
     * this is the last request: the last request is the one that contains
     * the padding message. We detect padding as follows:
     * 1. Read the total message length. If the current request contains
     *    padding, its last 8 bytes hold the total message length.
     * 2. Check that the total message length is valid: it must be less
     *    than or equal to total_req_len.
     * 3. Subtract the padding size from the current request length to get
     *    the padding offset. The first byte of the padding message must
     *    be 0x80.
     */
    if (*total_msg_len <= s->total_req_len) {
        uint32_t padding_size = s->total_req_len - *total_msg_len;
        uint8_t *padding = iov->iov_base;

        if (padding_size > req_len) {
            return false;
        }

        *pad_offset = req_len - padding_size;
        if (padding[*pad_offset] == 0x80) {
            return true;
        }
    }

    return false;
}
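
/*
 * Rebuild the iov array for the final hash from the iovecs cached by earlier
 * accumulative-mode requests plus the current request (truncated at the
 * padding offset), then reset the cache. Returns the number of valid entries
 * now in @iov.
 */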
static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
                           uint32_t *pad_offset)
{
    int i, iov_count;

    if (*pad_offset != 0) {
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *pad_offset;
        ++s->iov_count;
    }

    for (i = 0; i < s->iov_count; i++) {
        iov[i].iov_base = s->iov_cache[i].iov_base;
        iov[i].iov_len = s->iov_cache[i].iov_len;
    }

    iov_count = s->iov_count;
    s->iov_count = 0;
    s->total_req_len = 0;

    return iov_count;
}
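
/*
 * Run one hash operation. In scatter-gather mode the source register points
 * at a list of (length, address) pairs in DRAM; otherwise it points directly
 * at the data. In accumulative mode the digest is only finalized once a
 * request carrying the padding message is seen.
 */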
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    uint32_t total_msg_len;
    uint32_t pad_offset;
    g_autofree uint8_t *digest_buf = NULL;
    size_t digest_len = 0;
    bool sg_acc_mode_final_request = false;
    int i;
    void *haddr;
    Error *local_err = NULL;

    if (acc_mode && s->hash_ctx == NULL) {
        s->hash_ctx = qcrypto_hash_new(algo, &local_err);
        if (s->hash_ctx == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash failed : %s",
                          error_get_pretty(local_err));
            error_free(local_err);
            return;
        }
    }

    if (sg_mode) {
        uint32_t len = 0;

        for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
            uint32_t addr, src;
            hwaddr plen;

            if (i == ASPEED_HACE_MAX_SG) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "aspeed_hace: guest failed to set end of sg list marker\n");
                break;
            }

            src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);

            len = address_space_ldl_le(&s->dram_as, src,
                                       MEMTXATTRS_UNSPECIFIED, NULL);

            addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                        MEMTXATTRS_UNSPECIFIED, NULL);
            addr &= SG_LIST_ADDR_MASK;

            plen = len & SG_LIST_LEN_MASK;
            haddr = address_space_map(&s->dram_as, addr, &plen, false,
                                      MEMTXATTRS_UNSPECIFIED);
            if (haddr == NULL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: failed to map dram\n", __func__);
                return;
            }
            iov[i].iov_base = haddr;
            if (acc_mode) {
                s->total_req_len += plen;

                if (has_padding(s, &iov[i], plen, &total_msg_len,
                                &pad_offset)) {
                    /* Padding being present indicates the final request */
                    sg_acc_mode_final_request = true;
                    iov[i].iov_len = pad_offset;
                } else {
                    iov[i].iov_len = plen;
                }
            } else {
                iov[i].iov_len = plen;
            }
        }
    } else {
        hwaddr len = s->regs[R_HASH_SRC_LEN];

        haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
                                  &len, false, MEMTXATTRS_UNSPECIFIED);
        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map dram\n",
                          __func__);
            return;
        }
        iov[0].iov_base = haddr;
        iov[0].iov_len = len;
        i = 1;

        if (s->iov_count) {
            /*
             * In the Aspeed SDK kernel driver, sg_mode is disabled in
             * hash_final(). Thus, when we receive a request with sg_mode
             * disabled, we need to check whether the iov cache is empty;
             * if not, we must combine the cached iovecs with the current
             * one.
             */
            s->total_req_len += len;

            if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
                i = reconstruct_iov(s, iov, 0, &pad_offset);
            }
        }
    }

    if (acc_mode) {
        if (qcrypto_hash_updatev(s->hash_ctx, iov, i, &local_err) < 0) {
            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash update failed : %s",
                          error_get_pretty(local_err));
            error_free(local_err);
            return;
        }

        if (sg_acc_mode_final_request) {
            if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
                                            &digest_len, &local_err)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "qcrypto hash finalize failed : %s",
                              error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
            }

            qcrypto_hash_free(s->hash_ctx);

            s->hash_ctx = NULL;
            s->iov_count = 0;
            s->total_req_len = 0;
        }
    } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
                                   &digest_len, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash bytesv failed : %s",
                      error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "aspeed_hace: address space write failed\n");
    }

    for (; i > 0; i--) {
        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                            iov[i - 1].iov_len, false,
                            iov[i - 1].iov_len);
    }

    /*
     * Set status bits to indicate completion. Testing shows hardware sets
     * these irrespective of HASH_IRQ_EN.
     */
    s->regs[R_STATUS] |= HASH_IRQ;
}
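
/*
 * MMIO read handler: byte offsets are scaled down to indexes into the 32-bit
 * regs[] array; out-of-bounds reads return zero.
 */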
static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return 0;
    }

    return s->regs[addr];
}
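
/*
 * MMIO write handler: interrupt status bits are write-one-to-clear, address
 * registers are masked to the SoC's supported range, and a write to the hash
 * command register kicks off the operation.
 */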
static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
    case R_STATUS:
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        if (ahc->raise_crypt_interrupt_workaround) {
            if (data & CRYPT_IRQ) {
                data &= ~CRYPT_IRQ;

                if (s->regs[addr] & CRYPT_IRQ) {
                    qemu_irq_lower(s->irq);
                }
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                          __func__, data & ahc->hash_mask);
            break;
        }
        do_hash_operation(s, algo, data & HASH_SG_EN,
                          ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);

        if (ahc->raise_crypt_interrupt_workaround) {
            s->regs[R_STATUS] |= CRYPT_IRQ;
            if (data & CRYPT_IRQ_EN) {
                qemu_irq_raise(s->irq);
            }
        }
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}

static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
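
/*
 * Device reset: drop any in-flight accumulative hash context and clear all
 * registers and cached request state.
 */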
static void aspeed_hace_reset(DeviceState *dev)
{
    struct AspeedHACEState *s = ASPEED_HACE(dev);

    if (s->hash_ctx != NULL) {
        qcrypto_hash_free(s->hash_ctx);
        s->hash_ctx = NULL;
    }

    memset(s->regs, 0, sizeof(s->regs));
    s->iov_count = 0;
    s->total_req_len = 0;
}
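
/*
 * Realize: the "dram" link property must be set so the engine has an address
 * space through which to fetch hash input and write the digest back.
 */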
static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
                          TYPE_ASPEED_HACE, 0x1000);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}

static const Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};
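
/* Migration state: the register file plus the accumulative-mode counters. */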
static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_UINT32(iov_count, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};

static void aspeed_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    device_class_set_legacy_reset(dc, aspeed_hace_reset);
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}

static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};
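
/*
 * The per-SoC subclasses below differ only in their address and command
 * masks, which follow each SoC's DRAM addressing range and the hash command
 * bits it supports.
 */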
static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};

static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->src_mask = 0x3fffffff;
    ahc->dest_mask = 0x3ffffff8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};

static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};

static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};

static void aspeed_ast2700_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2700 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;

    /*
     * The CRYPT command is not supported yet. As a temporary workaround, an
     * interrupt is raised to notify the firmware that the crypt command has
     * completed.
     */
    ahc->raise_crypt_interrupt_workaround = true;
}

static const TypeInfo aspeed_ast2700_hace_info = {
    .name = TYPE_ASPEED_AST2700_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2700_hace_class_init,
};

static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_ast2700_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);