aspeed_hace.c

/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"

#define R_CRYPT_CMD     (0x10 / 4)

#define R_STATUS        (0x1c / 4)
#define HASH_IRQ        BIT(9)
#define CRYPT_IRQ       BIT(12)
#define TAG_IRQ         BIT(15)

#define R_HASH_SRC      (0x20 / 4)
#define R_HASH_DEST     (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN  (0x2c / 4)

#define R_HASH_CMD      (0x30 / 4)

/* Hash algorithm selection */
#define HASH_ALGO_MASK          (BIT(4) | BIT(5) | BIT(6))
#define HASH_ALGO_MD5           0
#define HASH_ALGO_SHA1          BIT(5)
#define HASH_ALGO_SHA224        BIT(6)
#define HASH_ALGO_SHA256        (BIT(4) | BIT(6))
#define HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6))

/* SHA512 algorithm selection */
#define SHA512_HASH_ALGO_MASK   (BIT(10) | BIT(11) | BIT(12))
#define HASH_ALGO_SHA512_SHA512 0
#define HASH_ALGO_SHA512_SHA384 BIT(10)
#define HASH_ALGO_SHA512_SHA256 BIT(11)
#define HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11))

/* HMAC modes */
#define HASH_HMAC_MASK          (BIT(7) | BIT(8))
#define HASH_DIGEST             0
#define HASH_DIGEST_HMAC        BIT(7)
#define HASH_DIGEST_ACCUM       BIT(8)
#define HASH_HMAC_KEY           (BIT(7) | BIT(8))

/* Cascaded operation modes */
#define HASH_ONLY               0
#define HASH_ONLY2              BIT(0)
#define HASH_CRYPT_THEN_HASH    BIT(1)
#define HASH_HASH_THEN_CRYPT    (BIT(0) | BIT(1))

/* Other cmd bits */
#define HASH_IRQ_EN             BIT(9)
#define HASH_SG_EN              BIT(18)

/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE        4
#define SG_LIST_LEN_MASK        0x0FFFFFFF
#define SG_LIST_LEN_LAST        BIT(31)
#define SG_LIST_ADDR_SIZE       4
#define SG_LIST_ADDR_MASK       0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE      (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
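
/*
 * As the sg-mode code below reads it, each scatter-gather list entry in
 * guest DRAM is two consecutive little-endian 32-bit words: a length word
 * (SG_LIST_LEN_LAST in bit 31 marks the final entry) followed by a source
 * address word. An illustrative (made-up addresses) two-entry list hashing
 * 0x40 bytes at 0x00100000 and a final 0x20 bytes at 0x00200000 would be:
 *
 *   0x00000040 0x00100000   0x80000020 0x00200000
 */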

static const struct {
    uint32_t mask;
    QCryptoHashAlgorithm algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALG_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALG_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALG_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALG_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALG_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALG_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALG_SHA256 },
};
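
/*
 * Note that HASH_ALGO_SHA512_SHA224 has no entry in hash_algo_map, so a
 * guest selecting SHA-512/224 falls through hash_algo_lookup() below and
 * is reported as an invalid hash algorithm selection.
 */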

static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}

/**
 * Check whether the request contains a padding message.
 *
 * @param s aspeed hace state object
 * @param iov iov of the current request
 * @param req_len length of the current request
 * @param total_msg_len length of all acc_mode requests (excluding the
 *                      padding message)
 * @param pad_offset start offset of the padding message
 */
static bool has_padding(AspeedHACEState *s, struct iovec *iov,
                        hwaddr req_len, uint32_t *total_msg_len,
                        uint32_t *pad_offset)
{
    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);

    /*
     * SG_LIST_LEN_LAST asserted in the request length does not mean that
     * this is the last request; the last request is the one that contains
     * the padding message. We detect padding as follows:
     * 1. Read the total message length. If the current message contains
     *    padding, its last 8 bytes hold the total message length in bits.
     * 2. Check that the total message length is valid: it must be less
     *    than or equal to total_req_len.
     * 3. Compute the padding offset as the current request length minus
     *    the padding size. The first byte of the padding message must be
     *    0x80.
     */
    if (*total_msg_len <= s->total_req_len) {
        uint32_t padding_size = s->total_req_len - *total_msg_len;
        uint8_t *padding = iov->iov_base;
        *pad_offset = req_len - padding_size;
        if (padding[*pad_offset] == 0x80) {
            return true;
        }
    }

    return false;
}
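
/*
 * Illustrative example (standard MD-style padding, not taken from the
 * datasheet): for a single SHA-256 request hashing the 3-byte message
 * "abc", the guest submits one padded 64-byte block:
 *
 *   "abc" | 0x80 | 52 zero bytes | 0x0000000000000018 (bit length, BE)
 *
 * has_padding() then computes total_msg_len = 0x18 / 8 = 3, padding_size
 * = 64 - 3 = 61, pad_offset = 64 - 61 = 3, and finds padding[3] == 0x80,
 * so it returns true.
 */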

static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
                           uint32_t *pad_offset)
{
    int i, iov_count;

    if (*pad_offset != 0) {
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *pad_offset;
        ++s->iov_count;
    }

    for (i = 0; i < s->iov_count; i++) {
        iov[i].iov_base = s->iov_cache[i].iov_base;
        iov[i].iov_len = s->iov_cache[i].iov_len;
    }

    iov_count = s->iov_count;
    s->iov_count = 0;
    s->total_req_len = 0;

    return iov_count;
}

/**
 * Generate the iov for accumulative mode.
 *
 * @param s aspeed hace state object
 * @param iov iov of the current request
 * @param id index of the current iov
 * @param req_len length of the current request
 *
 * @return count of iov
 */
static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
                            hwaddr *req_len)
{
    uint32_t pad_offset;
    uint32_t total_msg_len;

    s->total_req_len += *req_len;

    if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
        if (s->iov_count) {
            return reconstruct_iov(s, iov, id, &pad_offset);
        }

        *req_len -= s->total_req_len - total_msg_len;
        s->total_req_len = 0;
        iov[id].iov_len = *req_len;
    } else {
        /* Cache the current request's data (not iov[0]'s) for later. */
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *req_len;
        ++s->iov_count;
    }

    return id + 1;
}
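
/*
 * Summary of the accumulative-mode flow implemented above: the guest may
 * split a message across several requests. Every request before the one
 * carrying the padding message is parked in s->iov_cache; once the padded
 * request arrives, reconstruct_iov() replays the cached entries (plus the
 * unpadded head of the final request) so the whole message is hashed in
 * one qcrypto call.
 */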

static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    /* Initialise to NULL so g_autofree is safe on the early-return paths */
    g_autofree uint8_t *digest_buf = NULL;
    size_t digest_len = 0;
    int niov = 0;
    int i;
    void *haddr;

    if (sg_mode) {
        uint32_t len = 0;

        for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
            uint32_t addr, src;
            hwaddr plen;

            if (i == ASPEED_HACE_MAX_SG) {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "aspeed_hace: guest failed to set end of sg list marker\n");
                break;
            }

            src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);

            len = address_space_ldl_le(&s->dram_as, src,
                                       MEMTXATTRS_UNSPECIFIED, NULL);

            addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                        MEMTXATTRS_UNSPECIFIED, NULL);
            addr &= SG_LIST_ADDR_MASK;

            plen = len & SG_LIST_LEN_MASK;
            haddr = address_space_map(&s->dram_as, addr, &plen, false,
                                      MEMTXATTRS_UNSPECIFIED);
            if (haddr == NULL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: failed to map dram\n", __func__);
                return;
            }
            iov[i].iov_base = haddr;
            if (acc_mode) {
                niov = gen_acc_mode_iov(s, iov, i, &plen);
            } else {
                iov[i].iov_len = plen;
            }
        }
    } else {
        hwaddr len = s->regs[R_HASH_SRC_LEN];

        haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
                                  &len, false, MEMTXATTRS_UNSPECIFIED);
        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to map dram\n", __func__);
            return;
        }
        iov[0].iov_base = haddr;
        iov[0].iov_len = len;
        i = 1;

        if (s->iov_count) {
            /*
             * In the aspeed sdk kernel driver, sg_mode is disabled in
             * hash_final(). Thus, if we receive a request with sg_mode
             * disabled, we must check whether the cache is empty; if it
             * is not, we combine the cached iovs with the current iov.
             */
            uint32_t total_msg_len;
            uint32_t pad_offset;

            s->total_req_len += len;
            if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
                niov = reconstruct_iov(s, iov, 0, &pad_offset);
            }
        }
    }

    if (niov) {
        i = niov;
    }

    if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
        return;
    }

    if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "aspeed_hace: address space write failed\n");
    }

    for (; i > 0; i--) {
        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                            iov[i - 1].iov_len, false,
                            iov[i - 1].iov_len);
    }

    /*
     * Set status bits to indicate completion. Testing shows hardware sets
     * these irrespective of HASH_IRQ_EN.
     */
    s->regs[R_STATUS] |= HASH_IRQ;
}

static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return 0;
    }

    return s->regs[addr];
}

static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
    case R_STATUS:
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                    __func__, data & ahc->hash_mask);
            break;
        }
        do_hash_operation(s, algo, data & HASH_SG_EN,
                          ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}
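
/*
 * A rough sketch of the guest-visible programming model implemented above
 * (inferred from this model, not from the hardware datasheet): program
 * R_HASH_SRC, R_HASH_DEST and R_HASH_SRC_LEN, then write R_HASH_CMD with
 * the algorithm bits (plus HASH_SG_EN and/or HASH_DIGEST_ACCUM as needed)
 * and HASH_IRQ_EN. The hash completes synchronously within the register
 * write, HASH_IRQ is set in R_STATUS, and the guest acknowledges by
 * writing HASH_IRQ back to R_STATUS.
 */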

static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_hace_reset(DeviceState *dev)
{
    struct AspeedHACEState *s = ASPEED_HACE(dev);

    memset(s->regs, 0, sizeof(s->regs));
    s->iov_count = 0;
    s->total_req_len = 0;
}

static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
                          TYPE_ASPEED_HACE, 0x1000);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}

static Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
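
/*
 * A minimal sketch of how an embedding SoC model would satisfy the "dram"
 * link checked in realize() (the "hace" child name and dram_mr field are
 * illustrative, not taken from a specific SoC file):
 *
 *   object_property_set_link(OBJECT(&soc->hace), "dram",
 *                            OBJECT(soc->dram_mr), &error_abort);
 */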

static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_UINT32(iov_count, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};

static void aspeed_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    dc->reset = aspeed_hace_reset;
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}

static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};

static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};

static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->src_mask = 0x3FFFFFFF;
    ahc->dest_mask = 0x3FFFFFF8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};

static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};

static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};

static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);