virtio-crypto.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995
  1. /*
  2. * Virtio crypto Support
  3. *
  4. * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
  5. *
  6. * Authors:
  7. * Gonglei <arei.gonglei@huawei.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2 or
  10. * (at your option) any later version. See the COPYING file in the
  11. * top-level directory.
  12. */
  13. #include "qemu/osdep.h"
  14. #include "qemu/iov.h"
  15. #include "qemu/main-loop.h"
  16. #include "qemu/module.h"
  17. #include "qapi/error.h"
  18. #include "qemu/error-report.h"
  19. #include "hw/virtio/virtio.h"
  20. #include "hw/virtio/virtio-crypto.h"
  21. #include "hw/qdev-properties.h"
  22. #include "hw/virtio/virtio-access.h"
  23. #include "standard-headers/linux/virtio_ids.h"
  24. #include "sysemu/cryptodev-vhost.h"
  25. #define VIRTIO_CRYPTO_VM_VERSION 1
  26. /*
  27. * Transfer virtqueue index to crypto queue index.
  28. * The control virtqueue is after the data virtqueues
  29. * so the input value doesn't need to be adjusted
  30. */
  31. static inline int virtio_crypto_vq2q(int queue_index)
  32. {
  33. return queue_index;
  34. }
/*
 * Parse the cipher parameters of a CREATE_SESSION request into @info
 * and, if a key is advertised, copy it out of the guest's out sg list.
 *
 * @iov/@out_num describe the remaining out sg list; on success the
 * consumed key bytes are discarded from the front and *iov/*out_num
 * are updated so the caller can continue parsing after the key.
 *
 * Returns 0 on success, -VIRTIO_CRYPTO_ERR if the key length exceeds
 * the configured maximum, -EFAULT (after virtio_error()) if the guest
 * buffers are shorter than the advertised key length.
 *
 * On key_len > 0, info->cipher_key is a g_malloc'd copy owned by the
 * caller, which frees it on all paths (see the err: label in
 * virtio_crypto_create_sym_session()).
 */
static int
virtio_crypto_cipher_session_helper(VirtIODevice *vdev,
                                    CryptoDevBackendSymSessionInfo *info,
                                    struct virtio_crypto_cipher_session_para *cipher_para,
                                    struct iovec **iov, unsigned int *out_num)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    unsigned int num = *out_num;

    /* Request fields are little-endian per VIRTIO 1.0. */
    info->cipher_alg = ldl_le_p(&cipher_para->algo);
    info->key_len = ldl_le_p(&cipher_para->keylen);
    info->direction = ldl_le_p(&cipher_para->op);
    DPRINTF("cipher_alg=%" PRIu32 ", info->direction=%" PRIu32 "\n",
            info->cipher_alg, info->direction);

    /* key_len is guest-controlled: bound it before allocating. */
    if (info->key_len > vcrypto->conf.max_cipher_key_len) {
        error_report("virtio-crypto length of cipher key is too big: %u",
                     info->key_len);
        return -VIRTIO_CRYPTO_ERR;
    }
    /* Get cipher key */
    if (info->key_len > 0) {
        size_t s;
        DPRINTF("keylen=%" PRIu32 "\n", info->key_len);

        info->cipher_key = g_malloc(info->key_len);
        s = iov_to_buf(*iov, num, 0, info->cipher_key, info->key_len);
        if (unlikely(s != info->key_len)) {
            virtio_error(vdev, "virtio-crypto cipher key incorrect");
            /* info->cipher_key is freed by the caller's error path. */
            return -EFAULT;
        }
        iov_discard_front(iov, &num, info->key_len);
        *out_num = num;
    }

    return 0;
}
/*
 * Handle a symmetric-algorithm CREATE_SESSION control request
 * (plain cipher or cipher+hash algorithm chaining).
 *
 * @iov/@out_num: the out sg list remaining after the control header.
 *
 * Returns the non-negative backend session id on success, or a
 * negative code the caller maps onto virtio_crypto_session_input:
 * -EFAULT for malformed guest buffers (device put into error state),
 * -VIRTIO_CRYPTO_NOTSUPP for unsupported modes, -VIRTIO_CRYPTO_ERR
 * for parameter/backend failures.
 *
 * NOTE(review): the result is funneled through 'int ret', so a
 * session id above INT_MAX would be truncated — confirm the backend's
 * id range before relying on the full int64_t width.
 */
static int64_t
virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
               struct virtio_crypto_sym_create_session_req *sess_req,
               uint32_t queue_id,
               uint32_t opcode,
               struct iovec *iov, unsigned int out_num)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    CryptoDevBackendSymSessionInfo info;
    int64_t session_id;
    int queue_index;
    uint32_t op_type;
    Error *local_err = NULL;
    int ret;

    /* Zeroed so the err: path may g_free() the key pointers safely. */
    memset(&info, 0, sizeof(info));
    op_type = ldl_le_p(&sess_req->op_type);
    info.op_type = op_type;
    info.op_code = opcode;

    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
        ret = virtio_crypto_cipher_session_helper(vdev, &info,
                           &sess_req->u.cipher.para,
                           &iov, &out_num);
        if (ret < 0) {
            goto err;
        }
    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
        size_t s;
        /* cipher part */
        ret = virtio_crypto_cipher_session_helper(vdev, &info,
                           &sess_req->u.chain.para.cipher_param,
                           &iov, &out_num);
        if (ret < 0) {
            goto err;
        }
        /* hash part */
        info.alg_chain_order = ldl_le_p(
                                     &sess_req->u.chain.para.alg_chain_order);
        info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
        info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
        if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
            /* Keyed MAC: the auth key follows in the out sg list. */
            info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
            info.auth_key_len = ldl_le_p(
                             &sess_req->u.chain.para.u.mac_param.auth_key_len);
            info.hash_result_len = ldl_le_p(
                           &sess_req->u.chain.para.u.mac_param.hash_result_len);
            /* auth_key_len is guest-controlled: bound before allocating. */
            if (info.auth_key_len > vcrypto->conf.max_auth_key_len) {
                error_report("virtio-crypto length of auth key is too big: %u",
                             info.auth_key_len);
                ret = -VIRTIO_CRYPTO_ERR;
                goto err;
            }
            /* get auth key */
            if (info.auth_key_len > 0) {
                DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len);
                info.auth_key = g_malloc(info.auth_key_len);
                s = iov_to_buf(iov, out_num, 0, info.auth_key,
                               info.auth_key_len);
                if (unlikely(s != info.auth_key_len)) {
                    virtio_error(vdev,
                                 "virtio-crypto authenticated key incorrect");
                    ret = -EFAULT;
                    goto err;
                }
                iov_discard_front(&iov, &out_num, info.auth_key_len);
            }
        } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
            /* Plain hash: algorithm and digest size only, no key. */
            info.hash_alg = ldl_le_p(
                             &sess_req->u.chain.para.u.hash_param.algo);
            info.hash_result_len = ldl_le_p(
                           &sess_req->u.chain.para.u.hash_param.hash_result_len);
        } else {
            /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
            error_report("unsupported hash mode");
            ret = -VIRTIO_CRYPTO_NOTSUPP;
            goto err;
        }
    } else {
        /* VIRTIO_CRYPTO_SYM_OP_NONE */
        error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE");
        ret = -VIRTIO_CRYPTO_NOTSUPP;
        goto err;
    }

    queue_index = virtio_crypto_vq2q(queue_id);
    session_id = cryptodev_backend_sym_create_session(
                                     vcrypto->cryptodev,
                                     &info, queue_index, &local_err);
    if (session_id >= 0) {
        DPRINTF("create session_id=%" PRIu64 " successfully\n",
                session_id);
        ret = session_id;
    } else {
        if (local_err) {
            error_report_err(local_err);
        }
        ret = -VIRTIO_CRYPTO_ERR;
    }

err:
    /* Key copies are freed on success and failure alike. */
    g_free(info.cipher_key);
    g_free(info.auth_key);
    return ret;
}
  169. static uint8_t
  170. virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
  171. struct virtio_crypto_destroy_session_req *close_sess_req,
  172. uint32_t queue_id)
  173. {
  174. int ret;
  175. uint64_t session_id;
  176. uint32_t status;
  177. Error *local_err = NULL;
  178. session_id = ldq_le_p(&close_sess_req->session_id);
  179. DPRINTF("close session, id=%" PRIu64 "\n", session_id);
  180. ret = cryptodev_backend_sym_close_session(
  181. vcrypto->cryptodev, session_id, queue_id, &local_err);
  182. if (ret == 0) {
  183. status = VIRTIO_CRYPTO_OK;
  184. } else {
  185. if (local_err) {
  186. error_report_err(local_err);
  187. } else {
  188. error_report("destroy session failed");
  189. }
  190. status = VIRTIO_CRYPTO_ERR;
  191. }
  192. return status;
  193. }
/*
 * Control virtqueue handler: drain the queue, dispatching each
 * element's virtio_crypto_op_ctrl_req (session create/destroy).
 *
 * Element lifecycle: on a well-formed request the element is pushed
 * back with the response length; on a malformed request (virtio_error
 * paths) the element is detached and the loop stops, leaving the
 * device in the error state.
 */
static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    struct virtio_crypto_op_ctrl_req ctrl;
    VirtQueueElement *elem;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t queue_id;
    uint32_t opcode;
    struct virtio_crypto_session_input input;
    int64_t session_id;
    uint8_t status;
    size_t s;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        /* Need at least one out buffer (request) and one in (response). */
        if (elem->out_num < 1 || elem->in_num < 1) {
            virtio_error(vdev, "virtio-crypto ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        out_num = elem->out_num;
        out_iov = elem->out_sg;
        in_num = elem->in_num;
        in_iov = elem->in_sg;
        if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl))
                    != sizeof(ctrl))) {
            virtio_error(vdev, "virtio-crypto request ctrl_hdr too short");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }
        /* Skip the header; out_iov now points at the payload (keys etc.). */
        iov_discard_front(&out_iov, &out_num, sizeof(ctrl));
        opcode = ldl_le_p(&ctrl.header.opcode);
        queue_id = ldl_le_p(&ctrl.header.queue_id);

        switch (opcode) {
        case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
            memset(&input, 0, sizeof(input));
            session_id = virtio_crypto_create_sym_session(vcrypto,
                             &ctrl.u.sym_create_session,
                             queue_id, opcode,
                             out_iov, out_num);
            /* Serious errors, need to reset virtio crypto device */
            if (session_id == -EFAULT) {
                virtqueue_detach_element(vq, elem, 0);
                break;
            } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            } else if (session_id == -VIRTIO_CRYPTO_ERR) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
            } else {
                /* Set the session id */
                stq_le_p(&input.session_id, session_id);
                stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
            }

            /* Write the virtio_crypto_session_input response. */
            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
        case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
        case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
        case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
            status = virtio_crypto_handle_close_session(vcrypto,
                   &ctrl.u.destroy_session, queue_id);
            /* The status only occupy one byte, we can directly use it */
            s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
            if (unlikely(s != sizeof(status))) {
                virtio_error(vdev, "virtio-crypto status incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(status));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
        case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
        case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
        default:
            /* Unsupported session type: report NOTSUPP to the guest. */
            error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
            memset(&input, 0, sizeof(input));
            stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;
        } /* end switch case */

        g_free(elem);
    } /* end for loop */
}
  299. static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
  300. VirtIOCryptoReq *req)
  301. {
  302. req->vcrypto = vcrypto;
  303. req->vq = vq;
  304. req->in = NULL;
  305. req->in_iov = NULL;
  306. req->in_num = 0;
  307. req->in_len = 0;
  308. req->flags = CRYPTODEV_BACKEND_ALG__MAX;
  309. req->u.sym_op_info = NULL;
  310. }
  311. static void virtio_crypto_free_request(VirtIOCryptoReq *req)
  312. {
  313. if (req) {
  314. if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
  315. size_t max_len;
  316. CryptoDevBackendSymOpInfo *op_info = req->u.sym_op_info;
  317. max_len = op_info->iv_len +
  318. op_info->aad_len +
  319. op_info->src_len +
  320. op_info->dst_len +
  321. op_info->digest_result_len;
  322. /* Zeroize and free request data structure */
  323. memset(op_info, 0, sizeof(*op_info) + max_len);
  324. g_free(op_info);
  325. }
  326. g_free(req);
  327. }
  328. }
/*
 * Copy the results of a completed symmetric operation back into the
 * guest's in sg list: first the destination data, then (for algorithm
 * chaining) the digest. Only runs for successful operations; on
 * failure the guest receives only the status byte.
 */
static void
virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
                VirtIOCryptoReq *req,
                uint32_t status,
                CryptoDevBackendSymOpInfo *sym_op_info)
{
    size_t s, len;

    if (status != VIRTIO_CRYPTO_OK) {
        return;
    }

    /*
     * NOTE(review): src_len bytes are copied out of the dst buffer,
     * which was allocated with dst_len bytes of room — this is only
     * safe when src_len <= dst_len, so the request parser must
     * enforce that relation; confirm it does.
     */
    len = sym_op_info->src_len;
    /* Save the cipher result */
    s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
    if (s != len) {
        virtio_error(vdev, "virtio-crypto dest data incorrect");
        return;
    }

    /* Advance past the cipher output so the digest lands after it. */
    iov_discard_front(&req->in_iov, &req->in_num, len);

    if (sym_op_info->op_type ==
                      VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
        /* Save the digest result */
        s = iov_from_buf(req->in_iov, req->in_num, 0,
                         sym_op_info->digest_result,
                         sym_op_info->digest_result_len);
        if (s != sym_op_info->digest_result_len) {
            virtio_error(vdev, "virtio-crypto digest result incorrect");
        }
    }
}
  358. static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
  359. {
  360. VirtIOCrypto *vcrypto = req->vcrypto;
  361. VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
  362. if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
  363. virtio_crypto_sym_input_data_helper(vdev, req, status,
  364. req->u.sym_op_info);
  365. }
  366. stb_p(&req->in->status, status);
  367. virtqueue_push(req->vq, &req->elem, req->in_len);
  368. virtio_notify(vdev, req->vq);
  369. }
  370. static VirtIOCryptoReq *
  371. virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
  372. {
  373. VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
  374. if (req) {
  375. virtio_crypto_init_request(s, vq, req);
  376. }
  377. return req;
  378. }
  379. static CryptoDevBackendSymOpInfo *
  380. virtio_crypto_sym_op_helper(VirtIODevice *vdev,
  381. struct virtio_crypto_cipher_para *cipher_para,
  382. struct virtio_crypto_alg_chain_data_para *alg_chain_para,
  383. struct iovec *iov, unsigned int out_num)
  384. {
  385. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
  386. CryptoDevBackendSymOpInfo *op_info;
  387. uint32_t src_len = 0, dst_len = 0;
  388. uint32_t iv_len = 0;
  389. uint32_t aad_len = 0, hash_result_len = 0;
  390. uint32_t hash_start_src_offset = 0, len_to_hash = 0;
  391. uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;
  392. uint64_t max_len, curr_size = 0;
  393. size_t s;
  394. /* Plain cipher */
  395. if (cipher_para) {
  396. iv_len = ldl_le_p(&cipher_para->iv_len);
  397. src_len = ldl_le_p(&cipher_para->src_data_len);
  398. dst_len = ldl_le_p(&cipher_para->dst_data_len);
  399. } else if (alg_chain_para) { /* Algorithm chain */
  400. iv_len = ldl_le_p(&alg_chain_para->iv_len);
  401. src_len = ldl_le_p(&alg_chain_para->src_data_len);
  402. dst_len = ldl_le_p(&alg_chain_para->dst_data_len);
  403. aad_len = ldl_le_p(&alg_chain_para->aad_len);
  404. hash_result_len = ldl_le_p(&alg_chain_para->hash_result_len);
  405. hash_start_src_offset = ldl_le_p(
  406. &alg_chain_para->hash_start_src_offset);
  407. cipher_start_src_offset = ldl_le_p(
  408. &alg_chain_para->cipher_start_src_offset);
  409. len_to_cipher = ldl_le_p(&alg_chain_para->len_to_cipher);
  410. len_to_hash = ldl_le_p(&alg_chain_para->len_to_hash);
  411. } else {
  412. return NULL;
  413. }
  414. max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
  415. if (unlikely(max_len > vcrypto->conf.max_size)) {
  416. virtio_error(vdev, "virtio-crypto too big length");
  417. return NULL;
  418. }
  419. op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
  420. op_info->iv_len = iv_len;
  421. op_info->src_len = src_len;
  422. op_info->dst_len = dst_len;
  423. op_info->aad_len = aad_len;
  424. op_info->digest_result_len = hash_result_len;
  425. op_info->hash_start_src_offset = hash_start_src_offset;
  426. op_info->len_to_hash = len_to_hash;
  427. op_info->cipher_start_src_offset = cipher_start_src_offset;
  428. op_info->len_to_cipher = len_to_cipher;
  429. /* Handle the initilization vector */
  430. if (op_info->iv_len > 0) {
  431. DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
  432. op_info->iv = op_info->data + curr_size;
  433. s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
  434. if (unlikely(s != op_info->iv_len)) {
  435. virtio_error(vdev, "virtio-crypto iv incorrect");
  436. goto err;
  437. }
  438. iov_discard_front(&iov, &out_num, op_info->iv_len);
  439. curr_size += op_info->iv_len;
  440. }
  441. /* Handle additional authentication data if exists */
  442. if (op_info->aad_len > 0) {
  443. DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
  444. op_info->aad_data = op_info->data + curr_size;
  445. s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
  446. if (unlikely(s != op_info->aad_len)) {
  447. virtio_error(vdev, "virtio-crypto additional auth data incorrect");
  448. goto err;
  449. }
  450. iov_discard_front(&iov, &out_num, op_info->aad_len);
  451. curr_size += op_info->aad_len;
  452. }
  453. /* Handle the source data */
  454. if (op_info->src_len > 0) {
  455. DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
  456. op_info->src = op_info->data + curr_size;
  457. s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
  458. if (unlikely(s != op_info->src_len)) {
  459. virtio_error(vdev, "virtio-crypto source data incorrect");
  460. goto err;
  461. }
  462. iov_discard_front(&iov, &out_num, op_info->src_len);
  463. curr_size += op_info->src_len;
  464. }
  465. /* Handle the destination data */
  466. op_info->dst = op_info->data + curr_size;
  467. curr_size += op_info->dst_len;
  468. DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
  469. /* Handle the hash digest result */
  470. if (hash_result_len > 0) {
  471. DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
  472. op_info->digest_result = op_info->data + curr_size;
  473. }
  474. return op_info;
  475. err:
  476. g_free(op_info);
  477. return NULL;
  478. }
  479. static int
  480. virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
  481. struct virtio_crypto_sym_data_req *req,
  482. CryptoDevBackendSymOpInfo **sym_op_info,
  483. struct iovec *iov, unsigned int out_num)
  484. {
  485. VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
  486. uint32_t op_type;
  487. CryptoDevBackendSymOpInfo *op_info;
  488. op_type = ldl_le_p(&req->op_type);
  489. if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
  490. op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
  491. NULL, iov, out_num);
  492. if (!op_info) {
  493. return -EFAULT;
  494. }
  495. op_info->op_type = op_type;
  496. } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
  497. op_info = virtio_crypto_sym_op_helper(vdev, NULL,
  498. &req->u.chain.para,
  499. iov, out_num);
  500. if (!op_info) {
  501. return -EFAULT;
  502. }
  503. op_info->op_type = op_type;
  504. } else {
  505. /* VIRTIO_CRYPTO_SYM_OP_NONE */
  506. error_report("virtio-crypto unsupported cipher type");
  507. return -VIRTIO_CRYPTO_NOTSUPP;
  508. }
  509. *sym_op_info = op_info;
  510. return 0;
  511. }
/*
 * Process one popped data-queue request: locate the response area,
 * decode the header and dispatch to the backend.
 *
 * Returns -1 on fatal (device-resetting) errors — the caller detaches
 * the element and stops draining the queue — and 0 otherwise. On the
 * 0 paths the request has already been completed and freed.
 */
static int
virtio_crypto_handle_request(VirtIOCryptoReq *request)
{
    VirtIOCrypto *vcrypto = request->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    VirtQueueElement *elem = &request->elem;
    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
    struct virtio_crypto_op_data_req req;
    int ret;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t opcode;
    uint8_t status = VIRTIO_CRYPTO_ERR;
    uint64_t session_id;
    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
    Error *local_err = NULL;

    /* Need at least one out buffer (request) and one in (response). */
    if (elem->out_num < 1 || elem->in_num < 1) {
        virtio_error(vdev, "virtio-crypto dataq missing headers");
        return -1;
    }

    out_num = elem->out_num;
    out_iov = elem->out_sg;
    in_num = elem->in_num;
    in_iov = elem->in_sg;
    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
                != sizeof(req))) {
        virtio_error(vdev, "virtio-crypto request outhdr too short");
        return -1;
    }
    iov_discard_front(&out_iov, &out_num, sizeof(req));

    /* The status byte lives at the very end of the last in buffer. */
    if (in_iov[in_num - 1].iov_len <
            sizeof(struct virtio_crypto_inhdr)) {
        virtio_error(vdev, "virtio-crypto request inhdr too short");
        return -1;
    }
    /* We always touch the last byte, so just see how big in_iov is. */
    request->in_len = iov_size(in_iov, in_num);
    request->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_crypto_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));

    /*
     * The length of operation result, including dest_data
     * and digest_result if exists.
     */
    request->in_num = in_num;
    request->in_iov = in_iov;

    opcode = ldl_le_p(&req.header.opcode);
    session_id = ldq_le_p(&req.header.session_id);

    switch (opcode) {
    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
        ret = virtio_crypto_handle_sym_req(vcrypto,
                         &req.u.sym_req,
                         &sym_op_info,
                         out_iov, out_num);
        /* Serious errors, need to reset virtio crypto device */
        if (ret == -EFAULT) {
            return -1;
        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
            virtio_crypto_free_request(request);
        } else {
            /* Hand the parsed operation to the cryptodev backend. */
            sym_op_info->session_id = session_id;

            /* Set request's parameter */
            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
            request->u.sym_op_info = sym_op_info;
            ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
                                    request, queue_index, &local_err);
            if (ret < 0) {
                /* Negative return encodes a VIRTIO_CRYPTO_* status. */
                status = -ret;
                if (local_err) {
                    error_report_err(local_err);
                }
            } else { /* ret == VIRTIO_CRYPTO_OK */
                status = ret;
            }
            virtio_crypto_req_complete(request, status);
            virtio_crypto_free_request(request);
        }
        break;
    case VIRTIO_CRYPTO_HASH:
    case VIRTIO_CRYPTO_MAC:
    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
    case VIRTIO_CRYPTO_AEAD_DECRYPT:
    default:
        error_report("virtio-crypto unsupported dataq opcode: %u",
                     opcode);
        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
        virtio_crypto_free_request(request);
    }

    return 0;
}
  607. static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
  608. {
  609. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
  610. VirtIOCryptoReq *req;
  611. while ((req = virtio_crypto_get_request(vcrypto, vq))) {
  612. if (virtio_crypto_handle_request(req) < 0) {
  613. virtqueue_detach_element(req->vq, &req->elem, 0);
  614. virtio_crypto_free_request(req);
  615. break;
  616. }
  617. }
  618. }
/*
 * Bottom half servicing one data queue. Loops between handling
 * requests and re-enabling notifications: a buffer added after
 * processing but before notification was re-enabled would otherwise
 * be missed, so the queue is re-checked after each re-enable.
 */
static void virtio_crypto_dataq_bh(void *opaque)
{
    VirtIOCryptoQueue *q = opaque;
    VirtIOCrypto *vcrypto = q->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        return;
    }

    /* Just in case the driver is not ready on more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    for (;;) {
        virtio_crypto_handle_dataq(vdev, q->dataq);
        virtio_queue_set_notification(q->dataq, 1);

        /* Are we done or did the guest add more buffers? */
        if (virtio_queue_empty(q->dataq)) {
            break;
        }
        virtio_queue_set_notification(q->dataq, 0);
    }
}
  642. static void
  643. virtio_crypto_handle_dataq_bh(VirtIODevice *vdev, VirtQueue *vq)
  644. {
  645. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
  646. VirtIOCryptoQueue *q =
  647. &vcrypto->vqs[virtio_crypto_vq2q(virtio_get_queue_index(vq))];
  648. /* This happens when device was stopped but VCPU wasn't. */
  649. if (!vdev->vm_running) {
  650. return;
  651. }
  652. virtio_queue_set_notification(vq, 0);
  653. qemu_bh_schedule(q->dataq_bh);
  654. }
/*
 * Feature negotiation hook: the device adds no optional feature bits,
 * so the offered set is returned unchanged.
 */
static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
                                           uint64_t features,
                                           Error **errp)
{
    return features;
}
  661. static void virtio_crypto_reset(VirtIODevice *vdev)
  662. {
  663. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
  664. /* multiqueue is disabled by default */
  665. vcrypto->curr_queues = 1;
  666. if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
  667. vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
  668. } else {
  669. vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
  670. }
  671. }
/*
 * Mirror the backend's capabilities into the device configuration
 * fields that virtio_crypto_get_config() later exposes to the guest.
 */
static void virtio_crypto_init_config(VirtIODevice *vdev)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);

    /* Service and per-algorithm capability bitmasks. */
    vcrypto->conf.crypto_services =
                     vcrypto->conf.cryptodev->conf.crypto_services;
    vcrypto->conf.cipher_algo_l =
                     vcrypto->conf.cryptodev->conf.cipher_algo_l;
    vcrypto->conf.cipher_algo_h =
                     vcrypto->conf.cryptodev->conf.cipher_algo_h;
    vcrypto->conf.hash_algo = vcrypto->conf.cryptodev->conf.hash_algo;
    vcrypto->conf.mac_algo_l = vcrypto->conf.cryptodev->conf.mac_algo_l;
    vcrypto->conf.mac_algo_h = vcrypto->conf.cryptodev->conf.mac_algo_h;
    vcrypto->conf.aead_algo = vcrypto->conf.cryptodev->conf.aead_algo;
    /* Limits enforced when parsing guest requests. */
    vcrypto->conf.max_cipher_key_len =
                  vcrypto->conf.cryptodev->conf.max_cipher_key_len;
    vcrypto->conf.max_auth_key_len =
                  vcrypto->conf.cryptodev->conf.max_auth_key_len;
    vcrypto->conf.max_size = vcrypto->conf.cryptodev->conf.max_size;
}
/*
 * Realize the virtio-crypto device: validate and claim the cryptodev
 * backend, create max_queues data virtqueues (each with a BH for
 * deferred processing) followed by the control virtqueue, and publish
 * the backend capabilities in config space.
 */
static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    int i;

    vcrypto->cryptodev = vcrypto->conf.cryptodev;
    if (vcrypto->cryptodev == NULL) {
        error_setg(errp, "'cryptodev' parameter expects a valid object");
        return;
    } else if (cryptodev_backend_is_used(vcrypto->cryptodev)) {
        /* A backend instance can serve only one device at a time. */
        char *path = object_get_canonical_path_component(OBJECT(vcrypto->conf.cryptodev));
        error_setg(errp, "can't use already used cryptodev backend: %s", path);
        g_free(path);
        return;
    }

    /* One data queue per backend peer queue, at least one. */
    vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1);
    if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   vcrypto->max_queues, VIRTIO_QUEUE_MAX);
        return;
    }

    virtio_init(vdev, "virtio-crypto", VIRTIO_ID_CRYPTO, vcrypto->config_size);
    vcrypto->curr_queues = 1;
    vcrypto->vqs = g_malloc0(sizeof(VirtIOCryptoQueue) * vcrypto->max_queues);
    for (i = 0; i < vcrypto->max_queues; i++) {
        vcrypto->vqs[i].dataq =
                 virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
        vcrypto->vqs[i].dataq_bh =
                 qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]);
        vcrypto->vqs[i].vcrypto = vcrypto;
    }

    /* The control queue sits after all data queues. */
    vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
    if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
        vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
    } else {
        vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
    }

    virtio_crypto_init_config(vdev);
    cryptodev_backend_set_used(vcrypto->cryptodev, true);
}
  732. static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
  733. {
  734. VirtIODevice *vdev = VIRTIO_DEVICE(dev);
  735. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
  736. VirtIOCryptoQueue *q;
  737. int i, max_queues;
  738. max_queues = vcrypto->multiqueue ? vcrypto->max_queues : 1;
  739. for (i = 0; i < max_queues; i++) {
  740. virtio_del_queue(vdev, i);
  741. q = &vcrypto->vqs[i];
  742. qemu_bh_delete(q->dataq_bh);
  743. }
  744. g_free(vcrypto->vqs);
  745. virtio_cleanup(vdev);
  746. cryptodev_backend_set_used(vcrypto->cryptodev, false);
  747. }
/*
 * Migration description: the device carries backend session state
 * that cannot be transferred, so it is marked unmigratable.
 */
static const VMStateDescription vmstate_virtio_crypto = {
    .name = "virtio-crypto",
    .unmigratable = 1,
    .minimum_version_id = VIRTIO_CRYPTO_VM_VERSION,
    .version_id = VIRTIO_CRYPTO_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
/* QOM properties: the mandatory link to the cryptodev backend object. */
static Property virtio_crypto_properties[] = {
    DEFINE_PROP_LINK("cryptodev", VirtIOCrypto, conf.cryptodev,
                     TYPE_CRYPTODEV_BACKEND, CryptoDevBackend *),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Fill the guest-visible config space from the mirrored device state
 * (see virtio_crypto_init_config()).
 */
static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOCrypto *c = VIRTIO_CRYPTO(vdev);
    struct virtio_crypto_config crypto_cfg = {};

    /*
     * Virtio-crypto device conforms to VIRTIO 1.0 which is always LE,
     * so we can use LE accessors directly.
     */
    stl_le_p(&crypto_cfg.status, c->status);
    stl_le_p(&crypto_cfg.max_dataqueues, c->max_queues);
    stl_le_p(&crypto_cfg.crypto_services, c->conf.crypto_services);
    stl_le_p(&crypto_cfg.cipher_algo_l, c->conf.cipher_algo_l);
    stl_le_p(&crypto_cfg.cipher_algo_h, c->conf.cipher_algo_h);
    stl_le_p(&crypto_cfg.hash_algo, c->conf.hash_algo);
    stl_le_p(&crypto_cfg.mac_algo_l, c->conf.mac_algo_l);
    stl_le_p(&crypto_cfg.mac_algo_h, c->conf.mac_algo_h);
    stl_le_p(&crypto_cfg.aead_algo, c->conf.aead_algo);
    stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len);
    stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len);
    stq_le_p(&crypto_cfg.max_size, c->conf.max_size);

    memcpy(config, &crypto_cfg, c->config_size);
}
  785. static bool virtio_crypto_started(VirtIOCrypto *c, uint8_t status)
  786. {
  787. VirtIODevice *vdev = VIRTIO_DEVICE(c);
  788. return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
  789. (c->status & VIRTIO_CRYPTO_S_HW_READY) && vdev->vm_running;
  790. }
/*
 * Reconcile the vhost backend with the desired device state: start vhost
 * when the device has just become operational, stop it when it has ceased
 * to be. A no-op when the backend has no vhost support or the running
 * state already matches.
 */
static void virtio_crypto_vhost_status(VirtIOCrypto *c, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(c);
    int queues = c->multiqueue ? c->max_queues : 1;
    CryptoDevBackend *b = c->cryptodev;
    CryptoDevBackendClient *cc = b->conf.peers.ccs[0];

    /* Backend has no vhost instance: nothing to manage. */
    if (!cryptodev_get_vhost(cc, b, 0)) {
        return;
    }

    /* Desired state already matches the current vhost state. */
    if ((virtio_crypto_started(c, status)) == !!c->vhost_started) {
        return;
    }

    if (!c->vhost_started) {
        int r;

        /* Flag is set before the start call and cleared again on failure —
         * presumably so code reached during startup sees it as started;
         * keep this ordering. */
        c->vhost_started = 1;
        r = cryptodev_vhost_start(vdev, queues);
        if (r < 0) {
            error_report("unable to start vhost crypto: %d: "
                         "falling back on userspace virtio", -r);
            /* Continue with the userspace data path instead. */
            c->vhost_started = 0;
        }
    } else {
        cryptodev_vhost_stop(vdev, queues);
        c->vhost_started = 0;
    }
}
  817. static void virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status)
  818. {
  819. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
  820. virtio_crypto_vhost_status(vcrypto, status);
  821. }
  822. static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
  823. bool mask)
  824. {
  825. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
  826. int queue = virtio_crypto_vq2q(idx);
  827. assert(vcrypto->vhost_started);
  828. cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
  829. }
  830. static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
  831. {
  832. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
  833. int queue = virtio_crypto_vq2q(idx);
  834. assert(vcrypto->vhost_started);
  835. return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
  836. }
  837. static void virtio_crypto_class_init(ObjectClass *klass, void *data)
  838. {
  839. DeviceClass *dc = DEVICE_CLASS(klass);
  840. VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
  841. dc->props = virtio_crypto_properties;
  842. dc->vmsd = &vmstate_virtio_crypto;
  843. set_bit(DEVICE_CATEGORY_MISC, dc->categories);
  844. vdc->realize = virtio_crypto_device_realize;
  845. vdc->unrealize = virtio_crypto_device_unrealize;
  846. vdc->get_config = virtio_crypto_get_config;
  847. vdc->get_features = virtio_crypto_get_features;
  848. vdc->reset = virtio_crypto_reset;
  849. vdc->set_status = virtio_crypto_set_status;
  850. vdc->guest_notifier_mask = virtio_crypto_guest_notifier_mask;
  851. vdc->guest_notifier_pending = virtio_crypto_guest_notifier_pending;
  852. }
  853. static void virtio_crypto_instance_init(Object *obj)
  854. {
  855. VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(obj);
  856. /*
  857. * The default config_size is sizeof(struct virtio_crypto_config).
  858. * Can be overriden with virtio_crypto_set_config_size.
  859. */
  860. vcrypto->config_size = sizeof(struct virtio_crypto_config);
  861. }
/* QOM type description: virtio-crypto derives from the generic virtio
 * device type. */
static const TypeInfo virtio_crypto_info = {
    .name = TYPE_VIRTIO_CRYPTO,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOCrypto),
    .instance_init = virtio_crypto_instance_init,
    .class_init = virtio_crypto_class_init,
};
/* Register the virtio-crypto QOM type; run automatically via type_init(). */
static void virtio_register_types(void)
{
    type_register_static(&virtio_crypto_info);
}

type_init(virtio_register_types)