/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-rdma.h"

#include <infiniband/verbs.h>

#include "contrib/rdmacm-mux/rdmacm-mux.h"
#include "trace.h"
#include "rdma_utils.h"
#include "rdma_rm.h"
#include "rdma_backend.h"

#define THR_NAME_LEN 16
#define THR_POLL_TO 5000

#define MAD_HDR_SIZE sizeof(struct ibv_grh)

typedef struct BackendCtx {
    void *up_ctx;
    struct ibv_sge sge; /* Used to save MAD recv buffer */
    RdmaBackendQP *backend_qp; /* To maintain recv buffers */
    RdmaBackendSRQ *backend_srq;
} BackendCtx;

struct backend_umad {
    struct ib_user_mad hdr;
    char mad[RDMA_MAX_PRIVATE_DATA];
};

static void (*comp_handler)(void *ctx, struct ibv_wc *wc);

static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
    rdma_error_report("No completion handler is registered");
}

static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
                                 void *ctx)
{
    struct ibv_wc wc = {};

    wc.status = status;
    wc.vendor_err = vendor_err;

    comp_handler(ctx, &wc);
}

static void free_cqe_ctx(gpointer data, gpointer user_data)
{
    BackendCtx *bctx;
    RdmaDeviceResources *rdma_dev_res = user_data;
    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);

    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
    if (bctx) {
        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
        qatomic_dec(&rdma_dev_res->stats.missing_cqe);
    }
    g_free(bctx);
}

static void clean_recv_mads(RdmaBackendDev *backend_dev)
{
    unsigned long cqe_ctx_id;

    do {
        cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->
                                                     recv_mads_list);
        if (cqe_ctx_id != -ENOENT) {
            qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
                         backend_dev->rdma_dev_res);
        }
    } while (cqe_ctx_id != -ENOENT);
}

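/*
 * Drain all available completions from a backend CQ. Each wr_id is the id of
 * a BackendCtx allocated at post time, so the context is looked up, handed to
 * the registered completion handler and then released.
 */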
static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne, total_ne = 0;
    BackendCtx *bctx;
    struct ibv_wc wc[2];
    RdmaProtectedGSList *cqe_ctx_list;

    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
        do {
            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

            trace_rdma_poll_cq(ne, ibcq);

            for (i = 0; i < ne; i++) {
                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
                if (unlikely(!bctx)) {
                    rdma_error_report("No matching ctx for req %"PRId64,
                                      wc[i].wr_id);
                    continue;
                }

                comp_handler(bctx->up_ctx, &wc[i]);

                if (bctx->backend_qp) {
                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
                } else {
                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
                }

                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
                g_free(bctx);
            }
            total_ne += ne;
        } while (ne > 0);
        qatomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
    }

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }

    rdma_dev_res->stats.completions += total_ne;

    return total_ne;
}

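/*
 * Completion-channel thread: wait (with a THR_POLL_TO timeout, so that
 * stop_backend_thread() can take effect) for CQ events on the backend
 * completion channel, re-arm notification and drain the signalled CQ.
 */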
static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    qemu_thread_exit(0);

    return NULL;
}

static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    qatomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}

static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    qatomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}

static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
    return qatomic_read(&backend_dev->rdmacm_mux.can_receive);
}

static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
    if (ret != sizeof(msg)) {
        rdma_error_report("Got invalid message from mux: size %d, expecting %d",
                          ret, (int)sizeof(msg));
        return -EIO;
    }

    trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
                                     msg.hdr.err_code);

    if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
        rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
        return -EIO;
    }

    if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
        rdma_error_report("Operation failed in mux, error code %d",
                          msg.hdr.err_code);
        return -EIO;
    }

    return 0;
}

static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
{
    int rc = 0;

    msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
    trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
    disable_rdmacm_mux_async(backend_dev);
    rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
                           (const uint8_t *)msg, sizeof(*msg));
    if (rc != sizeof(*msg)) {
        enable_rdmacm_mux_async(backend_dev);
        rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
        return -EIO;
    }

    rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
    if (rc) {
        rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
                          msg->hdr.op_code, rc);
    }

    enable_rdmacm_mux_async(backend_dev);

    return 0;
}

static void stop_backend_thread(RdmaBackendThread *thread)
{
    thread->run = false;
    while (thread->is_running) {
        sleep(THR_POLL_TO / SCALE_US / 2);
    }
}

static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}

void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                        struct ibv_wc *wc))
{
    comp_handler = handler;
}

void rdma_backend_unregister_comp_handler(void)
{
    rdma_backend_register_comp_handler(dummy_comp_handler);
}

int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr)
{
    int rc;

    rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
    if (rc) {
        rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
{
    int polled;

    rdma_dev_res->stats.poll_cq_from_guest++;
    polled = rdma_poll_cq(rdma_dev_res, cq->ibcq);
    if (!polled) {
        rdma_dev_res->stats.poll_cq_from_guest_empty++;
    }
}

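/*
 * Address handles are cached in a hash table keyed by the destination GID,
 * so repeated UD sends to the same destination reuse a single ibv_ah instead
 * of calling ibv_create_ah() for every work request.
 */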
static GHashTable *ah_hash;

static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
                                uint8_t sgid_idx, union ibv_gid *dgid)
{
    GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
    struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);

    if (ah) {
        trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
                                       be64_to_cpu(dgid->global.interface_id));
        g_bytes_unref(ah_key);
    } else {
        struct ibv_ah_attr ah_attr = {
            .is_global = 1,
            .port_num = backend_dev->port_num,
            .grh.hop_limit = 1,
        };

        ah_attr.grh.dgid = *dgid;
        ah_attr.grh.sgid_index = sgid_idx;

        ah = ibv_create_ah(pd, &ah_attr);
        if (ah) {
            g_hash_table_insert(ah_hash, ah_key, ah);
        } else {
            g_bytes_unref(ah_key);
            rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
                              be64_to_cpu(dgid->global.subnet_prefix),
                              be64_to_cpu(dgid->global.interface_id));
        }

        trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
                                        be64_to_cpu(dgid->global.interface_id));
    }

    return ah;
}

static void destroy_ah_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void destroy_ah_hast_data(gpointer data)
{
    struct ibv_ah *ah = data;

    ibv_destroy_ah(ah);
}

static void ah_cache_init(void)
{
    ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                    destroy_ah_hash_key, destroy_ah_hast_data);
}

#ifdef LEGACY_RDMA_REG_MR
static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                struct ibv_sge *sge, uint8_t num_sge,
                                uint64_t *total_length)
{
    RdmaRmMR *mr;
    int idx;

    for (idx = 0; idx < num_sge; idx++) {
        mr = rdma_rm_get_mr(rdma_dev_res, sge[idx].lkey);
        if (unlikely(!mr)) {
            rdma_error_report("Invalid lkey 0x%x", sge[idx].lkey);
            return VENDOR_ERR_INVLKEY | sge[idx].lkey;
        }

        sge[idx].addr = (uintptr_t)mr->virt + sge[idx].addr - mr->start;
        sge[idx].lkey = rdma_backend_mr_lkey(&mr->backend_mr);

        *total_length += sge[idx].length;
    }

    return 0;
}
#else
static inline int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                       struct ibv_sge *sge, uint8_t num_sge,
                                       uint64_t *total_length)
{
    int idx;

    for (idx = 0; idx < num_sge; idx++) {
        *total_length += sge[idx].length;
    }

    return 0;
}
#endif

static void trace_mad_message(const char *title, char *buf, int len)
{
    int i;
    char *b = g_malloc0(len * 3 + 1);
    char b1[4];

    for (i = 0; i < len; i++) {
        sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
        strcat(b, b1);
    }

    trace_rdma_mad_message(title, len, b);

    g_free(b);
}

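/*
 * MADs sent on the emulated GSI QP (QP1) are not posted to the host HCA;
 * they are forwarded as RdmaCmMuxMsg requests to the external rdmacm-mux
 * service over the chardev backend. sge[0] carries the header and sge[1]
 * the payload; both are copied from guest memory into a single umad buffer.
 */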
static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}

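/*
 * Post a send work request. QP0 is rejected and QP1 traffic is diverted to
 * mad_send(); for real QPs a BackendCtx is allocated so the completion can
 * be routed back to the guest, the SGE list is run through
 * build_host_sge_array() and the WR is posted with ibv_post_send().
 */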
void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx)
{
    BackendCtx *bctx;
    uint32_t bctx_id;
    int rc;
    struct ibv_send_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        } else if (qp_type == IBV_QPT_GSI) {
            rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx_err++;
            } else {
                complete_work(IBV_WC_SUCCESS, 0, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.tx_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    if (qp_type == IBV_QPT_UD) {
        wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
        if (!wr.wr.ud.ah) {
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
            goto err_dealloc_cqe_ctx;
        }
        wr.wr.ud.remote_qpn = dqpn;
        wr.wr.ud.remote_qkey = dqkey;
    }

    wr.num_sge = num_sge;
    wr.opcode = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;
    wr.sg_list = sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.tx++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.tx_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
                                         struct ibv_sge *sge, uint32_t num_sge,
                                         void *ctx)
{
    BackendCtx *bctx;
    int rc;
    uint32_t bctx_id;

    if (num_sge != 1) {
        rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
        return VENDOR_ERR_INV_NUM_SGE;
    }

    if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
        rdma_error_report("Too small buffer for MAD");
        return VENDOR_ERR_INV_MAD_BUFF;
    }

    bctx = g_malloc0(sizeof(*bctx));

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        g_free(bctx);
        return VENDOR_ERR_NOMEM;
    }

    bctx->up_ctx = ctx;
    bctx->sge = *sge;

    rdma_protected_gqueue_append_int64(&backend_dev->recv_mads_list, bctx_id);

    return 0;
}

void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        }
        if (qp_type == IBV_QPT_GSI) {
            rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
                backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++;
            } else {
                backend_dev->rdma_dev_res->stats.mad_rx_bufs++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
                                RdmaBackendSRQ *srq, struct ibv_sge *sge,
                                uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_srq = srq;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_srq_recv fail, srqn=0x%x, rc=%d, errno=%d",
                          srq->ibsrq->handle, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;
    backend_dev->rdma_dev_res->stats.rx_srq++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
{
    pd->ibpd = ibv_alloc_pd(backend_dev->context);

    if (!pd->ibpd) {
        rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_destroy_pd(RdmaBackendPD *pd)
{
    if (pd->ibpd) {
        ibv_dealloc_pd(pd->ibpd);
    }
}

int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, uint64_t guest_start, int access)
{
#ifdef LEGACY_RDMA_REG_MR
    mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
#else
    mr->ibmr = ibv_reg_mr_iova(pd->ibpd, addr, length, guest_start, access);
#endif
    if (!mr->ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    mr->ibpd = pd->ibpd;

    return 0;
}

void rdma_backend_destroy_mr(RdmaBackendMR *mr)
{
    if (mr->ibmr) {
        ibv_dereg_mr(mr->ibmr);
    }
}

int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}

void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
{
    if (cq->ibcq) {
        ibv_destroy_cq(cq->ibcq);
    }
}

int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
                           uint32_t max_send_wr, uint32_t max_recv_wr,
                           uint32_t max_send_sge, uint32_t max_recv_sge)
{
    struct ibv_qp_init_attr attr = {};

    qp->ibqp = 0;

    switch (qp_type) {
    case IBV_QPT_GSI:
        return 0;

    case IBV_QPT_RC:
        /* fall through */
    case IBV_QPT_UD:
        /* do nothing */
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    attr.qp_type = qp_type;
    attr.send_cq = scq->ibcq;
    attr.recv_cq = rcq->ibcq;
    attr.cap.max_send_wr = max_send_wr;
    attr.cap.max_recv_wr = max_recv_wr;
    attr.cap.max_send_sge = max_send_sge;
    attr.cap.max_recv_sge = max_recv_sge;
    if (srq) {
        attr.srq = srq->ibsrq;
    }

    qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
    if (!qp->ibqp) {
        rdma_error_report("ibv_create_qp fail, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&qp->cqe_ctx_list);

    qp->ibpd = pd->ibpd;

    /* TODO: Query QP to get max_inline_data and save it to be used in send */

    return 0;
}

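/*
 * The next three helpers move the backend QP to INIT, RTR and RTS with
 * ibv_modify_qp(), filling in the attribute mask required for each
 * transition and QP type.
 */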
int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = backend_dev->port_num;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_ACCESS_FLAGS;
        trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
        break;

    case IBV_QPT_UD:
        attr.qkey = qkey;
        attr_mask |= IBV_QP_QKEY;
        trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    union ibv_gid ibv_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTR;
    attr_mask = IBV_QP_STATE;

    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.path_mtu = IBV_MTU_1024;
        attr.dest_qp_num = dqpn;
        attr.max_dest_rd_atomic = 1;
        attr.min_rnr_timer = 12;
        attr.ah_attr.port_num = backend_dev->port_num;
        attr.ah_attr.is_global = 1;
        attr.ah_attr.grh.hop_limit = 1;
        attr.ah_attr.grh.dgid = ibv_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;
        attr.rq_psn = rq_psn;

        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(ibv_gid.global.
                                                       subnet_prefix),
                                           be64_to_cpu(ibv_gid.global.
                                                       interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey :
                                           0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn = sq_psn;
    attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.timeout = 14;
        attr.retry_cnt = 7;
        attr.rnr_retry = 7;
        attr.max_rd_atomic = 1;

        attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
                     IBV_QP_MAX_QP_RD_ATOMIC;

        trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (!qp->ibqp) {
        attr->qp_state = IBV_QPS_RTS;
        return 0;
    }

    return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
}

void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
    g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
}

int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
                            uint32_t max_wr, uint32_t max_sge,
                            uint32_t srq_limit)
{
    struct ibv_srq_init_attr srq_init_attr = {};

    srq_init_attr.attr.max_wr = max_wr;
    srq_init_attr.attr.max_sge = max_sge;
    srq_init_attr.attr.srq_limit = srq_limit;

    srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr);
    if (!srq->ibsrq) {
        rdma_error_report("ibv_create_srq failed, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&srq->cqe_ctx_list);

    return 0;
}

int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_query_srq(srq->ibsrq, srq_attr);
}

int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
                            int srq_attr_mask)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask);
}

void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res)
{
    if (srq->ibsrq) {
        ibv_destroy_srq(srq->ibsrq);
    }
    g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&srq->cqe_ctx_list);
}

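/*
 * CHK_ATTR clamps a guest-requested device attribute to the value reported
 * by the host HCA, warning when the request exceeds the host capability.
 */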
#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})

static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr bk_dev_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;
    dev_attr->max_srq_sge = MAX_SGE;

    CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d");

    return 0;
}

static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->paylen = htons(paylen);
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
}

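/*
 * A MAD arriving from rdmacm-mux is delivered into the next guest receive
 * buffer saved by save_mad_recv_buffer(): a GRH is synthesized in front of
 * the payload and the completion handler is invoked as if the MAD had been
 * received on QP1.
 */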
static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        backend_dev->rdma_dev_res->stats.mad_rx++;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}

static inline int rdmacm_mux_can_receive(void *opaque)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;

    return rdmacm_mux_can_process_async(backend_dev);
}

static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
    RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;

    trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);

    if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
        msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
        rdma_error_report("Error: Not a MAD request, skipping");
        return;
    }
    process_incoming_mad_req(backend_dev, msg);
}

static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
{
    int ret;

    backend_dev->rdmacm_mux.chr_be = mad_chr_be;

    ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
    if (!ret) {
        rdma_error_report("Missing chardev for MAD multiplexer");
        return -EIO;
    }

    rdma_protected_gqueue_init(&backend_dev->recv_mads_list);

    enable_rdmacm_mux_async(backend_dev);

    qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
                             rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
                             NULL, backend_dev, NULL, true);

    return 0;
}

static void mad_stop(RdmaBackendDev *backend_dev)
{
    clean_recv_mads(backend_dev);
}

static void mad_fini(RdmaBackendDev *backend_dev)
{
    disable_rdmacm_mux_async(backend_dev);
    qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
    rdma_protected_gqueue_destroy(&backend_dev->recv_mads_list);
}

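/*
 * Walk the GID table of the backend port until the requested GID is found
 * and return its index, or the ibv_query_gid() error code if it is not
 * present.
 */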
int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid)
{
    union ibv_gid sgid;
    int ret;
    int i = 0;

    do {
        ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
                            &sgid);
        i++;
    } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));

    trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
                                     be64_to_cpu(gid->global.interface_id),
                                     i - 1);

    return ret ? ret : i - 1;
}

int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, true,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return ret;
}

int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
                          ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, false,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}

int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
                      RdmaDeviceResources *rdma_dev_res,
                      const char *backend_device_name, uint8_t port_num,
                      struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be)
{
    int i;
    int ret = 0;
    int num_ibv_devices;
    struct ibv_device **dev_list;

    memset(backend_dev, 0, sizeof(*backend_dev));

    backend_dev->dev = pdev;
    backend_dev->port_num = port_num;
    backend_dev->rdma_dev_res = rdma_dev_res;

    rdma_backend_register_comp_handler(dummy_comp_handler);

    dev_list = ibv_get_device_list(&num_ibv_devices);
    if (!dev_list) {
        rdma_error_report("Failed to get IB devices list");
        return -EIO;
    }
    if (num_ibv_devices == 0) {
        rdma_error_report("No IB devices were found");
        ret = -ENXIO;
        goto out_free_dev_list;
    }

    if (backend_device_name) {
        for (i = 0; dev_list[i]; ++i) {
            if (!strcmp(ibv_get_device_name(dev_list[i]),
                        backend_device_name)) {
                break;
            }
        }

        backend_dev->ib_dev = dev_list[i];
        if (!backend_dev->ib_dev) {
            rdma_error_report("Failed to find IB device %s",
                              backend_device_name);
            ret = -EIO;
            goto out_free_dev_list;
        }
    } else {
        backend_dev->ib_dev = *dev_list;
    }

    rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name);

    backend_dev->context = ibv_open_device(backend_dev->ib_dev);
    if (!backend_dev->context) {
        rdma_error_report("Failed to open IB device %s",
                          ibv_get_device_name(backend_dev->ib_dev));
        ret = -EIO;
        goto out;
    }

    backend_dev->channel = ibv_create_comp_channel(backend_dev->context);
    if (!backend_dev->channel) {
        rdma_error_report("Failed to create IB communication channel");
        ret = -EIO;
        goto out_close_device;
    }

    ret = init_device_caps(backend_dev, dev_attr);
    if (ret) {
        rdma_error_report("Failed to initialize device capabilities");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    ret = mad_init(backend_dev, mad_chr_be);
    if (ret) {
        rdma_error_report("Failed to initialize mad");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    backend_dev->comp_thread.run = false;
    backend_dev->comp_thread.is_running = false;

    ah_cache_init();

    goto out_free_dev_list;

out_destroy_comm_channel:
    ibv_destroy_comp_channel(backend_dev->channel);

out_close_device:
    ibv_close_device(backend_dev->context);

out_free_dev_list:
    ibv_free_device_list(dev_list);

out:
    return ret;
}

void rdma_backend_start(RdmaBackendDev *backend_dev)
{
    start_comp_thread(backend_dev);
}

void rdma_backend_stop(RdmaBackendDev *backend_dev)
{
    mad_stop(backend_dev);
    stop_backend_thread(&backend_dev->comp_thread);
}

void rdma_backend_fini(RdmaBackendDev *backend_dev)
{
    mad_fini(backend_dev);
    g_hash_table_destroy(ah_hash);
    ibv_destroy_comp_channel(backend_dev->channel);
    ibv_close_device(backend_dev->context);
}