vhost-user.c

  1. /*
  2. * vhost-user
  3. *
  4. * Copyright (c) 2013 Virtual Open Systems Sarl.
  5. *
  6. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  7. * See the COPYING file in the top-level directory.
  8. *
  9. */
  10. #include "qemu/osdep.h"
  11. #include "qapi/error.h"
  12. #include "hw/virtio/vhost.h"
  13. #include "hw/virtio/vhost-user.h"
  14. #include "hw/virtio/vhost-backend.h"
  15. #include "hw/virtio/virtio.h"
  16. #include "hw/virtio/virtio-net.h"
  17. #include "chardev/char-fe.h"
  18. #include "sysemu/kvm.h"
  19. #include "qemu/error-report.h"
  20. #include "qemu/main-loop.h"
  21. #include "qemu/sockets.h"
  22. #include "sysemu/cryptodev.h"
  23. #include "migration/migration.h"
  24. #include "migration/postcopy-ram.h"
  25. #include "trace.h"
  26. #include <sys/ioctl.h>
  27. #include <sys/socket.h>
  28. #include <sys/un.h>
  29. #include "standard-headers/linux/vhost_types.h"
  30. #ifdef CONFIG_LINUX
  31. #include <linux/userfaultfd.h>
  32. #endif
  33. #define VHOST_MEMORY_MAX_NREGIONS 8
  34. #define VHOST_USER_F_PROTOCOL_FEATURES 30
  35. #define VHOST_USER_SLAVE_MAX_FDS 8
  36. /*
  37. * Maximum size of virtio device config space
  38. */
  39. #define VHOST_USER_MAX_CONFIG_SIZE 256
  40. enum VhostUserProtocolFeature {
  41. VHOST_USER_PROTOCOL_F_MQ = 0,
  42. VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
  43. VHOST_USER_PROTOCOL_F_RARP = 2,
  44. VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
  45. VHOST_USER_PROTOCOL_F_NET_MTU = 4,
  46. VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
  47. VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
  48. VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
  49. VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
  50. VHOST_USER_PROTOCOL_F_CONFIG = 9,
  51. VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
  52. VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
  53. VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
  54. VHOST_USER_PROTOCOL_F_MAX
  55. };
  56. #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
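/*
 * Protocol features are only negotiated when the backend advertises
 * VHOST_USER_F_PROTOCOL_FEATURES (bit 30) in VHOST_USER_GET_FEATURES.
 * vhost_user_backend_init() below then issues GET_PROTOCOL_FEATURES,
 * masks the reply with VHOST_USER_PROTOCOL_FEATURE_MASK and
 * acknowledges the result with SET_PROTOCOL_FEATURES before any of the
 * optional requests guarded by the flags above are used.
 */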
  57. typedef enum VhostUserRequest {
  58. VHOST_USER_NONE = 0,
  59. VHOST_USER_GET_FEATURES = 1,
  60. VHOST_USER_SET_FEATURES = 2,
  61. VHOST_USER_SET_OWNER = 3,
  62. VHOST_USER_RESET_OWNER = 4,
  63. VHOST_USER_SET_MEM_TABLE = 5,
  64. VHOST_USER_SET_LOG_BASE = 6,
  65. VHOST_USER_SET_LOG_FD = 7,
  66. VHOST_USER_SET_VRING_NUM = 8,
  67. VHOST_USER_SET_VRING_ADDR = 9,
  68. VHOST_USER_SET_VRING_BASE = 10,
  69. VHOST_USER_GET_VRING_BASE = 11,
  70. VHOST_USER_SET_VRING_KICK = 12,
  71. VHOST_USER_SET_VRING_CALL = 13,
  72. VHOST_USER_SET_VRING_ERR = 14,
  73. VHOST_USER_GET_PROTOCOL_FEATURES = 15,
  74. VHOST_USER_SET_PROTOCOL_FEATURES = 16,
  75. VHOST_USER_GET_QUEUE_NUM = 17,
  76. VHOST_USER_SET_VRING_ENABLE = 18,
  77. VHOST_USER_SEND_RARP = 19,
  78. VHOST_USER_NET_SET_MTU = 20,
  79. VHOST_USER_SET_SLAVE_REQ_FD = 21,
  80. VHOST_USER_IOTLB_MSG = 22,
  81. VHOST_USER_SET_VRING_ENDIAN = 23,
  82. VHOST_USER_GET_CONFIG = 24,
  83. VHOST_USER_SET_CONFIG = 25,
  84. VHOST_USER_CREATE_CRYPTO_SESSION = 26,
  85. VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
  86. VHOST_USER_POSTCOPY_ADVISE = 28,
  87. VHOST_USER_POSTCOPY_LISTEN = 29,
  88. VHOST_USER_POSTCOPY_END = 30,
  89. VHOST_USER_GET_INFLIGHT_FD = 31,
  90. VHOST_USER_SET_INFLIGHT_FD = 32,
  91. VHOST_USER_GPU_SET_SOCKET = 33,
  92. VHOST_USER_MAX
  93. } VhostUserRequest;
  94. typedef enum VhostUserSlaveRequest {
  95. VHOST_USER_SLAVE_NONE = 0,
  96. VHOST_USER_SLAVE_IOTLB_MSG = 1,
  97. VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
  98. VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
  99. VHOST_USER_SLAVE_MAX
  100. } VhostUserSlaveRequest;
  101. typedef struct VhostUserMemoryRegion {
  102. uint64_t guest_phys_addr;
  103. uint64_t memory_size;
  104. uint64_t userspace_addr;
  105. uint64_t mmap_offset;
  106. } VhostUserMemoryRegion;
  107. typedef struct VhostUserMemory {
  108. uint32_t nregions;
  109. uint32_t padding;
  110. VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
  111. } VhostUserMemory;
  112. typedef struct VhostUserLog {
  113. uint64_t mmap_size;
  114. uint64_t mmap_offset;
  115. } VhostUserLog;
  116. typedef struct VhostUserConfig {
  117. uint32_t offset;
  118. uint32_t size;
  119. uint32_t flags;
  120. uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
  121. } VhostUserConfig;
  122. #define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512
  123. #define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64
  124. typedef struct VhostUserCryptoSession {
  125. /* session id on success, -1 on error */
  126. int64_t session_id;
  127. CryptoDevBackendSymSessionInfo session_setup_data;
  128. uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
  129. uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
  130. } VhostUserCryptoSession;
  131. static VhostUserConfig c __attribute__ ((unused));
  132. #define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
  133. + sizeof(c.size) \
  134. + sizeof(c.flags))
  135. typedef struct VhostUserVringArea {
  136. uint64_t u64;
  137. uint64_t size;
  138. uint64_t offset;
  139. } VhostUserVringArea;
  140. typedef struct VhostUserInflight {
  141. uint64_t mmap_size;
  142. uint64_t mmap_offset;
  143. uint16_t num_queues;
  144. uint16_t queue_size;
  145. } VhostUserInflight;
  146. typedef struct {
  147. VhostUserRequest request;
  148. #define VHOST_USER_VERSION_MASK (0x3)
  149. #define VHOST_USER_REPLY_MASK (0x1<<2)
  150. #define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
  151. uint32_t flags;
  152. uint32_t size; /* the following payload size */
  153. } QEMU_PACKED VhostUserHeader;
  154. typedef union {
  155. #define VHOST_USER_VRING_IDX_MASK (0xff)
  156. #define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
  157. uint64_t u64;
  158. struct vhost_vring_state state;
  159. struct vhost_vring_addr addr;
  160. VhostUserMemory memory;
  161. VhostUserLog log;
  162. struct vhost_iotlb_msg iotlb;
  163. VhostUserConfig config;
  164. VhostUserCryptoSession session;
  165. VhostUserVringArea area;
  166. VhostUserInflight inflight;
  167. } VhostUserPayload;
  168. typedef struct VhostUserMsg {
  169. VhostUserHeader hdr;
  170. VhostUserPayload payload;
  171. } QEMU_PACKED VhostUserMsg;
  172. static VhostUserMsg m __attribute__ ((unused));
  173. #define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))
  174. #define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))
  175. /* The version of the protocol we support */
  176. #define VHOST_USER_VERSION (0x1)
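/*
 * Wire format: every message starts with the 12-byte VhostUserHeader
 * above, followed by hdr.size bytes of VhostUserPayload.  As an
 * illustrative sketch, a VHOST_USER_SET_VRING_NUM request is built by
 * vhost_set_vring() as roughly:
 *
 *     VhostUserMsg msg = {
 *         .hdr.request   = VHOST_USER_SET_VRING_NUM,
 *         .hdr.flags     = VHOST_USER_VERSION,
 *         .hdr.size      = sizeof(msg.payload.state),
 *         .payload.state = { .index = 0, .num = 256 },
 *     };
 *
 * and written out as VHOST_USER_HDR_SIZE + hdr.size bytes, with any
 * file descriptors passed out of band via SCM_RIGHTS (see
 * vhost_user_write()).  The .index/.num values here are example values
 * only.
 */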
  177. struct vhost_user {
  178. struct vhost_dev *dev;
  179. /* Shared between vhost devs of the same virtio device */
  180. VhostUserState *user;
  181. int slave_fd;
  182. NotifierWithReturn postcopy_notifier;
  183. struct PostCopyFD postcopy_fd;
  184. uint64_t postcopy_client_bases[VHOST_MEMORY_MAX_NREGIONS];
  185. /* Length of the region_rb and region_rb_offset arrays */
  186. size_t region_rb_len;
  187. /* RAMBlock associated with a given region */
  188. RAMBlock **region_rb;
  189. /* The offset from the start of the RAMBlock to the start of the
  190. * vhost region.
  191. */
  192. ram_addr_t *region_rb_offset;
  193. /* True once we've entered postcopy_listen */
  194. bool postcopy_listen;
  195. };
  196. static bool ioeventfd_enabled(void)
  197. {
  198. return !kvm_enabled() || kvm_eventfds_enabled();
  199. }
  200. static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
  201. {
  202. struct vhost_user *u = dev->opaque;
  203. CharBackend *chr = u->user->chr;
  204. uint8_t *p = (uint8_t *) msg;
  205. int r, size = VHOST_USER_HDR_SIZE;
  206. r = qemu_chr_fe_read_all(chr, p, size);
  207. if (r != size) {
  208. error_report("Failed to read msg header. Read %d instead of %d."
  209. " Original request %d.", r, size, msg->hdr.request);
  210. return -1;
  211. }
  212. /* validate received flags */
  213. if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
  214. error_report("Failed to read msg header."
  215. " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
  216. VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
  217. return -1;
  218. }
  219. return 0;
  220. }
  221. static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
  222. {
  223. struct vhost_user *u = dev->opaque;
  224. CharBackend *chr = u->user->chr;
  225. uint8_t *p = (uint8_t *) msg;
  226. int r, size;
  227. if (vhost_user_read_header(dev, msg) < 0) {
  228. return -1;
  229. }
  230. /* validate message size is sane */
  231. if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
  232. error_report("Failed to read msg header."
  233. " Size %d exceeds the maximum %zu.", msg->hdr.size,
  234. VHOST_USER_PAYLOAD_SIZE);
  235. return -1;
  236. }
  237. if (msg->hdr.size) {
  238. p += VHOST_USER_HDR_SIZE;
  239. size = msg->hdr.size;
  240. r = qemu_chr_fe_read_all(chr, p, size);
  241. if (r != size) {
  242. error_report("Failed to read msg payload."
  243. " Read %d instead of %d.", r, msg->hdr.size);
  244. return -1;
  245. }
  246. }
  247. return 0;
  248. }
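/*
 * REPLY_ACK handling: when a request was sent with
 * VHOST_USER_NEED_REPLY_MASK set, the slave answers with a message of
 * the same type whose u64 payload is 0 on success and non-zero on
 * error.  Callers use the return value of this helper as the overall
 * result of such a request.
 */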
  249. static int process_message_reply(struct vhost_dev *dev,
  250. const VhostUserMsg *msg)
  251. {
  252. VhostUserMsg msg_reply;
  253. if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
  254. return 0;
  255. }
  256. if (vhost_user_read(dev, &msg_reply) < 0) {
  257. return -1;
  258. }
  259. if (msg_reply.hdr.request != msg->hdr.request) {
  260. error_report("Received unexpected msg type."
  261. "Expected %d received %d",
  262. msg->hdr.request, msg_reply.hdr.request);
  263. return -1;
  264. }
  265. return msg_reply.payload.u64 ? -1 : 0;
  266. }
  267. static bool vhost_user_one_time_request(VhostUserRequest request)
  268. {
  269. switch (request) {
  270. case VHOST_USER_SET_OWNER:
  271. case VHOST_USER_RESET_OWNER:
  272. case VHOST_USER_SET_MEM_TABLE:
  273. case VHOST_USER_GET_QUEUE_NUM:
  274. case VHOST_USER_NET_SET_MTU:
  275. return true;
  276. default:
  277. return false;
  278. }
  279. }
  280. /* most non-init callers ignore the error */
  281. static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
  282. int *fds, int fd_num)
  283. {
  284. struct vhost_user *u = dev->opaque;
  285. CharBackend *chr = u->user->chr;
  286. int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;
  287. /*
  288. * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
  289. * we only need to send it once, the first time.  Any later such
  290. * request is simply ignored.
  291. */
  292. if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
  293. msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
  294. return 0;
  295. }
  296. if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
  297. error_report("Failed to set msg fds.");
  298. return -1;
  299. }
  300. ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
  301. if (ret != size) {
  302. error_report("Failed to write msg."
  303. " Wrote %d instead of %d.", ret, size);
  304. return -1;
  305. }
  306. return 0;
  307. }
  308. int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
  309. {
  310. VhostUserMsg msg = {
  311. .hdr.request = VHOST_USER_GPU_SET_SOCKET,
  312. .hdr.flags = VHOST_USER_VERSION,
  313. };
  314. return vhost_user_write(dev, &msg, &fd, 1);
  315. }
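/*
 * Dirty-log setup: when VHOST_USER_PROTOCOL_F_LOG_SHMFD has been
 * negotiated, the log memory is handed to the backend as a shared
 * memory fd alongside VHOST_USER_SET_LOG_BASE, and the call then waits
 * for the backend's matching reply before returning.
 */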
  316. static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
  317. struct vhost_log *log)
  318. {
  319. int fds[VHOST_MEMORY_MAX_NREGIONS];
  320. size_t fd_num = 0;
  321. bool shmfd = virtio_has_feature(dev->protocol_features,
  322. VHOST_USER_PROTOCOL_F_LOG_SHMFD);
  323. VhostUserMsg msg = {
  324. .hdr.request = VHOST_USER_SET_LOG_BASE,
  325. .hdr.flags = VHOST_USER_VERSION,
  326. .payload.log.mmap_size = log->size * sizeof(*(log->log)),
  327. .payload.log.mmap_offset = 0,
  328. .hdr.size = sizeof(msg.payload.log),
  329. };
  330. if (shmfd && log->fd != -1) {
  331. fds[fd_num++] = log->fd;
  332. }
  333. if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
  334. return -1;
  335. }
  336. if (shmfd) {
  337. msg.hdr.size = 0;
  338. if (vhost_user_read(dev, &msg) < 0) {
  339. return -1;
  340. }
  341. if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
  342. error_report("Received unexpected msg type. "
  343. "Expected %d received %d",
  344. VHOST_USER_SET_LOG_BASE, msg.hdr.request);
  345. return -1;
  346. }
  347. }
  348. return 0;
  349. }
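/*
 * Postcopy variant of SET_MEM_TABLE.  Only regions backed by an fd can
 * be shared with the backend, so guest RAM must come from a shareable
 * backend (for example, roughly:
 *   -object memory-backend-file,id=mem,share=on,... -numa node,memdev=mem).
 * The backend replies with the same region list, with userspace_addr
 * rewritten to the addresses at which it mapped each region; those are
 * recorded in postcopy_client_bases[] so the fault handler can
 * translate faulting client addresses back to RAMBlock offsets.  The
 * exchange ends with an explicit ack so the backend knows faults can
 * be serviced from that point on.
 */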
  350. static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
  351. struct vhost_memory *mem)
  352. {
  353. struct vhost_user *u = dev->opaque;
  354. int fds[VHOST_MEMORY_MAX_NREGIONS];
  355. int i, fd;
  356. size_t fd_num = 0;
  357. VhostUserMsg msg_reply;
  358. int region_i, msg_i;
  359. VhostUserMsg msg = {
  360. .hdr.request = VHOST_USER_SET_MEM_TABLE,
  361. .hdr.flags = VHOST_USER_VERSION,
  362. };
  363. if (u->region_rb_len < dev->mem->nregions) {
  364. u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
  365. u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
  366. dev->mem->nregions);
  367. memset(&(u->region_rb[u->region_rb_len]), '\0',
  368. sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
  369. memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
  370. sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
  371. u->region_rb_len = dev->mem->nregions;
  372. }
  373. for (i = 0; i < dev->mem->nregions; ++i) {
  374. struct vhost_memory_region *reg = dev->mem->regions + i;
  375. ram_addr_t offset;
  376. MemoryRegion *mr;
  377. assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
  378. mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
  379. &offset);
  380. fd = memory_region_get_fd(mr);
  381. if (fd > 0) {
  382. trace_vhost_user_set_mem_table_withfd(fd_num, mr->name,
  383. reg->memory_size,
  384. reg->guest_phys_addr,
  385. reg->userspace_addr, offset);
  386. u->region_rb_offset[i] = offset;
  387. u->region_rb[i] = mr->ram_block;
  388. msg.payload.memory.regions[fd_num].userspace_addr =
  389. reg->userspace_addr;
  390. msg.payload.memory.regions[fd_num].memory_size = reg->memory_size;
  391. msg.payload.memory.regions[fd_num].guest_phys_addr =
  392. reg->guest_phys_addr;
  393. msg.payload.memory.regions[fd_num].mmap_offset = offset;
  394. assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
  395. fds[fd_num++] = fd;
  396. } else {
  397. u->region_rb_offset[i] = 0;
  398. u->region_rb[i] = NULL;
  399. }
  400. }
  401. msg.payload.memory.nregions = fd_num;
  402. if (!fd_num) {
  403. error_report("Failed initializing vhost-user memory map, "
  404. "consider using -object memory-backend-file share=on");
  405. return -1;
  406. }
  407. msg.hdr.size = sizeof(msg.payload.memory.nregions);
  408. msg.hdr.size += sizeof(msg.payload.memory.padding);
  409. msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);
  410. if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
  411. return -1;
  412. }
  413. if (vhost_user_read(dev, &msg_reply) < 0) {
  414. return -1;
  415. }
  416. if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
  417. error_report("%s: Received unexpected msg type."
  418. "Expected %d received %d", __func__,
  419. VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
  420. return -1;
  421. }
  422. /* We're using the same structure, just reusing one of the
  423. * fields, so it should be the same size.
  424. */
  425. if (msg_reply.hdr.size != msg.hdr.size) {
  426. error_report("%s: Unexpected size for postcopy reply "
  427. "%d vs %d", __func__, msg_reply.hdr.size, msg.hdr.size);
  428. return -1;
  429. }
  430. memset(u->postcopy_client_bases, 0,
  431. sizeof(uint64_t) * VHOST_MEMORY_MAX_NREGIONS);
  432. /* They're in the same order as the regions that were sent
  433. * but some of the regions were skipped (above) if they
  434. * didn't have fds
  435. */
  436. for (msg_i = 0, region_i = 0;
  437. region_i < dev->mem->nregions;
  438. region_i++) {
  439. if (msg_i < fd_num &&
  440. msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
  441. dev->mem->regions[region_i].guest_phys_addr) {
  442. u->postcopy_client_bases[region_i] =
  443. msg_reply.payload.memory.regions[msg_i].userspace_addr;
  444. trace_vhost_user_set_mem_table_postcopy(
  445. msg_reply.payload.memory.regions[msg_i].userspace_addr,
  446. msg.payload.memory.regions[msg_i].userspace_addr,
  447. msg_i, region_i);
  448. msg_i++;
  449. }
  450. }
  451. if (msg_i != fd_num) {
  452. error_report("%s: postcopy reply not fully consumed "
  453. "%d vs %zd",
  454. __func__, msg_i, fd_num);
  455. return -1;
  456. }
  457. /* Now that we've registered this with the postcopy code, we ack to the
  458. * client, because we are now in a position to deal with any faults it
  459. * generates.
  460. */
  461. /* TODO: Use this for failure cases as well with a bad value */
  462. msg.hdr.size = sizeof(msg.payload.u64);
  463. msg.payload.u64 = 0; /* OK */
  464. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  465. return -1;
  466. }
  467. return 0;
  468. }
  469. static int vhost_user_set_mem_table(struct vhost_dev *dev,
  470. struct vhost_memory *mem)
  471. {
  472. struct vhost_user *u = dev->opaque;
  473. int fds[VHOST_MEMORY_MAX_NREGIONS];
  474. int i, fd;
  475. size_t fd_num = 0;
  476. bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
  477. bool reply_supported = virtio_has_feature(dev->protocol_features,
  478. VHOST_USER_PROTOCOL_F_REPLY_ACK);
  479. if (do_postcopy) {
  480. /* Postcopy has enough differences that it's best done in its own
  481. * version
  482. */
  483. return vhost_user_set_mem_table_postcopy(dev, mem);
  484. }
  485. VhostUserMsg msg = {
  486. .hdr.request = VHOST_USER_SET_MEM_TABLE,
  487. .hdr.flags = VHOST_USER_VERSION,
  488. };
  489. if (reply_supported) {
  490. msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
  491. }
  492. for (i = 0; i < dev->mem->nregions; ++i) {
  493. struct vhost_memory_region *reg = dev->mem->regions + i;
  494. ram_addr_t offset;
  495. MemoryRegion *mr;
  496. assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
  497. mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
  498. &offset);
  499. fd = memory_region_get_fd(mr);
  500. if (fd > 0) {
  501. if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
  502. error_report("Failed preparing vhost-user memory table msg");
  503. return -1;
  504. }
  505. msg.payload.memory.regions[fd_num].userspace_addr =
  506. reg->userspace_addr;
  507. msg.payload.memory.regions[fd_num].memory_size = reg->memory_size;
  508. msg.payload.memory.regions[fd_num].guest_phys_addr =
  509. reg->guest_phys_addr;
  510. msg.payload.memory.regions[fd_num].mmap_offset = offset;
  511. fds[fd_num++] = fd;
  512. }
  513. }
  514. msg.payload.memory.nregions = fd_num;
  515. if (!fd_num) {
  516. error_report("Failed initializing vhost-user memory map, "
  517. "consider using -object memory-backend-file share=on");
  518. return -1;
  519. }
  520. msg.hdr.size = sizeof(msg.payload.memory.nregions);
  521. msg.hdr.size += sizeof(msg.payload.memory.padding);
  522. msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);
  523. if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
  524. return -1;
  525. }
  526. if (reply_supported) {
  527. return process_message_reply(dev, &msg);
  528. }
  529. return 0;
  530. }
  531. static int vhost_user_set_vring_addr(struct vhost_dev *dev,
  532. struct vhost_vring_addr *addr)
  533. {
  534. VhostUserMsg msg = {
  535. .hdr.request = VHOST_USER_SET_VRING_ADDR,
  536. .hdr.flags = VHOST_USER_VERSION,
  537. .payload.addr = *addr,
  538. .hdr.size = sizeof(msg.payload.addr),
  539. };
  540. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  541. return -1;
  542. }
  543. return 0;
  544. }
  545. static int vhost_user_set_vring_endian(struct vhost_dev *dev,
  546. struct vhost_vring_state *ring)
  547. {
  548. bool cross_endian = virtio_has_feature(dev->protocol_features,
  549. VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
  550. VhostUserMsg msg = {
  551. .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
  552. .hdr.flags = VHOST_USER_VERSION,
  553. .payload.state = *ring,
  554. .hdr.size = sizeof(msg.payload.state),
  555. };
  556. if (!cross_endian) {
  557. error_report("vhost-user trying to send unhandled ioctl");
  558. return -1;
  559. }
  560. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  561. return -1;
  562. }
  563. return 0;
  564. }
  565. static int vhost_set_vring(struct vhost_dev *dev,
  566. unsigned long int request,
  567. struct vhost_vring_state *ring)
  568. {
  569. VhostUserMsg msg = {
  570. .hdr.request = request,
  571. .hdr.flags = VHOST_USER_VERSION,
  572. .payload.state = *ring,
  573. .hdr.size = sizeof(msg.payload.state),
  574. };
  575. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  576. return -1;
  577. }
  578. return 0;
  579. }
  580. static int vhost_user_set_vring_num(struct vhost_dev *dev,
  581. struct vhost_vring_state *ring)
  582. {
  583. return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
  584. }
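/*
 * The host-notifier memory region installed by the backend (see
 * vhost_user_slave_handle_vring_host_notifier() below) is removed
 * before GET_VRING_BASE, which stops the ring, and re-installed on the
 * next SET_VRING_BASE when the ring is started again.
 */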
  585. static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
  586. int queue_idx)
  587. {
  588. struct vhost_user *u = dev->opaque;
  589. VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
  590. VirtIODevice *vdev = dev->vdev;
  591. if (n->addr && !n->set) {
  592. virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
  593. n->set = true;
  594. }
  595. }
  596. static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
  597. int queue_idx)
  598. {
  599. struct vhost_user *u = dev->opaque;
  600. VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
  601. VirtIODevice *vdev = dev->vdev;
  602. if (n->addr && n->set) {
  603. virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
  604. n->set = false;
  605. }
  606. }
  607. static int vhost_user_set_vring_base(struct vhost_dev *dev,
  608. struct vhost_vring_state *ring)
  609. {
  610. vhost_user_host_notifier_restore(dev, ring->index);
  611. return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
  612. }
  613. static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
  614. {
  615. int i;
  616. if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
  617. return -1;
  618. }
  619. for (i = 0; i < dev->nvqs; ++i) {
  620. struct vhost_vring_state state = {
  621. .index = dev->vq_index + i,
  622. .num = enable,
  623. };
  624. vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
  625. }
  626. return 0;
  627. }
  628. static int vhost_user_get_vring_base(struct vhost_dev *dev,
  629. struct vhost_vring_state *ring)
  630. {
  631. VhostUserMsg msg = {
  632. .hdr.request = VHOST_USER_GET_VRING_BASE,
  633. .hdr.flags = VHOST_USER_VERSION,
  634. .payload.state = *ring,
  635. .hdr.size = sizeof(msg.payload.state),
  636. };
  637. vhost_user_host_notifier_remove(dev, ring->index);
  638. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  639. return -1;
  640. }
  641. if (vhost_user_read(dev, &msg) < 0) {
  642. return -1;
  643. }
  644. if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
  645. error_report("Received unexpected msg type. Expected %d received %d",
  646. VHOST_USER_GET_VRING_BASE, msg.hdr.request);
  647. return -1;
  648. }
  649. if (msg.hdr.size != sizeof(msg.payload.state)) {
  650. error_report("Received bad msg size.");
  651. return -1;
  652. }
  653. *ring = msg.payload.state;
  654. return 0;
  655. }
  656. static int vhost_set_vring_file(struct vhost_dev *dev,
  657. VhostUserRequest request,
  658. struct vhost_vring_file *file)
  659. {
  660. int fds[VHOST_MEMORY_MAX_NREGIONS];
  661. size_t fd_num = 0;
  662. VhostUserMsg msg = {
  663. .hdr.request = request,
  664. .hdr.flags = VHOST_USER_VERSION,
  665. .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
  666. .hdr.size = sizeof(msg.payload.u64),
  667. };
  668. if (ioeventfd_enabled() && file->fd > 0) {
  669. fds[fd_num++] = file->fd;
  670. } else {
  671. msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
  672. }
  673. if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
  674. return -1;
  675. }
  676. return 0;
  677. }
  678. static int vhost_user_set_vring_kick(struct vhost_dev *dev,
  679. struct vhost_vring_file *file)
  680. {
  681. return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
  682. }
  683. static int vhost_user_set_vring_call(struct vhost_dev *dev,
  684. struct vhost_vring_file *file)
  685. {
  686. return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
  687. }
  688. static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
  689. {
  690. VhostUserMsg msg = {
  691. .hdr.request = request,
  692. .hdr.flags = VHOST_USER_VERSION,
  693. .payload.u64 = u64,
  694. .hdr.size = sizeof(msg.payload.u64),
  695. };
  696. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  697. return -1;
  698. }
  699. return 0;
  700. }
  701. static int vhost_user_set_features(struct vhost_dev *dev,
  702. uint64_t features)
  703. {
  704. return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
  705. }
  706. static int vhost_user_set_protocol_features(struct vhost_dev *dev,
  707. uint64_t features)
  708. {
  709. return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
  710. }
  711. static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
  712. {
  713. VhostUserMsg msg = {
  714. .hdr.request = request,
  715. .hdr.flags = VHOST_USER_VERSION,
  716. };
  717. if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
  718. return 0;
  719. }
  720. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  721. return -1;
  722. }
  723. if (vhost_user_read(dev, &msg) < 0) {
  724. return -1;
  725. }
  726. if (msg.hdr.request != request) {
  727. error_report("Received unexpected msg type. Expected %d received %d",
  728. request, msg.hdr.request);
  729. return -1;
  730. }
  731. if (msg.hdr.size != sizeof(msg.payload.u64)) {
  732. error_report("Received bad msg size.");
  733. return -1;
  734. }
  735. *u64 = msg.payload.u64;
  736. return 0;
  737. }
  738. static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
  739. {
  740. return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
  741. }
  742. static int vhost_user_set_owner(struct vhost_dev *dev)
  743. {
  744. VhostUserMsg msg = {
  745. .hdr.request = VHOST_USER_SET_OWNER,
  746. .hdr.flags = VHOST_USER_VERSION,
  747. };
  748. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  749. return -1;
  750. }
  751. return 0;
  752. }
  753. static int vhost_user_reset_device(struct vhost_dev *dev)
  754. {
  755. VhostUserMsg msg = {
  756. .hdr.request = VHOST_USER_RESET_OWNER,
  757. .hdr.flags = VHOST_USER_VERSION,
  758. };
  759. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  760. return -1;
  761. }
  762. return 0;
  763. }
  764. static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
  765. {
  766. int ret = -1;
  767. if (!dev->config_ops) {
  768. return -1;
  769. }
  770. if (dev->config_ops->vhost_dev_config_notifier) {
  771. ret = dev->config_ops->vhost_dev_config_notifier(dev);
  772. }
  773. return ret;
  774. }
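/*
 * VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: the backend hands us an fd
 * plus an offset/size describing one page of its own notification
 * area.  That page is mmapped and installed as a host-notifier memory
 * region for the queue, so guest notifications for the ring are
 * written directly into memory the backend provides.
 */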
  775. static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
  776. VhostUserVringArea *area,
  777. int fd)
  778. {
  779. int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
  780. size_t page_size = qemu_real_host_page_size;
  781. struct vhost_user *u = dev->opaque;
  782. VhostUserState *user = u->user;
  783. VirtIODevice *vdev = dev->vdev;
  784. VhostUserHostNotifier *n;
  785. void *addr;
  786. char *name;
  787. if (!virtio_has_feature(dev->protocol_features,
  788. VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
  789. vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
  790. return -1;
  791. }
  792. n = &user->notifier[queue_idx];
  793. if (n->addr) {
  794. virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
  795. object_unparent(OBJECT(&n->mr));
  796. munmap(n->addr, page_size);
  797. n->addr = NULL;
  798. }
  799. if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
  800. return 0;
  801. }
  802. /* Sanity check. */
  803. if (area->size != page_size) {
  804. return -1;
  805. }
  806. addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
  807. fd, area->offset);
  808. if (addr == MAP_FAILED) {
  809. return -1;
  810. }
  811. name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
  812. user, queue_idx);
  813. memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
  814. page_size, addr);
  815. g_free(name);
  816. if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
  817. munmap(addr, page_size);
  818. return -1;
  819. }
  820. n->addr = addr;
  821. n->set = true;
  822. return 0;
  823. }
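/*
 * Handler for the slave (backend-to-QEMU) channel set up by
 * vhost_setup_slave_channel() below.  Each message is a
 * VhostUserHeader plus payload, with up to VHOST_USER_SLAVE_MAX_FDS
 * file descriptors attached via SCM_RIGHTS; any descriptors left over
 * after dispatching the request are closed here, and the channel is
 * torn down on error.
 */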
  824. static void slave_read(void *opaque)
  825. {
  826. struct vhost_dev *dev = opaque;
  827. struct vhost_user *u = dev->opaque;
  828. VhostUserHeader hdr = { 0, };
  829. VhostUserPayload payload = { 0, };
  830. int size, ret = 0;
  831. struct iovec iov;
  832. struct msghdr msgh;
  833. int fd[VHOST_USER_SLAVE_MAX_FDS];
  834. char control[CMSG_SPACE(sizeof(fd))];
  835. struct cmsghdr *cmsg;
  836. int i, fdsize = 0;
  837. memset(&msgh, 0, sizeof(msgh));
  838. msgh.msg_iov = &iov;
  839. msgh.msg_iovlen = 1;
  840. msgh.msg_control = control;
  841. msgh.msg_controllen = sizeof(control);
  842. memset(fd, -1, sizeof(fd));
  843. /* Read header */
  844. iov.iov_base = &hdr;
  845. iov.iov_len = VHOST_USER_HDR_SIZE;
  846. do {
  847. size = recvmsg(u->slave_fd, &msgh, 0);
  848. } while (size < 0 && (errno == EINTR || errno == EAGAIN));
  849. if (size != VHOST_USER_HDR_SIZE) {
  850. error_report("Failed to read from slave.");
  851. goto err;
  852. }
  853. if (msgh.msg_flags & MSG_CTRUNC) {
  854. error_report("Truncated message.");
  855. goto err;
  856. }
  857. for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
  858. cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
  859. if (cmsg->cmsg_level == SOL_SOCKET &&
  860. cmsg->cmsg_type == SCM_RIGHTS) {
  861. fdsize = cmsg->cmsg_len - CMSG_LEN(0);
  862. memcpy(fd, CMSG_DATA(cmsg), fdsize);
  863. break;
  864. }
  865. }
  866. if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
  867. error_report("Failed to read msg header."
  868. " Size %d exceeds the maximum %zu.", hdr.size,
  869. VHOST_USER_PAYLOAD_SIZE);
  870. goto err;
  871. }
  872. /* Read payload */
  873. do {
  874. size = read(u->slave_fd, &payload, hdr.size);
  875. } while (size < 0 && (errno == EINTR || errno == EAGAIN));
  876. if (size != hdr.size) {
  877. error_report("Failed to read payload from slave.");
  878. goto err;
  879. }
  880. switch (hdr.request) {
  881. case VHOST_USER_SLAVE_IOTLB_MSG:
  882. ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
  883. break;
  884. case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG :
  885. ret = vhost_user_slave_handle_config_change(dev);
  886. break;
  887. case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
  888. ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
  889. fd[0]);
  890. break;
  891. default:
  892. error_report("Received unexpected msg type.");
  893. ret = -EINVAL;
  894. }
  895. /* Close the remaining file descriptors. */
  896. for (i = 0; i < fdsize; i++) {
  897. if (fd[i] != -1) {
  898. close(fd[i]);
  899. }
  900. }
  901. /*
  902. * REPLY_ACK feature handling. Other reply types have to be managed
  903. * directly in their request handlers.
  904. */
  905. if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
  906. struct iovec iovec[2];
  907. hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
  908. hdr.flags |= VHOST_USER_REPLY_MASK;
  909. payload.u64 = !!ret;
  910. hdr.size = sizeof(payload.u64);
  911. iovec[0].iov_base = &hdr;
  912. iovec[0].iov_len = VHOST_USER_HDR_SIZE;
  913. iovec[1].iov_base = &payload;
  914. iovec[1].iov_len = hdr.size;
  915. do {
  916. size = writev(u->slave_fd, iovec, ARRAY_SIZE(iovec));
  917. } while (size < 0 && (errno == EINTR || errno == EAGAIN));
  918. if (size != VHOST_USER_HDR_SIZE + hdr.size) {
  919. error_report("Failed to send msg reply to slave.");
  920. goto err;
  921. }
  922. }
  923. return;
  924. err:
  925. qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
  926. close(u->slave_fd);
  927. u->slave_fd = -1;
  928. for (i = 0; i < fdsize; i++) {
  929. if (fd[i] != -1) {
  930. close(fd[i]);
  931. }
  932. }
  933. return;
  934. }
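/*
 * If the backend advertises VHOST_USER_PROTOCOL_F_SLAVE_REQ, create a
 * socketpair, keep one end locally with slave_read() as its handler,
 * and pass the other end to the backend with
 * VHOST_USER_SET_SLAVE_REQ_FD so the backend can send requests of its
 * own (IOTLB messages, config change notifications, host-notifier
 * setup).
 */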
  935. static int vhost_setup_slave_channel(struct vhost_dev *dev)
  936. {
  937. VhostUserMsg msg = {
  938. .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
  939. .hdr.flags = VHOST_USER_VERSION,
  940. };
  941. struct vhost_user *u = dev->opaque;
  942. int sv[2], ret = 0;
  943. bool reply_supported = virtio_has_feature(dev->protocol_features,
  944. VHOST_USER_PROTOCOL_F_REPLY_ACK);
  945. if (!virtio_has_feature(dev->protocol_features,
  946. VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
  947. return 0;
  948. }
  949. if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
  950. error_report("socketpair() failed");
  951. return -1;
  952. }
  953. u->slave_fd = sv[0];
  954. qemu_set_fd_handler(u->slave_fd, slave_read, NULL, dev);
  955. if (reply_supported) {
  956. msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
  957. }
  958. ret = vhost_user_write(dev, &msg, &sv[1], 1);
  959. if (ret) {
  960. goto out;
  961. }
  962. if (reply_supported) {
  963. ret = process_message_reply(dev, &msg);
  964. }
  965. out:
  966. close(sv[1]);
  967. if (ret) {
  968. qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
  969. close(u->slave_fd);
  970. u->slave_fd = -1;
  971. }
  972. return ret;
  973. }
  974. #ifdef CONFIG_LINUX
  975. /*
  976. * Called back from the postcopy fault thread when a fault is received on our
  977. * ufd.
  978. * TODO: This is Linux specific
  979. */
  980. static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
  981. void *ufd)
  982. {
  983. struct vhost_dev *dev = pcfd->data;
  984. struct vhost_user *u = dev->opaque;
  985. struct uffd_msg *msg = ufd;
  986. uint64_t faultaddr = msg->arg.pagefault.address;
  987. RAMBlock *rb = NULL;
  988. uint64_t rb_offset;
  989. int i;
  990. trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
  991. dev->mem->nregions);
  992. for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
  993. trace_vhost_user_postcopy_fault_handler_loop(i,
  994. u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
  995. if (faultaddr >= u->postcopy_client_bases[i]) {
  996. /* Offset of the fault address in the vhost region */
  997. uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
  998. if (region_offset < dev->mem->regions[i].memory_size) {
  999. rb_offset = region_offset + u->region_rb_offset[i];
  1000. trace_vhost_user_postcopy_fault_handler_found(i,
  1001. region_offset, rb_offset);
  1002. rb = u->region_rb[i];
  1003. return postcopy_request_shared_page(pcfd, rb, faultaddr,
  1004. rb_offset);
  1005. }
  1006. }
  1007. }
  1008. error_report("%s: Failed to find region for fault %" PRIx64,
  1009. __func__, faultaddr);
  1010. return -1;
  1011. }
  1012. static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
  1013. uint64_t offset)
  1014. {
  1015. struct vhost_dev *dev = pcfd->data;
  1016. struct vhost_user *u = dev->opaque;
  1017. int i;
  1018. trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
  1019. if (!u) {
  1020. return 0;
  1021. }
  1022. /* Translate the offset into an address in the client's address space */
  1023. for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
  1024. if (u->region_rb[i] == rb &&
  1025. offset >= u->region_rb_offset[i] &&
  1026. offset < (u->region_rb_offset[i] +
  1027. dev->mem->regions[i].memory_size)) {
  1028. uint64_t client_addr = (offset - u->region_rb_offset[i]) +
  1029. u->postcopy_client_bases[i];
  1030. trace_vhost_user_postcopy_waker_found(client_addr);
  1031. return postcopy_wake_shared(pcfd, client_addr, rb);
  1032. }
  1033. }
  1034. trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
  1035. return 0;
  1036. }
  1037. #endif
  1038. /*
  1039. * Called at the start of an inbound postcopy on reception of the
  1040. * 'advise' command.
  1041. */
  1042. static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
  1043. {
  1044. #ifdef CONFIG_LINUX
  1045. struct vhost_user *u = dev->opaque;
  1046. CharBackend *chr = u->user->chr;
  1047. int ufd;
  1048. VhostUserMsg msg = {
  1049. .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
  1050. .hdr.flags = VHOST_USER_VERSION,
  1051. };
  1052. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  1053. error_setg(errp, "Failed to send postcopy_advise to vhost");
  1054. return -1;
  1055. }
  1056. if (vhost_user_read(dev, &msg) < 0) {
  1057. error_setg(errp, "Failed to get postcopy_advise reply from vhost");
  1058. return -1;
  1059. }
  1060. if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
  1061. error_setg(errp, "Unexpected msg type. Expected %d received %d",
  1062. VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
  1063. return -1;
  1064. }
  1065. if (msg.hdr.size) {
  1066. error_setg(errp, "Received bad msg size.");
  1067. return -1;
  1068. }
  1069. ufd = qemu_chr_fe_get_msgfd(chr);
  1070. if (ufd < 0) {
  1071. error_setg(errp, "%s: Failed to get ufd", __func__);
  1072. return -1;
  1073. }
  1074. qemu_set_nonblock(ufd);
  1075. /* register ufd with userfault thread */
  1076. u->postcopy_fd.fd = ufd;
  1077. u->postcopy_fd.data = dev;
  1078. u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
  1079. u->postcopy_fd.waker = vhost_user_postcopy_waker;
  1080. u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
  1081. postcopy_register_shared_ufd(&u->postcopy_fd);
  1082. return 0;
  1083. #else
  1084. error_setg(errp, "Postcopy not supported on non-Linux systems");
  1085. return -1;
  1086. #endif
  1087. }
  1088. /*
  1089. * Called at the switch to postcopy on reception of the 'listen' command.
  1090. */
  1091. static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
  1092. {
  1093. struct vhost_user *u = dev->opaque;
  1094. int ret;
  1095. VhostUserMsg msg = {
  1096. .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
  1097. .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
  1098. };
  1099. u->postcopy_listen = true;
  1100. trace_vhost_user_postcopy_listen();
  1101. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  1102. error_setg(errp, "Failed to send postcopy_listen to vhost");
  1103. return -1;
  1104. }
  1105. ret = process_message_reply(dev, &msg);
  1106. if (ret) {
  1107. error_setg(errp, "Failed to receive reply to postcopy_listen");
  1108. return ret;
  1109. }
  1110. return 0;
  1111. }
  1112. /*
  1113. * Called at the end of postcopy
  1114. */
  1115. static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
  1116. {
  1117. VhostUserMsg msg = {
  1118. .hdr.request = VHOST_USER_POSTCOPY_END,
  1119. .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
  1120. };
  1121. int ret;
  1122. struct vhost_user *u = dev->opaque;
  1123. trace_vhost_user_postcopy_end_entry();
  1124. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  1125. error_setg(errp, "Failed to send postcopy_end to vhost");
  1126. return -1;
  1127. }
  1128. ret = process_message_reply(dev, &msg);
  1129. if (ret) {
  1130. error_setg(errp, "Failed to receive reply to postcopy_end");
  1131. return ret;
  1132. }
  1133. postcopy_unregister_shared_ufd(&u->postcopy_fd);
  1134. close(u->postcopy_fd.fd);
  1135. u->postcopy_fd.handler = NULL;
  1136. trace_vhost_user_postcopy_end_exit();
  1137. return 0;
  1138. }
  1139. static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
  1140. void *opaque)
  1141. {
  1142. struct PostcopyNotifyData *pnd = opaque;
  1143. struct vhost_user *u = container_of(notifier, struct vhost_user,
  1144. postcopy_notifier);
  1145. struct vhost_dev *dev = u->dev;
  1146. switch (pnd->reason) {
  1147. case POSTCOPY_NOTIFY_PROBE:
  1148. if (!virtio_has_feature(dev->protocol_features,
  1149. VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
  1150. /* TODO: Get the device name into this error somehow */
  1151. error_setg(pnd->errp,
  1152. "vhost-user backend not capable of postcopy");
  1153. return -ENOENT;
  1154. }
  1155. break;
  1156. case POSTCOPY_NOTIFY_INBOUND_ADVISE:
  1157. return vhost_user_postcopy_advise(dev, pnd->errp);
  1158. case POSTCOPY_NOTIFY_INBOUND_LISTEN:
  1159. return vhost_user_postcopy_listen(dev, pnd->errp);
  1160. case POSTCOPY_NOTIFY_INBOUND_END:
  1161. return vhost_user_postcopy_end(dev, pnd->errp);
  1162. default:
  1163. /* We ignore notifications we don't know about */
  1164. break;
  1165. }
  1166. return 0;
  1167. }
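/*
 * Backend initialization: fetch the device features and, if
 * VHOST_USER_F_PROTOCOL_FEATURES is offered, negotiate the protocol
 * features, query the maximum number of queues (when F_MQ is present),
 * check the IOMMU prerequisites, then set up the slave channel and
 * register the postcopy notifier.  A missing LOG_SHMFD feature only
 * blocks migration rather than failing initialization.
 */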
  1168. static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque)
  1169. {
  1170. uint64_t features, protocol_features;
  1171. struct vhost_user *u;
  1172. int err;
  1173. assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
  1174. u = g_new0(struct vhost_user, 1);
  1175. u->user = opaque;
  1176. u->slave_fd = -1;
  1177. u->dev = dev;
  1178. dev->opaque = u;
  1179. err = vhost_user_get_features(dev, &features);
  1180. if (err < 0) {
  1181. return err;
  1182. }
  1183. if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
  1184. dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
  1185. err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
  1186. &protocol_features);
  1187. if (err < 0) {
  1188. return err;
  1189. }
  1190. dev->protocol_features =
  1191. protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;
  1192. if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
  1193. /* Don't acknowledge CONFIG feature if device doesn't support it */
  1194. dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
  1195. } else if (!(protocol_features &
  1196. (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
  1197. error_report("Device expects VHOST_USER_PROTOCOL_F_CONFIG "
  1198. "but backend does not support it.");
  1199. return -1;
  1200. }
  1201. err = vhost_user_set_protocol_features(dev, dev->protocol_features);
  1202. if (err < 0) {
  1203. return err;
  1204. }
  1205. /* query the maximum number of queues if the backend supports multiple queues */
  1206. if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
  1207. err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
  1208. &dev->max_queues);
  1209. if (err < 0) {
  1210. return err;
  1211. }
  1212. }
  1213. if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
  1214. !(virtio_has_feature(dev->protocol_features,
  1215. VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
  1216. virtio_has_feature(dev->protocol_features,
  1217. VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
  1218. error_report("IOMMU support requires reply-ack and "
  1219. "slave-req protocol features.");
  1220. return -1;
  1221. }
  1222. }
  1223. if (dev->migration_blocker == NULL &&
  1224. !virtio_has_feature(dev->protocol_features,
  1225. VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
  1226. error_setg(&dev->migration_blocker,
  1227. "Migration disabled: vhost-user backend lacks "
  1228. "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
  1229. }
  1230. err = vhost_setup_slave_channel(dev);
  1231. if (err < 0) {
  1232. return err;
  1233. }
  1234. u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
  1235. postcopy_add_notifier(&u->postcopy_notifier);
  1236. return 0;
  1237. }
  1238. static int vhost_user_backend_cleanup(struct vhost_dev *dev)
  1239. {
  1240. struct vhost_user *u;
  1241. assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
  1242. u = dev->opaque;
  1243. if (u->postcopy_notifier.notify) {
  1244. postcopy_remove_notifier(&u->postcopy_notifier);
  1245. u->postcopy_notifier.notify = NULL;
  1246. }
  1247. u->postcopy_listen = false;
  1248. if (u->postcopy_fd.handler) {
  1249. postcopy_unregister_shared_ufd(&u->postcopy_fd);
  1250. close(u->postcopy_fd.fd);
  1251. u->postcopy_fd.handler = NULL;
  1252. }
  1253. if (u->slave_fd >= 0) {
  1254. qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
  1255. close(u->slave_fd);
  1256. u->slave_fd = -1;
  1257. }
  1258. g_free(u->region_rb);
  1259. u->region_rb = NULL;
  1260. g_free(u->region_rb_offset);
  1261. u->region_rb_offset = NULL;
  1262. u->region_rb_len = 0;
  1263. g_free(u);
  1264. dev->opaque = 0;
  1265. return 0;
  1266. }
  1267. static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
  1268. {
  1269. assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
  1270. return idx;
  1271. }
  1272. static int vhost_user_memslots_limit(struct vhost_dev *dev)
  1273. {
  1274. return VHOST_MEMORY_MAX_NREGIONS;
  1275. }
  1276. static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
  1277. {
  1278. assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
  1279. return virtio_has_feature(dev->protocol_features,
  1280. VHOST_USER_PROTOCOL_F_LOG_SHMFD);
  1281. }
  1282. static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
  1283. {
  1284. VhostUserMsg msg = { };
  1285. assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
  1286. /* If the guest supports GUEST_ANNOUNCE, do nothing */
  1287. if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
  1288. return 0;
  1289. }
  1290. /* if the backend supports VHOST_USER_PROTOCOL_F_RARP, ask it to send the RARP */
  1291. if (virtio_has_feature(dev->protocol_features,
  1292. VHOST_USER_PROTOCOL_F_RARP)) {
  1293. msg.hdr.request = VHOST_USER_SEND_RARP;
  1294. msg.hdr.flags = VHOST_USER_VERSION;
  1295. memcpy((char *)&msg.payload.u64, mac_addr, 6);
  1296. msg.hdr.size = sizeof(msg.payload.u64);
  1297. return vhost_user_write(dev, &msg, NULL, 0);
  1298. }
  1299. return -1;
  1300. }
  1301. static bool vhost_user_can_merge(struct vhost_dev *dev,
  1302. uint64_t start1, uint64_t size1,
  1303. uint64_t start2, uint64_t size2)
  1304. {
  1305. ram_addr_t offset;
  1306. int mfd, rfd;
  1307. MemoryRegion *mr;
  1308. mr = memory_region_from_host((void *)(uintptr_t)start1, &offset);
  1309. mfd = memory_region_get_fd(mr);
  1310. mr = memory_region_from_host((void *)(uintptr_t)start2, &offset);
  1311. rfd = memory_region_get_fd(mr);
  1312. return mfd == rfd;
  1313. }
  1314. static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
  1315. {
  1316. VhostUserMsg msg;
  1317. bool reply_supported = virtio_has_feature(dev->protocol_features,
  1318. VHOST_USER_PROTOCOL_F_REPLY_ACK);
  1319. if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
  1320. return 0;
  1321. }
  1322. msg.hdr.request = VHOST_USER_NET_SET_MTU;
  1323. msg.payload.u64 = mtu;
  1324. msg.hdr.size = sizeof(msg.payload.u64);
  1325. msg.hdr.flags = VHOST_USER_VERSION;
  1326. if (reply_supported) {
  1327. msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
  1328. }
  1329. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  1330. return -1;
  1331. }
  1332. /* If reply_ack is supported, the slave has to ack that the specified MTU is valid */
  1333. if (reply_supported) {
  1334. return process_message_reply(dev, &msg);
  1335. }
  1336. return 0;
  1337. }
  1338. static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
  1339. struct vhost_iotlb_msg *imsg)
  1340. {
  1341. VhostUserMsg msg = {
  1342. .hdr.request = VHOST_USER_IOTLB_MSG,
  1343. .hdr.size = sizeof(msg.payload.iotlb),
  1344. .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
  1345. .payload.iotlb = *imsg,
  1346. };
  1347. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  1348. return -EFAULT;
  1349. }
  1350. return process_message_reply(dev, &msg);
  1351. }
  1352. static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
  1353. {
  1354. /* No-op as the receive channel is not dedicated to IOTLB messages. */
  1355. }
  1356. static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
  1357. uint32_t config_len)
  1358. {
  1359. VhostUserMsg msg = {
  1360. .hdr.request = VHOST_USER_GET_CONFIG,
  1361. .hdr.flags = VHOST_USER_VERSION,
  1362. .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
  1363. };
  1364. if (!virtio_has_feature(dev->protocol_features,
  1365. VHOST_USER_PROTOCOL_F_CONFIG)) {
  1366. return -1;
  1367. }
  1368. if (config_len > VHOST_USER_MAX_CONFIG_SIZE) {
  1369. return -1;
  1370. }
  1371. msg.payload.config.offset = 0;
  1372. msg.payload.config.size = config_len;
  1373. if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
  1374. return -1;
  1375. }
  1376. if (vhost_user_read(dev, &msg) < 0) {
  1377. return -1;
  1378. }
  1379. if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
  1380. error_report("Received unexpected msg type. Expected %d received %d",
  1381. VHOST_USER_GET_CONFIG, msg.hdr.request);
  1382. return -1;
  1383. }
  1384. if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
  1385. error_report("Received bad msg size.");
  1386. return -1;
  1387. }
  1388. memcpy(config, msg.payload.config.region, config_len);
  1389. return 0;
  1390. }
static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size, uint32_t flags)
{
    uint8_t *p;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    msg.payload.config.offset = offset;
    msg.payload.config.size = size;
    msg.payload.config.flags = flags;
    p = msg.payload.config.region;
    memcpy(p, data, size);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

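/*
 * Ask the backend to create a crypto session
 * (VHOST_USER_CREATE_CRYPTO_SESSION) from the given symmetric session
 * parameters and return the backend-allocated session id.  Requires
 * VHOST_USER_PROTOCOL_F_CRYPTO_SESSION.
 */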
static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    memcpy(&msg.payload.session.session_setup_data, sess_info,
           sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() return -1, create session failed");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_report("vhost_user_read() return -1, create session failed");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -1;
    }

    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64 "",
                     msg.payload.session.session_id);
        return -1;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}

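/*
 * Ask the backend to tear down a previously created crypto session
 * (VHOST_USER_CLOSE_CRYPTO_SESSION).  Requires
 * VHOST_USER_PROTOCOL_F_CRYPTO_SESSION.
 */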
static int
vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.u64),
    };
    msg.payload.u64 = session_id;

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() return -1, close session failed");
        return -1;
    }

    return 0;
}

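/*
 * Only memory sections backed by a file descriptor can be shared with the
 * backend process, so filter out anything without one.
 */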
static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
                                          MemoryRegionSection *section)
{
    bool result;

    result = memory_region_get_fd(section->mr) >= 0;

    return result;
}

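/*
 * Request the shared "inflight" buffer used to track in-flight descriptors
 * across backend restarts (VHOST_USER_GET_INFLIGHT_FD).  The backend
 * returns an fd plus a size and offset, which are mmap()ed here and
 * recorded in *inflight.  A no-op unless
 * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD was negotiated.
 */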
static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -1;
    }

    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -1;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -1;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}

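/*
 * Hand a previously obtained inflight buffer back to the backend
 * (VHOST_USER_SET_INFLIGHT_FD), passing its fd as ancillary data.  A no-op
 * unless VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD was negotiated.
 */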
static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
                                      struct vhost_inflight *inflight)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.mmap_size = inflight->size,
        .payload.inflight.mmap_offset = inflight->offset,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = inflight->queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
        return -1;
    }

    return 0;
}

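/*
 * Bind a VhostUserState to its chardev.  Fails if the state is already in
 * use by another vhost-user device.
 */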
bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
{
    if (user->chr) {
        error_setg(errp, "Cannot initialize vhost-user state");
        return false;
    }
    user->chr = chr;
    return true;
}

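/*
 * Release a VhostUserState: unmap and unparent any host notifier regions
 * that were set up by the backend, then drop the chardev reference.
 */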
void vhost_user_cleanup(VhostUserState *user)
{
    int i;

    if (!user->chr) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (user->notifier[i].addr) {
            object_unparent(OBJECT(&user->notifier[i].mr));
            munmap(user->notifier[i].addr, qemu_real_host_page_size);
            user->notifier[i].addr = NULL;
        }
    }

    user->chr = NULL;
}

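/* The vhost backend callbacks implemented by the vhost-user protocol. */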
const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
};
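
/*
 * Usage sketch (illustrative, not part of this file): the generic vhost
 * layer dispatches through this table rather than calling the functions
 * above directly, roughly along the lines of
 *
 *     struct vhost_dev *hdev = ...;   // hdev->vhost_ops == &user_ops
 *     int r = 0;
 *     if (hdev->vhost_ops->vhost_net_set_mtu) {
 *         r = hdev->vhost_ops->vhost_net_set_mtu(hdev, mtu);
 *     }
 *
 * so every request ultimately travels over the vhost-user socket owned by
 * the backend process.
 */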