/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "trace.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif

#define VHOST_MEMORY_BASELINE_NREGIONS 8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_SLAVE_MAX_FDS 8

/*
 * Set maximum number of RAM slots supported to
 * the maximum number supported by the target
 * hardware platform.
 */
#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
    defined(TARGET_ARM) || defined(TARGET_ARM_64)
#include "hw/acpi/acpi.h"
#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS

#elif defined(TARGET_PPC) || defined(TARGET_PPC64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint32_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64

typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;
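
/*
 * 'c' is never used at run time; it only lets the macro below take
 * sizeof() of the individual VhostUserConfig header fields.
 */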
static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;

typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
    uint64_t u64;
    struct vhost_vring_state state;
    struct vhost_vring_addr addr;
    VhostUserMemory memory;
    VhostUserMemRegMsg mem_reg;
    VhostUserLog log;
    struct vhost_iotlb_msg iotlb;
    VhostUserConfig config;
    VhostUserCryptoSession session;
    VhostUserVringArea area;
    VhostUserInflight inflight;
} VhostUserPayload;

typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;
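
/* Dummy instance, used only to compute the two sizes below. */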
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    int slave_fd;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;
    /* True once we've entered postcopy_listen */
    bool postcopy_listen;
    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};
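
/*
 * Scratch entry used when diffing the shadow memory table against the
 * device's current state: 'region' points at the region concerned,
 * 'reg_idx' is its index in its table, and 'fd_idx' is the index of its
 * file descriptor among the fd-backed regions.
 */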
struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};

static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}
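
/*
 * Read a reply header from the backend and validate it: every reply
 * must carry the protocol version we speak plus VHOST_USER_REPLY_MASK.
 */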
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return -1;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -1;
    }

    return 0;
}

static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    if (vhost_user_read_header(dev, msg) < 0) {
        return -1;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        return -1;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            return -1;
        }
    }

    return 0;
}
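
/*
 * REPLY_ACK handling: if VHOST_USER_NEED_REPLY_MASK was set on the
 * request, wait for a reply of the same type; a non-zero u64 payload
 * means the backend failed to process the request.
 */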
static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    if (vhost_user_read(dev, &msg_reply) < 0) {
        return -1;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -1;
    }

    return msg_reply.payload.u64 ? -1 : 0;
}

static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}

/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we only need to send it once; any later such request is simply
     * ignored.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -1;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return -1;
    }

    return 0;
}

int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}

static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        if (vhost_user_read(dev, &msg) < 0) {
            return -1;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -1;
        }
    }

    return 0;
}

static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);

    return mr;
}

static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}
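
/*
 * Build a VHOST_USER_SET_MEM_TABLE message from every fd-backed region,
 * collecting the fds to pass as ancillary data. Regions without an fd
 * (not backed by shared memory) are skipped, as the backend could not
 * map them anyway.
 */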
static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -1;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -1;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 1;
}

static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}
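
/*
 * Diff the shadow table against the device's current memory state:
 * regions present only in the shadow table go on the "remove" list,
 * regions present only in the device state go on the "add" list.
 */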
static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;

    return;
}
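
/*
 * Send one VHOST_USER_REM_MEM_REG message per stale region and drop the
 * region from the shadow table once the backend has unmapped it.
 */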
static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            if (vhost_user_write(dev, msg, &fd, 1) < 0) {
                return -1;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}
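
/*
 * Send one VHOST_USER_ADD_MEM_REG message per new fd-backed region and
 * append each region to the shadow table. In the postcopy case
 * (track_ramblocks), the backend's reply is read back to learn the
 * address at which it mapped each region (the postcopy client base).
 */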
static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            if (vhost_user_write(dev, msg, &fd, 1) < 0) {
                return -1;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                if (vhost_user_read(dev, &msg_reply) < 0) {
                    return -1;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type. "
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -1;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -1;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -1;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}
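
/*
 * Incremental memory-table update, used once
 * VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS has been negotiated: rather
 * than resending the whole table, only the regions that changed since
 * the last call are added or removed.
 */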
static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;

    msg->hdr.size = sizeof(msg->payload.mem_reg.padding) +
        sizeof(VhostUserMemoryRegion);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                                          reply_supported) < 0)
    {
        goto err;
    }

    if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
                                       shadow_pcb, reply_supported,
                                       track_ramblocks) < 0)
    {
        goto err;
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal with
         * any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        if (vhost_user_write(dev, msg, NULL, 0) < 0) {
            return -1;
        }
    }

    return 0;

err:
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return -1;
}
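
/*
 * Postcopy variant of vhost_user_set_mem_table(): besides sending the
 * memory table, it records each region's RAMBlock and the backend's
 * mapping address (postcopy client base) so that userfaults raised by
 * the backend can be serviced later.
 */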
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          true) < 0) {
            return -1;
        }
    } else {
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              true) < 0) {
            return -1;
        }

        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (vhost_user_read(dev, &msg_reply) < 0) {
            return -1;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type. "
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -1;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -1;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent
         * but some of the regions were skipped (above) if they
         * didn't have fds.
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -1;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal
         * with any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
            return -1;
        }
    }

    return 0;
}

static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its own
         * version.
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          false) < 0) {
            return -1;
        }
    } else {
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              false) < 0) {
            return -1;
        }
        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}
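
/*
 * Host notifier MMIO regions supplied by the backend
 * (VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) are removed before
 * GET_VRING_BASE (which stops the ring) and restored before
 * SET_VRING_BASE (which restarts it).
 */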
static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
                                             int queue_idx)
{
    struct vhost_user *u = dev->opaque;
    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
    VirtIODevice *vdev = dev->vdev;

    if (n->addr && !n->set) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
        n->set = true;
    }
}

static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
                                            int queue_idx)
{
    struct vhost_user *u = dev->opaque;
    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
    VirtIODevice *vdev = dev->vdev;

    if (n->addr && n->set) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        n->set = false;
    }
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    vhost_user_host_notifier_restore(dev, ring->index);

    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}

static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -1;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = enable,
        };

        vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
    }

    return 0;
}

static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    vhost_user_host_notifier_remove(dev, ring->index);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *ring = msg.payload.state;

    return 0;
}
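
/*
 * Pass a vring's eventfd to the backend as SCM_RIGHTS ancillary data.
 * When no usable fd exists, VHOST_USER_VRING_NOFD_MASK tells the
 * backend to poll the ring instead of waiting for a kick.
 */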
static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}

static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_get_max_memslots(struct vhost_dev *dev,
                                       uint64_t *max_memslots)
{
    uint64_t backend_max_memslots;
    int err;

    err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
                             &backend_max_memslots);
    if (err < 0) {
        return err;
    }

    *max_memslots = backend_max_memslots;

    return 0;
}

static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
    int ret = -1;

    if (!dev->config_ops) {
        return -1;
    }

    if (dev->config_ops->vhost_dev_config_notifier) {
        ret = dev->config_ops->vhost_dev_config_notifier(dev);
    }

    return ret;
}

static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size;
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -1;
    }

    n = &user->notifier[queue_idx];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -1;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -1;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        munmap(addr, page_size);
        return -1;
    }

    n->addr = addr;
    n->set = true;

    return 0;
}
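
/*
 * Handler for backend-initiated ("slave") requests. These arrive on the
 * separate slave_fd channel set up by vhost_setup_slave_channel(), with
 * any file descriptors attached as SCM_RIGHTS ancillary data.
 */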
static void slave_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    int size, ret = 0;
    struct iovec iov;
    struct msghdr msgh;
    int fd[VHOST_USER_SLAVE_MAX_FDS];
    char control[CMSG_SPACE(sizeof(fd))];
    struct cmsghdr *cmsg;
    int i, fdsize = 0;

    memset(&msgh, 0, sizeof(msgh));
    msgh.msg_iov = &iov;
    msgh.msg_iovlen = 1;
    msgh.msg_control = control;
    msgh.msg_controllen = sizeof(control);

    memset(fd, -1, sizeof(fd));

    /* Read header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    do {
        size = recvmsg(u->slave_fd, &msgh, 0);
    } while (size < 0 && (errno == EINTR || errno == EAGAIN));

    if (size != VHOST_USER_HDR_SIZE) {
        error_report("Failed to read from slave.");
        goto err;
    }

    if (msgh.msg_flags & MSG_CTRUNC) {
        error_report("Truncated message.");
        goto err;
    }

    for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET &&
            cmsg->cmsg_type == SCM_RIGHTS) {
            fdsize = cmsg->cmsg_len - CMSG_LEN(0);
            memcpy(fd, CMSG_DATA(cmsg), fdsize);
            /* fdsize was a byte count; turn it into a count of fds */
            fdsize /= sizeof(int);
            break;
        }
    }

    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    do {
        size = read(u->slave_fd, &payload, hdr.size);
    } while (size < 0 && (errno == EINTR || errno == EAGAIN));

    if (size != hdr.size) {
        error_report("Failed to read payload from slave.");
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
        ret = vhost_user_slave_handle_config_change(dev);
        break;
    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
                                                          fd[0]);
        break;
    default:
        error_report("Received unexpected msg type: %d.", hdr.request);
        ret = -EINVAL;
    }

    /* Close the remaining file descriptors. */
    for (i = 0; i < fdsize; i++) {
        if (fd[i] != -1) {
            close(fd[i]);
        }
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        struct iovec iovec[2];

        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        hdr.flags |= VHOST_USER_REPLY_MASK;

        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        iovec[0].iov_base = &hdr;
        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
        iovec[1].iov_base = &payload;
        iovec[1].iov_len = hdr.size;

        do {
            size = writev(u->slave_fd, iovec, ARRAY_SIZE(iovec));
        } while (size < 0 && (errno == EINTR || errno == EAGAIN));

        if (size != VHOST_USER_HDR_SIZE + hdr.size) {
            error_report("Failed to send msg reply to slave.");
            goto err;
        }
    }

    return;

err:
    qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
    close(u->slave_fd);
    u->slave_fd = -1;
    for (i = 0; i < fdsize; i++) {
        if (fd[i] != -1) {
            close(fd[i]);
        }
    }
    return;
}
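
/*
 * Create the socketpair carrying backend-initiated requests and hand
 * one end to the backend via VHOST_USER_SET_SLAVE_REQ_FD. Only done
 * when the backend negotiated VHOST_USER_PROTOCOL_F_SLAVE_REQ.
 */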
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    u->slave_fd = sv[0];
    qemu_set_fd_handler(u->slave_fd, slave_read, NULL, dev);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }

    return ret;
}

#ifdef CONFIG_LINUX
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 * TODO: This is Linux specific
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
            /* Offset of the fault address in the vhost region */
            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
            if (region_offset < dev->mem->regions[i].memory_size) {
                rb_offset = region_offset + u->region_rb_offset[i];
                trace_vhost_user_postcopy_fault_handler_found(i,
                        region_offset, rb_offset);
                rb = u->region_rb[i];
                return postcopy_request_shared_page(pcfd, rb, faultaddr,
                                                    rb_offset);
            }
        }
    }
    error_report("%s: Failed to find region for fault %" PRIx64,
                 __func__, faultaddr);
    return -1;
}

static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
                                     uint64_t offset)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    int i;

    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);

    if (!u) {
        return 0;
    }
    /* Translate the offset into an address in the client's address space */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        if (u->region_rb[i] == rb &&
            offset >= u->region_rb_offset[i] &&
            offset < (u->region_rb_offset[i] +
                      dev->mem->regions[i].memory_size)) {
            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
                                   u->postcopy_client_bases[i];
            trace_vhost_user_postcopy_waker_found(client_addr);
            return postcopy_wake_shared(pcfd, client_addr, rb);
        }
    }

    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
    return 0;
}
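
/*
 * The waker performs the inverse mapping of the fault handler: given a
 * (RAMBlock, offset) pair it recovers the client (backend) virtual address.
 * With the same illustrative numbers as above, offset 0x12000 in the
 * matching RAMBlock maps back to client_addr 0x7f0000012000, which is
 * passed to postcopy_wake_shared().
 */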
#endif

/*
 * Called at the start of an inbound postcopy on reception of the
 * 'advise' command.
 */
static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
{
#ifdef CONFIG_LINUX
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ufd;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_advise to vhost");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
        error_setg(errp, "Unexpected msg type. Expected %d received %d",
                   VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size) {
        error_setg(errp, "Received bad msg size.");
        return -1;
    }

    ufd = qemu_chr_fe_get_msgfd(chr);
    if (ufd < 0) {
        error_setg(errp, "%s: Failed to get ufd", __func__);
        return -1;
    }
    qemu_set_nonblock(ufd);

    /* register ufd with userfault thread */
    u->postcopy_fd.fd = ufd;
    u->postcopy_fd.data = dev;
    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
    u->postcopy_fd.waker = vhost_user_postcopy_waker;
    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
    postcopy_register_shared_ufd(&u->postcopy_fd);
    return 0;
#else
    error_setg(errp, "Postcopy not supported on non-Linux systems");
    return -1;
#endif
}
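
/*
 * Illustrative backend-side counterpart (not part of this file): on
 * POSTCOPY_ADVISE the slave is expected to open a userfaultfd and return
 * it as an ancillary fd on the reply, which is what qemu_chr_fe_get_msgfd()
 * above picks up.  A minimal sketch, assuming Linux and a hypothetical
 * send_reply_with_fd() helper:
 *
 *     int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *     struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *     ioctl(ufd, UFFDIO_API, &api);
 *     send_reply_with_fd(master_fd, &reply, ufd);  // fd via SCM_RIGHTS
 */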

/*
 * Called at the switch to postcopy on reception of the 'listen' command.
 */
static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
{
    struct vhost_user *u = dev->opaque;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    u->postcopy_listen = true;
    trace_vhost_user_postcopy_listen();
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_listen to vhost");
        return -1;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_listen");
        return ret;
    }

    return 0;
}
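
/*
 * Note the NEED_REPLY pattern used here and in vhost_user_postcopy_end():
 * setting VHOST_USER_NEED_REPLY_MASK in hdr.flags obliges the slave to
 * answer with a u64 payload, and process_message_reply() treats any
 * non-zero value as failure.  On the wire the ack is simply (illustrative):
 *
 *     reply.hdr.request = <same request type>;
 *     reply.hdr.flags   = VHOST_USER_VERSION | VHOST_USER_REPLY_MASK;
 *     reply.hdr.size    = sizeof(reply.payload.u64);
 *     reply.payload.u64 = 0;   // 0 == success
 */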

/*
 * Called at the end of postcopy
 */
static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_END,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    int ret;
    struct vhost_user *u = dev->opaque;

    trace_vhost_user_postcopy_end_entry();
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_end to vhost");
        return -1;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_end");
        return ret;
    }
    postcopy_unregister_shared_ufd(&u->postcopy_fd);
    close(u->postcopy_fd.fd);
    u->postcopy_fd.handler = NULL;

    trace_vhost_user_postcopy_end_exit();

    return 0;
}

static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
                                        void *opaque)
{
    struct PostcopyNotifyData *pnd = opaque;
    struct vhost_user *u = container_of(notifier, struct vhost_user,
                                        postcopy_notifier);
    struct vhost_dev *dev = u->dev;

    switch (pnd->reason) {
    case POSTCOPY_NOTIFY_PROBE:
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
            /* TODO: Get the device name into this error somehow */
            error_setg(pnd->errp,
                       "vhost-user backend not capable of postcopy");
            return -ENOENT;
        }
        break;

    case POSTCOPY_NOTIFY_INBOUND_ADVISE:
        return vhost_user_postcopy_advise(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_LISTEN:
        return vhost_user_postcopy_listen(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_END:
        return vhost_user_postcopy_end(dev, pnd->errp);

    default:
        /* We ignore notifications we don't know */
        break;
    }

    return 0;
}

static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque)
{
    uint64_t features, protocol_features, ram_slots;
    struct vhost_user *u;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->user = opaque;
    u->slave_fd = -1;
    u->dev = dev;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        return err;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            return err;
        }

        dev->protocol_features =
            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;

        if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
            /* Don't acknowledge CONFIG feature if device doesn't support it */
            dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
        } else if (!(protocol_features &
                    (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
            error_report("Device expects VHOST_USER_PROTOCOL_F_CONFIG "
                         "but backend does not support it.");
            return -1;
        }

        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            return err;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                return err;
            }
        }

        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                 virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_report("IOMMU support requires reply-ack and "
                         "slave-req protocol features.");
            return -1;
        }

        /* get max memory regions if backend supports configurable RAM slots */
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
            u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
        } else {
            err = vhost_user_get_max_memslots(dev, &ram_slots);
            if (err < 0) {
                return err;
            }

            if (ram_slots < u->user->memory_slots) {
                error_report("The backend specified a max ram slots limit "
                             "of %" PRIu64", when the prior validated limit was "
                             "%d. This limit should never decrease.", ram_slots,
                             u->user->memory_slots);
                return -1;
            }

            u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
        }
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    if (dev->vq_index == 0) {
        err = vhost_setup_slave_channel(dev);
        if (err < 0) {
            return err;
        }
    }

    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
    postcopy_add_notifier(&u->postcopy_notifier);

    return 0;
}
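
/*
 * The negotiation order implemented above follows the vhost-user protocol:
 * GET_FEATURES, then (if VHOST_USER_F_PROTOCOL_FEATURES is offered)
 * GET_PROTOCOL_FEATURES and SET_PROTOCOL_FEATURES, then the optional
 * GET_QUEUE_NUM / GET_MAX_MEM_SLOTS queries.  For example (illustrative),
 * a backend advertising only MQ and REPLY_ACK would report
 * protocol_features == (1ULL << VHOST_USER_PROTOCOL_F_MQ) |
 * (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK), and the CONFIG, IOMMU and
 * mem-slots branches above are skipped or masked off accordingly.
 */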

static int vhost_user_backend_cleanup(struct vhost_dev *dev)
{
    struct vhost_user *u;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = dev->opaque;
    if (u->postcopy_notifier.notify) {
        postcopy_remove_notifier(&u->postcopy_notifier);
        u->postcopy_notifier.notify = NULL;
    }
    u->postcopy_listen = false;
    if (u->postcopy_fd.handler) {
        postcopy_unregister_shared_ufd(&u->postcopy_fd);
        close(u->postcopy_fd.fd);
        u->postcopy_fd.handler = NULL;
    }
    if (u->slave_fd >= 0) {
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }
    g_free(u->region_rb);
    u->region_rb = NULL;
    g_free(u->region_rb_offset);
    u->region_rb_offset = NULL;
    u->region_rb_len = 0;
    g_free(u);
    dev->opaque = NULL;

    return 0;
}

static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}

static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    struct vhost_user *u = dev->opaque;

    return u->user->memory_slots;
}

static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}

static int vhost_user_migration_done(struct vhost_dev *dev, char *mac_addr)
{
    VhostUserMsg msg = { };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.hdr.request = VHOST_USER_SEND_RARP;
        msg.hdr.flags = VHOST_USER_VERSION;
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.hdr.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    return -1;
}
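
/*
 * Layout note for VHOST_USER_SEND_RARP above: the 6-byte MAC address is
 * packed into the low bytes of the u64 payload.  E.g. (illustrative) for
 * MAC 52:54:00:12:34:56 the payload bytes on the wire are
 * 52 54 00 12 34 56 00 00; the two trailing bytes stay zero because msg
 * is zero-initialized.
 */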

static bool vhost_user_can_merge(struct vhost_dev *dev,
                                 uint64_t start1, uint64_t size1,
                                 uint64_t start2, uint64_t size2)
{
    ram_addr_t offset;
    int mfd, rfd;

    (void)vhost_user_get_mr_data(start1, &offset, &mfd);
    (void)vhost_user_get_mr_data(start2, &offset, &rfd);

    return mfd == rfd;
}

static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
{
    VhostUserMsg msg;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
        return 0;
    }

    msg.hdr.request = VHOST_USER_NET_SET_MTU;
    msg.payload.u64 = mtu;
    msg.hdr.size = sizeof(msg.payload.u64);
    msg.hdr.flags = VHOST_USER_VERSION;
    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    /* If reply_ack supported, slave has to ack specified MTU is valid */
    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
                                            struct vhost_iotlb_msg *imsg)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_IOTLB_MSG,
        .hdr.size = sizeof(msg.payload.iotlb),
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .payload.iotlb = *imsg,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -EFAULT;
    }

    return process_message_reply(dev, &msg);
}
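
/*
 * Example (illustrative values, not part of this file): a device-side TLB
 * miss for guest IOVA 0x100000 would typically be answered by sending an
 * update through this helper with a vhost_iotlb_msg along the lines of:
 *
 *     struct vhost_iotlb_msg imsg = {
 *         .iova  = 0x100000,
 *         .size  = 0x1000,
 *         .uaddr = (uint64_t)host_va,   // translated host virtual address
 *         .perm  = VHOST_ACCESS_RW,
 *         .type  = VHOST_IOTLB_UPDATE,
 *     };
 *     vhost_user_send_device_iotlb_msg(dev, &imsg);
 */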

static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}

static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (config_len > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_report("Received bad msg size.");
        return -1;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}
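
/*
 * Usage sketch (illustrative): a vhost-user block device would fetch its
 * virtio config space through this hook, e.g.
 *
 *     struct virtio_blk_config blkcfg;
 *     if (vhost_dev_get_config(dev, (uint8_t *)&blkcfg,
 *                              sizeof(blkcfg)) < 0) {
 *         // handle error
 *     }
 *
 * which ends up here with config_len == sizeof(blkcfg), bounded by
 * VHOST_USER_MAX_CONFIG_SIZE.
 */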

static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size, uint32_t flags)
{
    uint8_t *p;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    msg.payload.config.offset = offset;
    msg.payload.config.size = size;
    msg.payload.config.flags = flags;
    p = msg.payload.config.region;
    memcpy(p, data, size);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    memcpy(&msg.payload.session.session_setup_data, sess_info,
           sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() returned -1, create session failed");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_report("vhost_user_read() returned -1, create session failed");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -1;
    }

    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64,
                     msg.payload.session.session_id);
        return -1;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}
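
/*
 * Flow note (illustrative): the cryptodev backend reaches this through
 * dev->vhost_ops->vhost_crypto_create_session() when the guest creates a
 * symmetric session.  The slave allocates the session and returns its id
 * in msg.payload.session.session_id, which must be non-negative; the same
 * id is later torn down via vhost_user_crypto_close_session() below.
 */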

static int
vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.u64),
    };
    msg.payload.u64 = session_id;

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() returned -1, close session failed");
        return -1;
    }

    return 0;
}

static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
                                          MemoryRegionSection *section)
{
    /* Only fd-backed sections can be shared with the vhost-user backend */
    return memory_region_get_fd(section->mr) >= 0;
}

static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -1;
    }

    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -1;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -1;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}
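
/*
 * The inflight buffer mmap'd above is a slave-defined shared region; QEMU
 * only records (addr, fd, size, offset) and hands the same values back in
 * vhost_user_set_inflight_fd() after a backend reconnect, so the slave can
 * recover which descriptors were still in flight.  Conceptually:
 *
 *     GET_INFLIGHT_FD: slave allocates and returns fd + mmap geometry
 *     QEMU:            mmap(0, mmap_size, PROT_READ | PROT_WRITE,
 *                           MAP_SHARED, fd, mmap_offset)
 *     SET_INFLIGHT_FD: same fd and geometry sent back after reconnect
 */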

static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
                                      struct vhost_inflight *inflight)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.mmap_size = inflight->size,
        .payload.inflight.mmap_offset = inflight->offset,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = inflight->queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
        return -1;
    }

    return 0;
}

bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
{
    if (user->chr) {
        error_setg(errp, "Cannot initialize vhost-user state");
        return false;
    }
    user->chr = chr;
    user->memory_slots = 0;
    return true;
}

void vhost_user_cleanup(VhostUserState *user)
{
    int i;

    if (!user->chr) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (user->notifier[i].addr) {
            object_unparent(OBJECT(&user->notifier[i].mr));
            munmap(user->notifier[i].addr, qemu_real_host_page_size);
            user->notifier[i].addr = NULL;
        }
    }
    user->chr = NULL;
}
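
/*
 * Usage sketch (illustrative): a device frontend pairs these two calls as
 *
 *     VhostUserState *user = g_new0(VhostUserState, 1);
 *     if (!vhost_user_init(user, &s->chardev, errp)) {
 *         g_free(user);
 *         return;
 *     }
 *     // ... device lifetime ...
 *     vhost_user_cleanup(user);   // on unrealize, before freeing
 *     g_free(user);
 *
 * where s->chardev stands in for the device's CharBackend; the
 * single-assignment check on user->chr keeps one chardev from backing two
 * vhost-user states.
 */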

const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
};