vmnet-common.m 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414
  1. /*
  2. * vmnet-common.m - network client wrapper for Apple vmnet.framework
  3. *
  4. * Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
  5. * Copyright(c) 2021 Phillip Tennen <phillip@axleos.com>
  6. *
  7. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  8. * See the COPYING file in the top-level directory.
  9. *
  10. */
  11. #include "qemu/osdep.h"
  12. #include "qemu/main-loop.h"
  13. #include "qemu/log.h"
  14. #include "qapi/qapi-types-net.h"
  15. #include "vmnet_int.h"
  16. #include "clients.h"
  17. #include "qemu/error-report.h"
  18. #include "qapi/error.h"
  19. #include "system/runstate.h"
  20. #include "net/eth.h"
  21. #include <vmnet/vmnet.h>
  22. #include <dispatch/dispatch.h>
  23. static void vmnet_send_completed(NetClientState *nc, ssize_t len);
  24. const char *vmnet_status_map_str(vmnet_return_t status)
  25. {
  26. switch (status) {
  27. case VMNET_SUCCESS:
  28. return "success";
  29. case VMNET_FAILURE:
  30. return "general failure (possibly not enough privileges)";
  31. case VMNET_MEM_FAILURE:
  32. return "memory allocation failure";
  33. case VMNET_INVALID_ARGUMENT:
  34. return "invalid argument specified";
  35. case VMNET_SETUP_INCOMPLETE:
  36. return "interface setup is not complete";
  37. case VMNET_INVALID_ACCESS:
  38. return "invalid access, permission denied";
  39. case VMNET_PACKET_TOO_BIG:
  40. return "packet size is larger than MTU";
  41. case VMNET_BUFFER_EXHAUSTED:
  42. return "buffers exhausted in kernel";
  43. case VMNET_TOO_MANY_PACKETS:
  44. return "packet count exceeds limit";
  45. case VMNET_SHARING_SERVICE_BUSY:
  46. return "conflict, sharing service is in use";
  47. default:
  48. return "unknown vmnet error";
  49. }
  50. }
  51. /**
  52. * Write packets from QEMU to vmnet interface.
  53. *
  54. * vmnet.framework supports iov, but writing more than
  55. * one iov into vmnet interface fails with
  56. * 'VMNET_INVALID_ARGUMENT'. Collecting provided iovs into
  57. * one and passing it to vmnet works fine. That's the
  58. * reason why receive_iov() left unimplemented. But it still
  59. * works with good performance having .receive() only.
  60. */
  61. ssize_t vmnet_receive_common(NetClientState *nc,
  62. const uint8_t *buf,
  63. size_t size)
  64. {
  65. VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
  66. struct vmpktdesc packet;
  67. struct iovec iov;
  68. int pkt_cnt;
  69. vmnet_return_t if_status;
  70. if (size > s->max_packet_size) {
  71. warn_report("vmnet: packet is too big, %zu > %" PRIu64,
  72. packet.vm_pkt_size,
  73. s->max_packet_size);
  74. return -1;
  75. }
  76. iov.iov_base = (char *) buf;
  77. iov.iov_len = size;
  78. packet.vm_pkt_iovcnt = 1;
  79. packet.vm_flags = 0;
  80. packet.vm_pkt_size = size;
  81. packet.vm_pkt_iov = &iov;
  82. pkt_cnt = 1;
  83. if_status = vmnet_write(s->vmnet_if, &packet, &pkt_cnt);
  84. if (if_status != VMNET_SUCCESS) {
  85. error_report("vmnet: write error: %s",
  86. vmnet_status_map_str(if_status));
  87. return -1;
  88. }
  89. if (pkt_cnt) {
  90. return size;
  91. }
  92. return 0;
  93. }
  94. /**
  95. * Read packets from vmnet interface and write them
  96. * to temporary buffers in VmnetState.
  97. *
  98. * Returns read packets number (may be 0) on success,
  99. * -1 on error
  100. */
  101. static int vmnet_read_packets(VmnetState *s)
  102. {
  103. assert(s->packets_send_current_pos == s->packets_send_end_pos);
  104. struct vmpktdesc *packets = s->packets_buf;
  105. vmnet_return_t status;
  106. int i;
  107. /* Read as many packets as present */
  108. s->packets_send_current_pos = 0;
  109. s->packets_send_end_pos = VMNET_PACKETS_LIMIT;
  110. for (i = 0; i < s->packets_send_end_pos; ++i) {
  111. packets[i].vm_pkt_size = s->max_packet_size;
  112. packets[i].vm_pkt_iovcnt = 1;
  113. packets[i].vm_flags = 0;
  114. }
  115. status = vmnet_read(s->vmnet_if, packets, &s->packets_send_end_pos);
  116. if (status != VMNET_SUCCESS) {
  117. error_printf("vmnet: read failed: %s\n",
  118. vmnet_status_map_str(status));
  119. s->packets_send_current_pos = 0;
  120. s->packets_send_end_pos = 0;
  121. return -1;
  122. }
  123. return s->packets_send_end_pos;
  124. }
  125. /**
  126. * Write packets from temporary buffers in VmnetState
  127. * to QEMU.
  128. */
  129. static void vmnet_write_packets_to_qemu(VmnetState *s)
  130. {
  131. uint8_t *pkt;
  132. size_t pktsz;
  133. uint8_t min_pkt[ETH_ZLEN];
  134. size_t min_pktsz;
  135. ssize_t size;
  136. while (s->packets_send_current_pos < s->packets_send_end_pos) {
  137. pkt = s->iov_buf[s->packets_send_current_pos].iov_base;
  138. pktsz = s->packets_buf[s->packets_send_current_pos].vm_pkt_size;
  139. if (net_peer_needs_padding(&s->nc)) {
  140. min_pktsz = sizeof(min_pkt);
  141. if (eth_pad_short_frame(min_pkt, &min_pktsz, pkt, pktsz)) {
  142. pkt = min_pkt;
  143. pktsz = min_pktsz;
  144. }
  145. }
  146. size = qemu_send_packet_async(&s->nc, pkt, pktsz,
  147. vmnet_send_completed);
  148. if (size == 0) {
  149. /* QEMU is not ready to consume more packets -
  150. * stop and wait for completion callback call */
  151. return;
  152. }
  153. ++s->packets_send_current_pos;
  154. }
  155. }
  156. /**
  157. * Bottom half callback that transfers packets from vmnet interface
  158. * to QEMU.
  159. *
  160. * The process of transferring packets is three-staged:
  161. * 1. Handle vmnet event;
  162. * 2. Read packets from vmnet interface into temporary buffer;
  163. * 3. Write packets from temporary buffer to QEMU.
  164. *
  165. * QEMU may suspend this process on the last stage, returning 0 from
  166. * qemu_send_packet_async function. If this happens, we should
  167. * respectfully wait until it is ready to consume more packets,
  168. * write left ones in temporary buffer and only after this
  169. * continue reading more packets from vmnet interface.
  170. *
  171. * Packets to be transferred are stored into packets_buf,
  172. * in the window [packets_send_current_pos..packets_send_end_pos)
  173. * including current_pos, excluding end_pos.
  174. *
  175. * Thus, if QEMU is not ready, buffer is not read and
  176. * packets_send_current_pos < packets_send_end_pos.
  177. */
  178. static void vmnet_send_bh(void *opaque)
  179. {
  180. NetClientState *nc = (NetClientState *) opaque;
  181. VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
  182. /*
  183. * Do nothing if QEMU is not ready - wait
  184. * for completion callback invocation
  185. */
  186. if (s->packets_send_current_pos < s->packets_send_end_pos) {
  187. return;
  188. }
  189. /* Read packets from vmnet interface */
  190. if (vmnet_read_packets(s) > 0) {
  191. /* Send them to QEMU */
  192. vmnet_write_packets_to_qemu(s);
  193. }
  194. }
  195. /**
  196. * Completion callback to be invoked by QEMU when it becomes
  197. * ready to consume more packets.
  198. */
  199. static void vmnet_send_completed(NetClientState *nc, ssize_t len)
  200. {
  201. VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
  202. /* Callback is invoked eq queued packet is sent */
  203. ++s->packets_send_current_pos;
  204. /* Complete sending packets left in VmnetState buffers */
  205. vmnet_write_packets_to_qemu(s);
  206. /* And read new ones from vmnet if VmnetState buffer is ready */
  207. if (s->packets_send_current_pos < s->packets_send_end_pos) {
  208. qemu_bh_schedule(s->send_bh);
  209. }
  210. }
  211. static void vmnet_bufs_init(VmnetState *s)
  212. {
  213. struct vmpktdesc *packets = s->packets_buf;
  214. struct iovec *iov = s->iov_buf;
  215. int i;
  216. for (i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
  217. iov[i].iov_len = s->max_packet_size;
  218. iov[i].iov_base = g_malloc0(iov[i].iov_len);
  219. packets[i].vm_pkt_iov = iov + i;
  220. }
  221. }
  222. /**
  223. * Called on state change to un-register/re-register handlers
  224. */
  225. static void vmnet_vm_state_change_cb(void *opaque, bool running, RunState state)
  226. {
  227. VmnetState *s = opaque;
  228. if (running) {
  229. vmnet_interface_set_event_callback(
  230. s->vmnet_if,
  231. VMNET_INTERFACE_PACKETS_AVAILABLE,
  232. s->if_queue,
  233. ^(interface_event_t event_id, xpc_object_t event) {
  234. assert(event_id == VMNET_INTERFACE_PACKETS_AVAILABLE);
  235. /*
  236. * This function is being called from a non qemu thread, so
  237. * we only schedule a BH, and do the rest of the io completion
  238. * handling from vmnet_send_bh() which runs in a qemu context.
  239. */
  240. qemu_bh_schedule(s->send_bh);
  241. });
  242. } else {
  243. vmnet_interface_set_event_callback(
  244. s->vmnet_if,
  245. VMNET_INTERFACE_PACKETS_AVAILABLE,
  246. NULL,
  247. NULL);
  248. }
  249. }
/**
 * Start a vmnet interface described by @if_desc and attach it to @s.
 *
 * Blocks on if_created_sem until the vmnet start handler has run on
 * s->if_queue, so on return the interface is either fully started or
 * an error has been set in @errp.
 *
 * Returns 0 on success, -1 on failure.
 */
int vmnet_if_create(NetClientState *nc,
                    xpc_object_t if_desc,
                    Error **errp)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
    /* Signalled by the start handler block in both success and error paths */
    dispatch_semaphore_t if_created_sem = dispatch_semaphore_create(0);
    /* __block: written by the start handler, read after the wait below */
    __block vmnet_return_t if_status;

    /* Serial queue on which vmnet delivers events and handler callbacks */
    s->if_queue = dispatch_queue_create(
        "org.qemu.vmnet.if_queue",
        DISPATCH_QUEUE_SERIAL
    );

    /* QEMU supplies the MAC address itself; stop vmnet allocating one */
    xpc_dictionary_set_bool(
        if_desc,
        vmnet_allocate_mac_address_key,
        false
    );

#ifdef DEBUG
    qemu_log("vmnet.start.interface_desc:\n");
    xpc_dictionary_apply(if_desc,
                         ^bool(const char *k, xpc_object_t v) {
        char *desc = xpc_copy_description(v);
        qemu_log(" %s=%s\n", k, desc);
        free(desc);
        return true;
    });
#endif /* DEBUG */

    s->vmnet_if = vmnet_start_interface(
        if_desc,
        s->if_queue,
        ^(vmnet_return_t status, xpc_object_t interface_param) {
            if_status = status;
            if (status != VMNET_SUCCESS || !interface_param) {
                /* Wake the waiter; failure is reported via if_status */
                dispatch_semaphore_signal(if_created_sem);
                return;
            }

#ifdef DEBUG
            qemu_log("vmnet.start.interface_param:\n");
            xpc_dictionary_apply(interface_param,
                                 ^bool(const char *k, xpc_object_t v) {
                char *desc = xpc_copy_description(v);
                qemu_log(" %s=%s\n", k, desc);
                free(desc);
                return true;
            });
#endif /* DEBUG */

            /* Cache the negotiated MTU and maximum packet size */
            s->mtu = xpc_dictionary_get_uint64(
                interface_param,
                vmnet_mtu_key);
            s->max_packet_size = xpc_dictionary_get_uint64(
                interface_param,
                vmnet_max_packet_size_key);

            dispatch_semaphore_signal(if_created_sem);
        });

    if (s->vmnet_if == NULL) {
        /* Start handler never runs in this case; release everything here */
        dispatch_release(s->if_queue);
        dispatch_release(if_created_sem);
        error_setg(errp,
                   "unable to create interface with requested params");
        return -1;
    }

    /* Wait until the start handler above has reported a status */
    dispatch_semaphore_wait(if_created_sem, DISPATCH_TIME_FOREVER);
    dispatch_release(if_created_sem);

    if (if_status != VMNET_SUCCESS) {
        dispatch_release(s->if_queue);
        error_setg(errp,
                   "cannot create vmnet interface: %s",
                   vmnet_status_map_str(if_status));
        return -1;
    }

    /* Interface is up: wire up the receive path */
    s->send_bh = aio_bh_new(qemu_get_aio_context(), vmnet_send_bh, nc);
    vmnet_bufs_init(s);

    /* Empty buffer window: nothing queued for QEMU yet */
    s->packets_send_current_pos = 0;
    s->packets_send_end_pos = 0;

    /* Register the event handler now, then keep it in sync with VM state */
    vmnet_vm_state_change_cb(s, 1, RUN_STATE_RUNNING);

    s->change = qemu_add_vm_change_state_handler(vmnet_vm_state_change_cb, s);

    return 0;
}
  327. void vmnet_cleanup_common(NetClientState *nc)
  328. {
  329. VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
  330. dispatch_semaphore_t if_stopped_sem;
  331. if (s->vmnet_if == NULL) {
  332. return;
  333. }
  334. vmnet_vm_state_change_cb(s, 0, RUN_STATE_SHUTDOWN);
  335. qemu_del_vm_change_state_handler(s->change);
  336. if_stopped_sem = dispatch_semaphore_create(0);
  337. vmnet_stop_interface(
  338. s->vmnet_if,
  339. s->if_queue,
  340. ^(vmnet_return_t status) {
  341. assert(status == VMNET_SUCCESS);
  342. dispatch_semaphore_signal(if_stopped_sem);
  343. });
  344. dispatch_semaphore_wait(if_stopped_sem, DISPATCH_TIME_FOREVER);
  345. qemu_purge_queued_packets(nc);
  346. qemu_bh_delete(s->send_bh);
  347. dispatch_release(if_stopped_sem);
  348. dispatch_release(s->if_queue);
  349. for (int i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
  350. g_free(s->iov_buf[i].iov_base);
  351. }
  352. }