/* channel.c */
  1. /*
  2. * QEMU I/O channels
  3. *
  4. * Copyright (c) 2015 Red Hat, Inc.
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18. *
  19. */
  20. #include "qemu/osdep.h"
  21. #include "io/channel.h"
  22. #include "qapi/error.h"
  23. #include "qemu/main-loop.h"
  24. #include "qemu/module.h"
  25. #include "qemu/iov.h"
  26. bool qio_channel_has_feature(QIOChannel *ioc,
  27. QIOChannelFeature feature)
  28. {
  29. return ioc->features & (1 << feature);
  30. }
  31. void qio_channel_set_feature(QIOChannel *ioc,
  32. QIOChannelFeature feature)
  33. {
  34. ioc->features |= (1 << feature);
  35. }
  36. void qio_channel_set_name(QIOChannel *ioc,
  37. const char *name)
  38. {
  39. g_free(ioc->name);
  40. ioc->name = g_strdup(name);
  41. }
  42. ssize_t qio_channel_readv_full(QIOChannel *ioc,
  43. const struct iovec *iov,
  44. size_t niov,
  45. int **fds,
  46. size_t *nfds,
  47. Error **errp)
  48. {
  49. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  50. if ((fds || nfds) &&
  51. !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
  52. error_setg_errno(errp, EINVAL,
  53. "Channel does not support file descriptor passing");
  54. return -1;
  55. }
  56. return klass->io_readv(ioc, iov, niov, fds, nfds, errp);
  57. }
  58. ssize_t qio_channel_writev_full(QIOChannel *ioc,
  59. const struct iovec *iov,
  60. size_t niov,
  61. int *fds,
  62. size_t nfds,
  63. Error **errp)
  64. {
  65. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  66. if ((fds || nfds) &&
  67. !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
  68. error_setg_errno(errp, EINVAL,
  69. "Channel does not support file descriptor passing");
  70. return -1;
  71. }
  72. return klass->io_writev(ioc, iov, niov, fds, nfds, errp);
  73. }
/*
 * Read until @iov is completely filled, or EOF.
 *
 * Returns: 1 when every requested byte was read, 0 when EOF arrived
 * before any byte was read, -1 on error.  An EOF that arrives after a
 * partial read is reported as an error (-1) with @errp set.
 *
 * Blocks (yielding in coroutine context, spinning a nested main loop
 * otherwise) whenever the channel reports QIO_CHANNEL_ERR_BLOCK.
 */
int qio_channel_readv_all_eof(QIOChannel *ioc,
                              const struct iovec *iov,
                              size_t niov,
                              Error **errp)
{
    int ret = -1;
    /* Work on a private copy of the iovec so it can be advanced
     * in place as data arrives; the head pointer is kept for g_free. */
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;
    bool partial = false;

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    while (nlocal_iov > 0) {
        ssize_t len;
        len = qio_channel_readv(ioc, local_iov, nlocal_iov, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            /* Not ready: wait for G_IO_IN, then retry the read */
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_IN);
            } else {
                qio_channel_wait(ioc, G_IO_IN);
            }
            continue;
        } else if (len < 0) {
            goto cleanup;
        } else if (len == 0) {
            if (partial) {
                /* EOF mid-transfer: error; ret stays -1 */
                error_setg(errp,
                           "Unexpected end-of-file before all bytes were read");
            } else {
                /* Clean EOF before any data was read */
                ret = 0;
            }
            goto cleanup;
        }

        partial = true;
        iov_discard_front(&local_iov, &nlocal_iov, len);
    }

    ret = 1;

cleanup:
    g_free(local_iov_head);
    return ret;
}
  116. int qio_channel_readv_all(QIOChannel *ioc,
  117. const struct iovec *iov,
  118. size_t niov,
  119. Error **errp)
  120. {
  121. int ret = qio_channel_readv_all_eof(ioc, iov, niov, errp);
  122. if (ret == 0) {
  123. ret = -1;
  124. error_setg(errp,
  125. "Unexpected end-of-file before all bytes were read");
  126. } else if (ret == 1) {
  127. ret = 0;
  128. }
  129. return ret;
  130. }
  131. int qio_channel_writev_all(QIOChannel *ioc,
  132. const struct iovec *iov,
  133. size_t niov,
  134. Error **errp)
  135. {
  136. int ret = -1;
  137. struct iovec *local_iov = g_new(struct iovec, niov);
  138. struct iovec *local_iov_head = local_iov;
  139. unsigned int nlocal_iov = niov;
  140. nlocal_iov = iov_copy(local_iov, nlocal_iov,
  141. iov, niov,
  142. 0, iov_size(iov, niov));
  143. while (nlocal_iov > 0) {
  144. ssize_t len;
  145. len = qio_channel_writev(ioc, local_iov, nlocal_iov, errp);
  146. if (len == QIO_CHANNEL_ERR_BLOCK) {
  147. if (qemu_in_coroutine()) {
  148. qio_channel_yield(ioc, G_IO_OUT);
  149. } else {
  150. qio_channel_wait(ioc, G_IO_OUT);
  151. }
  152. continue;
  153. }
  154. if (len < 0) {
  155. goto cleanup;
  156. }
  157. iov_discard_front(&local_iov, &nlocal_iov, len);
  158. }
  159. ret = 0;
  160. cleanup:
  161. g_free(local_iov_head);
  162. return ret;
  163. }
  164. ssize_t qio_channel_readv(QIOChannel *ioc,
  165. const struct iovec *iov,
  166. size_t niov,
  167. Error **errp)
  168. {
  169. return qio_channel_readv_full(ioc, iov, niov, NULL, NULL, errp);
  170. }
  171. ssize_t qio_channel_writev(QIOChannel *ioc,
  172. const struct iovec *iov,
  173. size_t niov,
  174. Error **errp)
  175. {
  176. return qio_channel_writev_full(ioc, iov, niov, NULL, 0, errp);
  177. }
  178. ssize_t qio_channel_read(QIOChannel *ioc,
  179. char *buf,
  180. size_t buflen,
  181. Error **errp)
  182. {
  183. struct iovec iov = { .iov_base = buf, .iov_len = buflen };
  184. return qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, errp);
  185. }
  186. ssize_t qio_channel_write(QIOChannel *ioc,
  187. const char *buf,
  188. size_t buflen,
  189. Error **errp)
  190. {
  191. struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
  192. return qio_channel_writev_full(ioc, &iov, 1, NULL, 0, errp);
  193. }
  194. int qio_channel_read_all_eof(QIOChannel *ioc,
  195. char *buf,
  196. size_t buflen,
  197. Error **errp)
  198. {
  199. struct iovec iov = { .iov_base = buf, .iov_len = buflen };
  200. return qio_channel_readv_all_eof(ioc, &iov, 1, errp);
  201. }
  202. int qio_channel_read_all(QIOChannel *ioc,
  203. char *buf,
  204. size_t buflen,
  205. Error **errp)
  206. {
  207. struct iovec iov = { .iov_base = buf, .iov_len = buflen };
  208. return qio_channel_readv_all(ioc, &iov, 1, errp);
  209. }
  210. int qio_channel_write_all(QIOChannel *ioc,
  211. const char *buf,
  212. size_t buflen,
  213. Error **errp)
  214. {
  215. struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
  216. return qio_channel_writev_all(ioc, &iov, 1, errp);
  217. }
  218. int qio_channel_set_blocking(QIOChannel *ioc,
  219. bool enabled,
  220. Error **errp)
  221. {
  222. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  223. return klass->io_set_blocking(ioc, enabled, errp);
  224. }
  225. int qio_channel_close(QIOChannel *ioc,
  226. Error **errp)
  227. {
  228. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  229. return klass->io_close(ioc, errp);
  230. }
  231. GSource *qio_channel_create_watch(QIOChannel *ioc,
  232. GIOCondition condition)
  233. {
  234. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  235. GSource *ret = klass->io_create_watch(ioc, condition);
  236. if (ioc->name) {
  237. g_source_set_name(ret, ioc->name);
  238. }
  239. return ret;
  240. }
  241. void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
  242. AioContext *ctx,
  243. IOHandler *io_read,
  244. IOHandler *io_write,
  245. void *opaque)
  246. {
  247. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  248. klass->io_set_aio_fd_handler(ioc, ctx, io_read, io_write, opaque);
  249. }
  250. guint qio_channel_add_watch_full(QIOChannel *ioc,
  251. GIOCondition condition,
  252. QIOChannelFunc func,
  253. gpointer user_data,
  254. GDestroyNotify notify,
  255. GMainContext *context)
  256. {
  257. GSource *source;
  258. guint id;
  259. source = qio_channel_create_watch(ioc, condition);
  260. g_source_set_callback(source, (GSourceFunc)func, user_data, notify);
  261. id = g_source_attach(source, context);
  262. g_source_unref(source);
  263. return id;
  264. }
  265. guint qio_channel_add_watch(QIOChannel *ioc,
  266. GIOCondition condition,
  267. QIOChannelFunc func,
  268. gpointer user_data,
  269. GDestroyNotify notify)
  270. {
  271. return qio_channel_add_watch_full(ioc, condition, func,
  272. user_data, notify, NULL);
  273. }
  274. GSource *qio_channel_add_watch_source(QIOChannel *ioc,
  275. GIOCondition condition,
  276. QIOChannelFunc func,
  277. gpointer user_data,
  278. GDestroyNotify notify,
  279. GMainContext *context)
  280. {
  281. GSource *source;
  282. guint id;
  283. id = qio_channel_add_watch_full(ioc, condition, func,
  284. user_data, notify, context);
  285. source = g_main_context_find_source_by_id(context, id);
  286. g_source_ref(source);
  287. return source;
  288. }
  289. int qio_channel_shutdown(QIOChannel *ioc,
  290. QIOChannelShutdown how,
  291. Error **errp)
  292. {
  293. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  294. if (!klass->io_shutdown) {
  295. error_setg(errp, "Data path shutdown not supported");
  296. return -1;
  297. }
  298. return klass->io_shutdown(ioc, how, errp);
  299. }
  300. void qio_channel_set_delay(QIOChannel *ioc,
  301. bool enabled)
  302. {
  303. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  304. if (klass->io_set_delay) {
  305. klass->io_set_delay(ioc, enabled);
  306. }
  307. }
  308. void qio_channel_set_cork(QIOChannel *ioc,
  309. bool enabled)
  310. {
  311. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  312. if (klass->io_set_cork) {
  313. klass->io_set_cork(ioc, enabled);
  314. }
  315. }
  316. off_t qio_channel_io_seek(QIOChannel *ioc,
  317. off_t offset,
  318. int whence,
  319. Error **errp)
  320. {
  321. QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
  322. if (!klass->io_seek) {
  323. error_setg(errp, "Channel does not support random access");
  324. return -1;
  325. }
  326. return klass->io_seek(ioc, offset, whence, errp);
  327. }
  328. static void qio_channel_restart_read(void *opaque)
  329. {
  330. QIOChannel *ioc = opaque;
  331. Coroutine *co = ioc->read_coroutine;
  332. /* Assert that aio_co_wake() reenters the coroutine directly */
  333. assert(qemu_get_current_aio_context() ==
  334. qemu_coroutine_get_aio_context(co));
  335. aio_co_wake(co);
  336. }
  337. static void qio_channel_restart_write(void *opaque)
  338. {
  339. QIOChannel *ioc = opaque;
  340. Coroutine *co = ioc->write_coroutine;
  341. /* Assert that aio_co_wake() reenters the coroutine directly */
  342. assert(qemu_get_current_aio_context() ==
  343. qemu_coroutine_get_aio_context(co));
  344. aio_co_wake(co);
  345. }
  346. static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc)
  347. {
  348. IOHandler *rd_handler = NULL, *wr_handler = NULL;
  349. AioContext *ctx;
  350. if (ioc->read_coroutine) {
  351. rd_handler = qio_channel_restart_read;
  352. }
  353. if (ioc->write_coroutine) {
  354. wr_handler = qio_channel_restart_write;
  355. }
  356. ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context();
  357. qio_channel_set_aio_fd_handler(ioc, ctx, rd_handler, wr_handler, ioc);
  358. }
  359. void qio_channel_attach_aio_context(QIOChannel *ioc,
  360. AioContext *ctx)
  361. {
  362. assert(!ioc->read_coroutine);
  363. assert(!ioc->write_coroutine);
  364. ioc->ctx = ctx;
  365. }
  366. void qio_channel_detach_aio_context(QIOChannel *ioc)
  367. {
  368. ioc->read_coroutine = NULL;
  369. ioc->write_coroutine = NULL;
  370. qio_channel_set_aio_fd_handlers(ioc);
  371. ioc->ctx = NULL;
  372. }
/*
 * Yield the calling coroutine until @ioc is ready for @condition
 * (G_IO_IN or G_IO_OUT only; anything else aborts).  Must be called
 * from coroutine context.  The coroutine is re-entered either by the
 * fd handlers installed via qio_channel_set_aio_fd_handlers(), or by
 * some external caller re-entering it to interrupt the wait.
 */
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition)
{
    assert(qemu_in_coroutine());
    /* Register ourselves as the single waiter for this direction */
    if (condition == G_IO_IN) {
        assert(!ioc->read_coroutine);
        ioc->read_coroutine = qemu_coroutine_self();
    } else if (condition == G_IO_OUT) {
        assert(!ioc->write_coroutine);
        ioc->write_coroutine = qemu_coroutine_self();
    } else {
        abort();
    }
    qio_channel_set_aio_fd_handlers(ioc);
    qemu_coroutine_yield();

    /* Allow interrupting the operation by reentering the coroutine other than
     * through the aio_fd_handlers. */
    if (condition == G_IO_IN && ioc->read_coroutine) {
        /* Still registered after re-entry: clear ourselves and drop
         * the now-unwanted fd handler */
        ioc->read_coroutine = NULL;
        qio_channel_set_aio_fd_handlers(ioc);
    } else if (condition == G_IO_OUT && ioc->write_coroutine) {
        ioc->write_coroutine = NULL;
        qio_channel_set_aio_fd_handlers(ioc);
    }
}
  398. static gboolean qio_channel_wait_complete(QIOChannel *ioc,
  399. GIOCondition condition,
  400. gpointer opaque)
  401. {
  402. GMainLoop *loop = opaque;
  403. g_main_loop_quit(loop);
  404. return FALSE;
  405. }
  406. void qio_channel_wait(QIOChannel *ioc,
  407. GIOCondition condition)
  408. {
  409. GMainContext *ctxt = g_main_context_new();
  410. GMainLoop *loop = g_main_loop_new(ctxt, TRUE);
  411. GSource *source;
  412. source = qio_channel_create_watch(ioc, condition);
  413. g_source_set_callback(source,
  414. (GSourceFunc)qio_channel_wait_complete,
  415. loop,
  416. NULL);
  417. g_source_attach(source, ctxt);
  418. g_main_loop_run(loop);
  419. g_source_unref(source);
  420. g_main_loop_unref(loop);
  421. g_main_context_unref(ctxt);
  422. }
  423. static void qio_channel_finalize(Object *obj)
  424. {
  425. QIOChannel *ioc = QIO_CHANNEL(obj);
  426. g_free(ioc->name);
  427. #ifdef _WIN32
  428. if (ioc->event) {
  429. CloseHandle(ioc->event);
  430. }
  431. #endif
  432. }
  433. static const TypeInfo qio_channel_info = {
  434. .parent = TYPE_OBJECT,
  435. .name = TYPE_QIO_CHANNEL,
  436. .instance_size = sizeof(QIOChannel),
  437. .instance_finalize = qio_channel_finalize,
  438. .abstract = true,
  439. .class_size = sizeof(QIOChannelClass),
  440. };
  441. static void qio_channel_register_types(void)
  442. {
  443. type_register_static(&qio_channel_info);
  444. }
  445. type_init(qio_channel_register_types);