iov.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769
  1. /*
  2. * Helpers for getting linearized buffers from iov / filling buffers into iovs
  3. *
  4. * Copyright IBM, Corp. 2007, 2008
  5. * Copyright (C) 2010 Red Hat, Inc.
  6. *
  7. * Author(s):
  8. * Anthony Liguori <aliguori@us.ibm.com>
  9. * Amit Shah <amit.shah@redhat.com>
  10. * Michael Tokarev <mjt@tls.msk.ru>
  11. *
  12. * This work is licensed under the terms of the GNU GPL, version 2. See
  13. * the COPYING file in the top-level directory.
  14. *
  15. * Contributions after 2012-01-13 are licensed under the terms of the
  16. * GNU GPL, version 2 or (at your option) any later version.
  17. */
  18. #include "qemu/osdep.h"
  19. #include "qemu/iov.h"
  20. #include "qemu/sockets.h"
  21. #include "qemu/cutils.h"
  22. size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
  23. size_t offset, const void *buf, size_t bytes)
  24. {
  25. size_t done;
  26. unsigned int i;
  27. for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
  28. if (offset < iov[i].iov_len) {
  29. size_t len = MIN(iov[i].iov_len - offset, bytes - done);
  30. memcpy(iov[i].iov_base + offset, buf + done, len);
  31. done += len;
  32. offset = 0;
  33. } else {
  34. offset -= iov[i].iov_len;
  35. }
  36. }
  37. assert(offset == 0);
  38. return done;
  39. }
  40. size_t iov_to_buf_full(const struct iovec *iov, const unsigned int iov_cnt,
  41. size_t offset, void *buf, size_t bytes)
  42. {
  43. size_t done;
  44. unsigned int i;
  45. for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
  46. if (offset < iov[i].iov_len) {
  47. size_t len = MIN(iov[i].iov_len - offset, bytes - done);
  48. memcpy(buf + done, iov[i].iov_base + offset, len);
  49. done += len;
  50. offset = 0;
  51. } else {
  52. offset -= iov[i].iov_len;
  53. }
  54. }
  55. assert(offset == 0);
  56. return done;
  57. }
  58. size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
  59. size_t offset, int fillc, size_t bytes)
  60. {
  61. size_t done;
  62. unsigned int i;
  63. for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
  64. if (offset < iov[i].iov_len) {
  65. size_t len = MIN(iov[i].iov_len - offset, bytes - done);
  66. memset(iov[i].iov_base + offset, fillc, len);
  67. done += len;
  68. offset = 0;
  69. } else {
  70. offset -= iov[i].iov_len;
  71. }
  72. }
  73. assert(offset == 0);
  74. return done;
  75. }
  76. size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt)
  77. {
  78. size_t len;
  79. unsigned int i;
  80. len = 0;
  81. for (i = 0; i < iov_cnt; i++) {
  82. len += iov[i].iov_len;
  83. }
  84. return len;
  85. }
/* helper function for iov_send_recv() */
/*
 * Transmit or receive the whole vector over `sockfd' in a single
 * syscall where possible.  Returns the sendmsg()/recvmsg() result on
 * POSIX, or the accumulated byte count on the fallback path; -1 with
 * errno set on error (fallback: only if no data was transferred).
 */
static ssize_t
do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
{
#ifdef CONFIG_POSIX
    ssize_t ret;
    struct msghdr msg;
    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_cnt;
    do {
        /* retry the whole call on EINTR; a short transfer is returned as-is */
        ret = do_send
            ? sendmsg(sockfd, &msg, 0)
            : recvmsg(sockfd, &msg, 0);
    } while (ret < 0 && errno == EINTR);
    return ret;
#else
    /* else send piece-by-piece */
    /*XXX Note: windows has WSASend() and WSARecv() */
    unsigned i = 0;
    ssize_t ret = 0;
    ssize_t off = 0;   /* progress within the current element */
    while (i < iov_cnt) {
        ssize_t r = do_send
            ? send(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0)
            : recv(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0);
        if (r > 0) {
            ret += r;
            off += r;
            /* stay on this element until it is fully transferred */
            if (off < iov[i].iov_len) {
                continue;
            }
        } else if (!r) {
            /* 0 means orderly shutdown (recv) / nothing sent: stop */
            break;
        } else if (errno == EINTR) {
            continue;
        } else {
            /* else it is some "other" error,
             * only return if there was no data processed. */
            if (ret == 0) {
                ret = -1;
            }
            break;
        }
        off = 0;
        i++;
    }
    return ret;
#endif
}
/*
 * Send or receive exactly the byte range [offset, offset + bytes) of
 * `_iov' over `sockfd', looping over partial transfers.
 *
 * The caller's vector is never modified: a trimmed working copy is
 * built with iov_copy() first.  Returns the total number of bytes
 * transferred; on error returns -1 (or the partial total if the
 * error was EAGAIN after some progress).  A zero-byte recv (peer
 * shutdown) terminates the loop early with the partial total.
 */
ssize_t iov_send_recv(int sockfd, const struct iovec *_iov, unsigned iov_cnt,
                      size_t offset, size_t bytes,
                      bool do_send)
{
    ssize_t total = 0;
    ssize_t ret;
    size_t orig_len, tail;
    unsigned niov;
    struct iovec *local_iov, *iov;

    if (bytes <= 0) {
        return 0;
    }

    /* Work on a private copy so the temporary base/len adjustments
     * below never leak into the caller's iovec array. */
    local_iov = g_new0(struct iovec, iov_cnt);
    iov_copy(local_iov, iov_cnt, _iov, iov_cnt, offset, bytes);
    offset = 0;
    iov = local_iov;

    while (bytes > 0) {
        /* Find the start position, skipping `offset' bytes:
         * first, skip all full-sized vector elements, */
        for (niov = 0; niov < iov_cnt && offset >= iov[niov].iov_len; ++niov) {
            offset -= iov[niov].iov_len;
        }

        /* niov == iov_cnt would only be valid if bytes == 0, which
         * we already ruled out in the loop condition. */
        assert(niov < iov_cnt);
        iov += niov;
        iov_cnt -= niov;

        if (offset) {
            /* second, skip `offset' bytes from the (now) first element,
             * undo it on exit */
            iov[0].iov_base += offset;
            iov[0].iov_len -= offset;
        }

        /* Find the end position skipping `bytes' bytes: */
        /* first, skip all full-sized elements */
        tail = bytes;
        for (niov = 0; niov < iov_cnt && iov[niov].iov_len <= tail; ++niov) {
            tail -= iov[niov].iov_len;
        }
        if (tail) {
            /* second, fixup the last element, and remember the original
             * length */
            assert(niov < iov_cnt);
            assert(iov[niov].iov_len > tail);
            orig_len = iov[niov].iov_len;
            iov[niov++].iov_len = tail;
            ret = do_send_recv(sockfd, iov, niov, do_send);
            /* Undo the changes above before checking for errors */
            iov[niov-1].iov_len = orig_len;
        } else {
            ret = do_send_recv(sockfd, iov, niov, do_send);
        }
        if (offset) {
            /* undo the front-element adjustment made above */
            iov[0].iov_base -= offset;
            iov[0].iov_len += offset;
        }

        if (ret < 0) {
            assert(errno != EINTR);
            g_free(local_iov);
            if (errno == EAGAIN && total > 0) {
                return total;
            }
            return -1;
        }

        if (ret == 0 && !do_send) {
            /* recv returns 0 when the peer has performed an orderly
             * shutdown. */
            break;
        }

        /* Prepare for the next iteration */
        offset += ret;
        total += ret;
        bytes -= ret;
    }

    g_free(local_iov);
    return total;
}
  213. void iov_hexdump(const struct iovec *iov, const unsigned int iov_cnt,
  214. FILE *fp, const char *prefix, size_t limit)
  215. {
  216. int v;
  217. size_t size = 0;
  218. char *buf;
  219. for (v = 0; v < iov_cnt; v++) {
  220. size += iov[v].iov_len;
  221. }
  222. size = size > limit ? limit : size;
  223. buf = g_malloc(size);
  224. iov_to_buf(iov, iov_cnt, 0, buf, size);
  225. qemu_hexdump(fp, prefix, buf, size);
  226. g_free(buf);
  227. }
  228. unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt,
  229. const struct iovec *iov, unsigned int iov_cnt,
  230. size_t offset, size_t bytes)
  231. {
  232. size_t len;
  233. unsigned int i, j;
  234. for (i = 0, j = 0;
  235. i < iov_cnt && j < dst_iov_cnt && (offset || bytes); i++) {
  236. if (offset >= iov[i].iov_len) {
  237. offset -= iov[i].iov_len;
  238. continue;
  239. }
  240. len = MIN(bytes, iov[i].iov_len - offset);
  241. dst_iov[j].iov_base = iov[i].iov_base + offset;
  242. dst_iov[j].iov_len = len;
  243. j++;
  244. bytes -= len;
  245. offset = 0;
  246. }
  247. assert(offset == 0);
  248. return j;
  249. }
  250. /* io vectors */
  251. void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint)
  252. {
  253. qiov->iov = g_new(struct iovec, alloc_hint);
  254. qiov->niov = 0;
  255. qiov->nalloc = alloc_hint;
  256. qiov->size = 0;
  257. }
  258. void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov)
  259. {
  260. int i;
  261. qiov->iov = iov;
  262. qiov->niov = niov;
  263. qiov->nalloc = -1;
  264. qiov->size = 0;
  265. for (i = 0; i < niov; i++)
  266. qiov->size += iov[i].iov_len;
  267. }
  268. void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
  269. {
  270. assert(qiov->nalloc != -1);
  271. if (qiov->niov == qiov->nalloc) {
  272. qiov->nalloc = 2 * qiov->nalloc + 1;
  273. qiov->iov = g_renew(struct iovec, qiov->iov, qiov->nalloc);
  274. }
  275. qiov->iov[qiov->niov].iov_base = base;
  276. qiov->iov[qiov->niov].iov_len = len;
  277. qiov->size += len;
  278. ++qiov->niov;
  279. }
/*
 * Concatenates (partial) iovecs from src_iov to the end of dst.
 * It starts copying after skipping `soffset' bytes at the
 * beginning of src_iov, and adds individual vectors from src_iov
 * to dst until either `sbytes' bytes total have been covered or
 * the end of src_iov is reached, whichever comes first.  This way
 * it is okay to specify a very large value for `sbytes' to mean
 * "up to the end of src".
 * Only vector pointers are processed, not the actual data buffers.
 */
  290. size_t qemu_iovec_concat_iov(QEMUIOVector *dst,
  291. struct iovec *src_iov, unsigned int src_cnt,
  292. size_t soffset, size_t sbytes)
  293. {
  294. int i;
  295. size_t done;
  296. if (!sbytes) {
  297. return 0;
  298. }
  299. assert(dst->nalloc != -1);
  300. for (i = 0, done = 0; done < sbytes && i < src_cnt; i++) {
  301. if (soffset < src_iov[i].iov_len) {
  302. size_t len = MIN(src_iov[i].iov_len - soffset, sbytes - done);
  303. qemu_iovec_add(dst, src_iov[i].iov_base + soffset, len);
  304. done += len;
  305. soffset = 0;
  306. } else {
  307. soffset -= src_iov[i].iov_len;
  308. }
  309. }
  310. assert(soffset == 0); /* offset beyond end of src */
  311. return done;
  312. }
/*
 * Concatenates (partial) iovecs from src to the end of dst.
 * It starts copying after skipping `soffset' bytes at the
 * beginning of src, and adds individual vectors from src to dst
 * until either `sbytes' bytes total have been covered or the end
 * of src is reached, whichever comes first.  This way it is okay
 * to specify a very large value for `sbytes' to mean "up to the
 * end of src".
 * Only vector pointers are processed, not the actual data buffers.
 */
  323. void qemu_iovec_concat(QEMUIOVector *dst,
  324. QEMUIOVector *src, size_t soffset, size_t sbytes)
  325. {
  326. qemu_iovec_concat_iov(dst, src->iov, src->niov, soffset, sbytes);
  327. }
/*
 * iov_skip_offset
 *
 * Return a pointer to the iovec element that contains the byte at
 * @offset within the original vector @iov.
 * Set @remaining_offset to the offset of that same byte inside the
 * returned element.
 */
  335. static struct iovec *iov_skip_offset(struct iovec *iov, size_t offset,
  336. size_t *remaining_offset)
  337. {
  338. while (offset > 0 && offset >= iov->iov_len) {
  339. offset -= iov->iov_len;
  340. iov++;
  341. }
  342. *remaining_offset = offset;
  343. return iov;
  344. }
  345. /*
  346. * qiov_slice
  347. *
  348. * Find subarray of iovec's, containing requested range. @head would
  349. * be offset in first iov (returned by the function), @tail would be
  350. * count of extra bytes in last iovec (returned iov + @niov - 1).
  351. */
static struct iovec *qiov_slice(QEMUIOVector *qiov,
                                size_t offset, size_t len,
                                size_t *head, size_t *tail, int *niov)
{
    struct iovec *iov, *end_iov;

    /* the requested range must lie inside the vector */
    assert(offset + len <= qiov->size);

    /* first element of the slice; *head = offset of the slice inside it */
    iov = iov_skip_offset(qiov->iov, offset, head);
    /* element just past the last slice byte; *tail is, at this point,
     * the number of slice bytes that fall inside end_iov */
    end_iov = iov_skip_offset(iov, *head + len, tail);

    if (*tail > 0) {
        /* slice ends mid-element: include that element and convert
         * *tail to the count of EXTRA bytes after the slice in it */
        assert(*tail < end_iov->iov_len);
        *tail = end_iov->iov_len - *tail;
        end_iov++;
    }

    *niov = end_iov - iov;

    return iov;
}
  368. int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len)
  369. {
  370. size_t head, tail;
  371. int niov;
  372. qiov_slice(qiov, offset, len, &head, &tail, &niov);
  373. return niov;
  374. }
  375. /*
  376. * Compile new iovec, combining @head_buf buffer, sub-qiov of @mid_qiov,
  377. * and @tail_buf buffer into new qiov.
  378. */
int qemu_iovec_init_extended(
        QEMUIOVector *qiov,
        void *head_buf, size_t head_len,
        QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len,
        void *tail_buf, size_t tail_len)
{
    size_t mid_head, mid_tail;
    int total_niov, mid_niov = 0;
    struct iovec *p, *mid_iov = NULL;

    assert(mid_qiov->niov <= IOV_MAX);

    /* reject size_t overflow of the combined length */
    if (SIZE_MAX - head_len < mid_len ||
        SIZE_MAX - head_len - mid_len < tail_len)
    {
        return -EINVAL;
    }

    if (mid_len) {
        /* locate the sub-range of mid_qiov; mid_head/mid_tail are the
         * byte counts to trim off the first/last copied element */
        mid_iov = qiov_slice(mid_qiov, mid_offset, mid_len,
                             &mid_head, &mid_tail, &mid_niov);
    }

    /* head and tail each contribute at most one element */
    total_niov = !!head_len + mid_niov + !!tail_len;
    if (total_niov > IOV_MAX) {
        return -EINVAL;
    }

    if (total_niov == 1) {
        /* single element: use the embedded local_iov, no allocation */
        qemu_iovec_init_buf(qiov, NULL, 0);
        p = &qiov->local_iov;
    } else {
        qiov->niov = qiov->nalloc = total_niov;
        qiov->size = head_len + mid_len + tail_len;
        p = qiov->iov = g_new(struct iovec, qiov->niov);
    }

    if (head_len) {
        p->iov_base = head_buf;
        p->iov_len = head_len;
        p++;
    }

    /* qiov_slice() returns elements iff mid_len was non-zero */
    assert(!mid_niov == !mid_len);
    if (mid_niov) {
        memcpy(p, mid_iov, mid_niov * sizeof(*p));
        /* trim the copied first/last elements to the requested range */
        p[0].iov_base = (uint8_t *)p[0].iov_base + mid_head;
        p[0].iov_len -= mid_head;
        p[mid_niov - 1].iov_len -= mid_tail;
        p += mid_niov;
    }

    if (tail_len) {
        p->iov_base = tail_buf;
        p->iov_len = tail_len;
    }

    return 0;
}
  429. /*
  430. * Check if the contents of subrange of qiov data is all zeroes.
  431. */
bool qemu_iovec_is_zero(QEMUIOVector *qiov, size_t offset, size_t bytes)
{
    struct iovec *iov;
    size_t current_offset;

    /* the checked range must lie inside the vector */
    assert(offset + bytes <= qiov->size);

    /* locate the element containing the first byte of the range */
    iov = iov_skip_offset(qiov->iov, offset, &current_offset);

    while (bytes) {
        uint8_t *base = (uint8_t *)iov->iov_base + current_offset;
        size_t len = MIN(iov->iov_len - current_offset, bytes);

        if (!buffer_is_zero(base, len)) {
            return false;
        }

        /* subsequent elements are scanned from their beginning */
        current_offset = 0;
        bytes -= len;
        iov++;
    }

    return true;
}
  450. void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source,
  451. size_t offset, size_t len)
  452. {
  453. int ret;
  454. assert(source->size >= len);
  455. assert(source->size - len >= offset);
  456. /* We shrink the request, so we can't overflow neither size_t nor MAX_IOV */
  457. ret = qemu_iovec_init_extended(qiov, NULL, 0, source, offset, len, NULL, 0);
  458. assert(ret == 0);
  459. }
  460. void qemu_iovec_destroy(QEMUIOVector *qiov)
  461. {
  462. if (qiov->nalloc != -1) {
  463. g_free(qiov->iov);
  464. }
  465. memset(qiov, 0, sizeof(*qiov));
  466. }
  467. void qemu_iovec_reset(QEMUIOVector *qiov)
  468. {
  469. assert(qiov->nalloc != -1);
  470. qiov->niov = 0;
  471. qiov->size = 0;
  472. }
  473. size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset,
  474. void *buf, size_t bytes)
  475. {
  476. return iov_to_buf(qiov->iov, qiov->niov, offset, buf, bytes);
  477. }
  478. size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
  479. const void *buf, size_t bytes)
  480. {
  481. return iov_from_buf(qiov->iov, qiov->niov, offset, buf, bytes);
  482. }
  483. size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
  484. int fillc, size_t bytes)
  485. {
  486. return iov_memset(qiov->iov, qiov->niov, offset, fillc, bytes);
  487. }
  488. /**
  489. * Check that I/O vector contents are identical
  490. *
  491. * The IO vectors must have the same structure (same length of all parts).
  492. * A typical usage is to compare vectors created with qemu_iovec_clone().
  493. *
  494. * @a: I/O vector
  495. * @b: I/O vector
  496. * @ret: Offset to first mismatching byte or -1 if match
  497. */
  498. ssize_t qemu_iovec_compare(QEMUIOVector *a, QEMUIOVector *b)
  499. {
  500. int i;
  501. ssize_t offset = 0;
  502. assert(a->niov == b->niov);
  503. for (i = 0; i < a->niov; i++) {
  504. size_t len = 0;
  505. uint8_t *p = (uint8_t *)a->iov[i].iov_base;
  506. uint8_t *q = (uint8_t *)b->iov[i].iov_base;
  507. assert(a->iov[i].iov_len == b->iov[i].iov_len);
  508. while (len < a->iov[i].iov_len && *p++ == *q++) {
  509. len++;
  510. }
  511. offset += len;
  512. if (len != a->iov[i].iov_len) {
  513. return offset;
  514. }
  515. }
  516. return -1;
  517. }
/* Scratch entry used by qemu_iovec_clone(): lets the source iovecs be
 * sorted by base address while remembering their original positions. */
typedef struct {
    int src_index;          /* position of the element in the source vector */
    struct iovec *src_iov;  /* the source element itself */
    void *dest_base;        /* destination address chosen for the clone */
} IOVectorSortElem;
  523. static int sortelem_cmp_src_base(const void *a, const void *b)
  524. {
  525. const IOVectorSortElem *elem_a = a;
  526. const IOVectorSortElem *elem_b = b;
  527. /* Don't overflow */
  528. if (elem_a->src_iov->iov_base < elem_b->src_iov->iov_base) {
  529. return -1;
  530. } else if (elem_a->src_iov->iov_base > elem_b->src_iov->iov_base) {
  531. return 1;
  532. } else {
  533. return 0;
  534. }
  535. }
  536. static int sortelem_cmp_src_index(const void *a, const void *b)
  537. {
  538. const IOVectorSortElem *elem_a = a;
  539. const IOVectorSortElem *elem_b = b;
  540. return elem_a->src_index - elem_b->src_index;
  541. }
  542. /**
  543. * Copy contents of I/O vector
  544. *
  545. * The relative relationships of overlapping iovecs are preserved. This is
  546. * necessary to ensure identical semantics in the cloned I/O vector.
  547. */
void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf)
{
    IOVectorSortElem sortelems[src->niov];
    void *last_end;
    int i;

    /* Sort by source iovecs by base address */
    for (i = 0; i < src->niov; i++) {
        sortelems[i].src_index = i;
        sortelems[i].src_iov = &src->iov[i];
    }
    qsort(sortelems, src->niov, sizeof(sortelems[0]), sortelem_cmp_src_base);

    /* Allocate buffer space taking into account overlapping iovecs */
    last_end = NULL;
    for (i = 0; i < src->niov; i++) {
        struct iovec *cur = sortelems[i].src_iov;
        ptrdiff_t rewind = 0;

        /* Detect overlap */
        if (last_end && last_end > cur->iov_base) {
            /* rewind = how many bytes this element shares with the
             * previously placed ones; reuse that part of `buf' so the
             * clone overlaps in exactly the same way as the source */
            rewind = last_end - cur->iov_base;
        }

        sortelems[i].dest_base = buf - rewind;
        /* advance `buf' only by the non-overlapping portion */
        buf += cur->iov_len - MIN(rewind, cur->iov_len);
        last_end = MAX(cur->iov_base + cur->iov_len, last_end);
    }

    /* Sort by source iovec index and build destination iovec */
    qsort(sortelems, src->niov, sizeof(sortelems[0]), sortelem_cmp_src_index);
    for (i = 0; i < src->niov; i++) {
        qemu_iovec_add(dest, sortelems[i].dest_base, src->iov[i].iov_len);
    }
}
  578. void iov_discard_undo(IOVDiscardUndo *undo)
  579. {
  580. /* Restore original iovec if it was modified */
  581. if (undo->modified_iov) {
  582. *undo->modified_iov = undo->orig;
  583. }
  584. }
/*
 * Discard `bytes' bytes from the front of the vector, advancing *iov
 * and shrinking *iov_cnt in place.  At most one element is partially
 * consumed; if @undo is non-NULL its original value is recorded so
 * iov_discard_undo() can restore it.  Returns the number of bytes
 * actually discarded (less than `bytes' if the vector is shorter).
 */
size_t iov_discard_front_undoable(struct iovec **iov,
                                  unsigned int *iov_cnt,
                                  size_t bytes,
                                  IOVDiscardUndo *undo)
{
    size_t total = 0;
    struct iovec *cur;

    if (undo) {
        undo->modified_iov = NULL; /* nothing to undo yet */
    }

    for (cur = *iov; *iov_cnt > 0; cur++) {
        if (cur->iov_len > bytes) {
            /* this element survives but loses its first `bytes' bytes;
             * remember it for a possible undo */
            if (undo) {
                undo->modified_iov = cur;
                undo->orig = *cur;
            }

            cur->iov_base += bytes;
            cur->iov_len -= bytes;
            total += bytes;
            break;
        }

        /* element consumed entirely: drop it from the front */
        bytes -= cur->iov_len;
        total += cur->iov_len;
        *iov_cnt -= 1;
    }

    *iov = cur;
    return total;
}
  613. size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
  614. size_t bytes)
  615. {
  616. return iov_discard_front_undoable(iov, iov_cnt, bytes, NULL);
  617. }
/*
 * Discard `bytes' bytes from the back of the vector, shrinking
 * *iov_cnt in place.  At most one element is partially consumed;
 * if @undo is non-NULL its original value is recorded so
 * iov_discard_undo() can restore it.  Returns the number of bytes
 * actually discarded (less than `bytes' if the vector is shorter).
 */
size_t iov_discard_back_undoable(struct iovec *iov,
                                 unsigned int *iov_cnt,
                                 size_t bytes,
                                 IOVDiscardUndo *undo)
{
    size_t total = 0;
    struct iovec *cur;

    if (undo) {
        undo->modified_iov = NULL; /* nothing to undo yet */
    }

    if (*iov_cnt == 0) {
        return 0;
    }

    /* walk backwards from the last element */
    cur = iov + (*iov_cnt - 1);

    while (*iov_cnt > 0) {
        if (cur->iov_len > bytes) {
            /* this element survives but loses its last `bytes' bytes;
             * remember it for a possible undo */
            if (undo) {
                undo->modified_iov = cur;
                undo->orig = *cur;
            }

            cur->iov_len -= bytes;
            total += bytes;
            break;
        }

        /* element consumed entirely: drop it from the back */
        bytes -= cur->iov_len;
        total += cur->iov_len;
        cur--;
        *iov_cnt -= 1;
    }

    return total;
}
  649. size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
  650. size_t bytes)
  651. {
  652. return iov_discard_back_undoable(iov, iov_cnt, bytes, NULL);
  653. }
  654. void qemu_iovec_discard_back(QEMUIOVector *qiov, size_t bytes)
  655. {
  656. size_t total;
  657. unsigned int niov = qiov->niov;
  658. assert(qiov->size >= bytes);
  659. total = iov_discard_back(qiov->iov, &niov, bytes);
  660. assert(total == bytes);
  661. qiov->niov = niov;
  662. qiov->size -= bytes;
  663. }