/*
 * QEMU posix-aio emulation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
  15. #include <sys/ioctl.h>
  16. #include <sys/types.h>
  17. #include <pthread.h>
  18. #include <unistd.h>
  19. #include <errno.h>
  20. #include <time.h>
  21. #include <string.h>
  22. #include <stdlib.h>
  23. #include <stdio.h>
  24. #include "qemu-queue.h"
  25. #include "osdep.h"
  26. #include "sysemu.h"
  27. #include "qemu-common.h"
  28. #include "trace.h"
  29. #include "block_int.h"
  30. #include "iov.h"
  31. #include "block/raw-posix-aio.h"
  32. static void do_spawn_thread(void);
  33. struct qemu_paiocb {
  34. BlockDriverAIOCB common;
  35. int aio_fildes;
  36. union {
  37. struct iovec *aio_iov;
  38. void *aio_ioctl_buf;
  39. };
  40. int aio_niov;
  41. size_t aio_nbytes;
  42. #define aio_ioctl_cmd aio_nbytes /* for QEMU_AIO_IOCTL */
  43. off_t aio_offset;
  44. QTAILQ_ENTRY(qemu_paiocb) node;
  45. int aio_type;
  46. ssize_t ret;
  47. int active;
  48. struct qemu_paiocb *next;
  49. };
  50. typedef struct PosixAioState {
  51. int rfd, wfd;
  52. struct qemu_paiocb *first_aio;
  53. } PosixAioState;
  54. static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  55. static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
  56. static pthread_t thread_id;
  57. static pthread_attr_t attr;
  58. static int max_threads = 64;
  59. static int cur_threads = 0;
  60. static int idle_threads = 0;
  61. static int new_threads = 0; /* backlog of threads we need to create */
  62. static int pending_threads = 0; /* threads created but not running yet */
  63. static QEMUBH *new_thread_bh;
  64. static QTAILQ_HEAD(, qemu_paiocb) request_list;
  65. #ifdef CONFIG_PREADV
  66. static int preadv_present = 1;
  67. #else
  68. static int preadv_present = 0;
  69. #endif
  70. static void die2(int err, const char *what)
  71. {
  72. fprintf(stderr, "%s failed: %s\n", what, strerror(err));
  73. abort();
  74. }
  75. static void die(const char *what)
  76. {
  77. die2(errno, what);
  78. }
  79. static void mutex_lock(pthread_mutex_t *mutex)
  80. {
  81. int ret = pthread_mutex_lock(mutex);
  82. if (ret) die2(ret, "pthread_mutex_lock");
  83. }
  84. static void mutex_unlock(pthread_mutex_t *mutex)
  85. {
  86. int ret = pthread_mutex_unlock(mutex);
  87. if (ret) die2(ret, "pthread_mutex_unlock");
  88. }
  89. static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
  90. struct timespec *ts)
  91. {
  92. int ret = pthread_cond_timedwait(cond, mutex, ts);
  93. if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
  94. return ret;
  95. }
  96. static void cond_signal(pthread_cond_t *cond)
  97. {
  98. int ret = pthread_cond_signal(cond);
  99. if (ret) die2(ret, "pthread_cond_signal");
  100. }
  101. static void thread_create(pthread_t *thread, pthread_attr_t *attr,
  102. void *(*start_routine)(void*), void *arg)
  103. {
  104. int ret = pthread_create(thread, attr, start_routine, arg);
  105. if (ret) die2(ret, "pthread_create");
  106. }
  107. static ssize_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb)
  108. {
  109. int ret;
  110. ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf);
  111. if (ret == -1)
  112. return -errno;
  113. /*
  114. * This looks weird, but the aio code only considers a request
  115. * successful if it has written the full number of bytes.
  116. *
  117. * Now we overload aio_nbytes as aio_ioctl_cmd for the ioctl command,
  118. * so in fact we return the ioctl command here to make posix_aio_read()
  119. * happy..
  120. */
  121. return aiocb->aio_nbytes;
  122. }
  123. static ssize_t handle_aiocb_flush(struct qemu_paiocb *aiocb)
  124. {
  125. int ret;
  126. ret = qemu_fdatasync(aiocb->aio_fildes);
  127. if (ret == -1)
  128. return -errno;
  129. return 0;
  130. }
  131. #ifdef CONFIG_PREADV
  132. static ssize_t
  133. qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
  134. {
  135. return preadv(fd, iov, nr_iov, offset);
  136. }
  137. static ssize_t
  138. qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
  139. {
  140. return pwritev(fd, iov, nr_iov, offset);
  141. }
  142. #else
  143. static ssize_t
  144. qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
  145. {
  146. return -ENOSYS;
  147. }
  148. static ssize_t
  149. qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
  150. {
  151. return -ENOSYS;
  152. }
  153. #endif
  154. static ssize_t handle_aiocb_rw_vector(struct qemu_paiocb *aiocb)
  155. {
  156. ssize_t len;
  157. do {
  158. if (aiocb->aio_type & QEMU_AIO_WRITE)
  159. len = qemu_pwritev(aiocb->aio_fildes,
  160. aiocb->aio_iov,
  161. aiocb->aio_niov,
  162. aiocb->aio_offset);
  163. else
  164. len = qemu_preadv(aiocb->aio_fildes,
  165. aiocb->aio_iov,
  166. aiocb->aio_niov,
  167. aiocb->aio_offset);
  168. } while (len == -1 && errno == EINTR);
  169. if (len == -1)
  170. return -errno;
  171. return len;
  172. }
  173. /*
  174. * Read/writes the data to/from a given linear buffer.
  175. *
  176. * Returns the number of bytes handles or -errno in case of an error. Short
  177. * reads are only returned if the end of the file is reached.
  178. */
  179. static ssize_t handle_aiocb_rw_linear(struct qemu_paiocb *aiocb, char *buf)
  180. {
  181. ssize_t offset = 0;
  182. ssize_t len;
  183. while (offset < aiocb->aio_nbytes) {
  184. if (aiocb->aio_type & QEMU_AIO_WRITE)
  185. len = pwrite(aiocb->aio_fildes,
  186. (const char *)buf + offset,
  187. aiocb->aio_nbytes - offset,
  188. aiocb->aio_offset + offset);
  189. else
  190. len = pread(aiocb->aio_fildes,
  191. buf + offset,
  192. aiocb->aio_nbytes - offset,
  193. aiocb->aio_offset + offset);
  194. if (len == -1 && errno == EINTR)
  195. continue;
  196. else if (len == -1) {
  197. offset = -errno;
  198. break;
  199. } else if (len == 0)
  200. break;
  201. offset += len;
  202. }
  203. return offset;
  204. }
  205. static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
  206. {
  207. ssize_t nbytes;
  208. char *buf;
  209. if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
  210. /*
  211. * If there is just a single buffer, and it is properly aligned
  212. * we can just use plain pread/pwrite without any problems.
  213. */
  214. if (aiocb->aio_niov == 1)
  215. return handle_aiocb_rw_linear(aiocb, aiocb->aio_iov->iov_base);
  216. /*
  217. * We have more than one iovec, and all are properly aligned.
  218. *
  219. * Try preadv/pwritev first and fall back to linearizing the
  220. * buffer if it's not supported.
  221. */
  222. if (preadv_present) {
  223. nbytes = handle_aiocb_rw_vector(aiocb);
  224. if (nbytes == aiocb->aio_nbytes)
  225. return nbytes;
  226. if (nbytes < 0 && nbytes != -ENOSYS)
  227. return nbytes;
  228. preadv_present = 0;
  229. }
  230. /*
  231. * XXX(hch): short read/write. no easy way to handle the reminder
  232. * using these interfaces. For now retry using plain
  233. * pread/pwrite?
  234. */
  235. }
  236. /*
  237. * Ok, we have to do it the hard way, copy all segments into
  238. * a single aligned buffer.
  239. */
  240. buf = qemu_blockalign(aiocb->common.bs, aiocb->aio_nbytes);
  241. if (aiocb->aio_type & QEMU_AIO_WRITE) {
  242. char *p = buf;
  243. int i;
  244. for (i = 0; i < aiocb->aio_niov; ++i) {
  245. memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
  246. p += aiocb->aio_iov[i].iov_len;
  247. }
  248. }
  249. nbytes = handle_aiocb_rw_linear(aiocb, buf);
  250. if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
  251. char *p = buf;
  252. size_t count = aiocb->aio_nbytes, copy;
  253. int i;
  254. for (i = 0; i < aiocb->aio_niov && count; ++i) {
  255. copy = count;
  256. if (copy > aiocb->aio_iov[i].iov_len)
  257. copy = aiocb->aio_iov[i].iov_len;
  258. memcpy(aiocb->aio_iov[i].iov_base, p, copy);
  259. p += copy;
  260. count -= copy;
  261. }
  262. }
  263. qemu_vfree(buf);
  264. return nbytes;
  265. }
  266. static void posix_aio_notify_event(void);
  267. static void *aio_thread(void *unused)
  268. {
  269. mutex_lock(&lock);
  270. pending_threads--;
  271. mutex_unlock(&lock);
  272. do_spawn_thread();
  273. while (1) {
  274. struct qemu_paiocb *aiocb;
  275. ssize_t ret = 0;
  276. qemu_timeval tv;
  277. struct timespec ts;
  278. qemu_gettimeofday(&tv);
  279. ts.tv_sec = tv.tv_sec + 10;
  280. ts.tv_nsec = 0;
  281. mutex_lock(&lock);
  282. while (QTAILQ_EMPTY(&request_list) &&
  283. !(ret == ETIMEDOUT)) {
  284. idle_threads++;
  285. ret = cond_timedwait(&cond, &lock, &ts);
  286. idle_threads--;
  287. }
  288. if (QTAILQ_EMPTY(&request_list))
  289. break;
  290. aiocb = QTAILQ_FIRST(&request_list);
  291. QTAILQ_REMOVE(&request_list, aiocb, node);
  292. aiocb->active = 1;
  293. mutex_unlock(&lock);
  294. switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
  295. case QEMU_AIO_READ:
  296. ret = handle_aiocb_rw(aiocb);
  297. if (ret >= 0 && ret < aiocb->aio_nbytes && aiocb->common.bs->growable) {
  298. /* A short read means that we have reached EOF. Pad the buffer
  299. * with zeros for bytes after EOF. */
  300. iov_memset(aiocb->aio_iov, aiocb->aio_niov, ret,
  301. 0, aiocb->aio_nbytes - ret);
  302. ret = aiocb->aio_nbytes;
  303. }
  304. break;
  305. case QEMU_AIO_WRITE:
  306. ret = handle_aiocb_rw(aiocb);
  307. break;
  308. case QEMU_AIO_FLUSH:
  309. ret = handle_aiocb_flush(aiocb);
  310. break;
  311. case QEMU_AIO_IOCTL:
  312. ret = handle_aiocb_ioctl(aiocb);
  313. break;
  314. default:
  315. fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
  316. ret = -EINVAL;
  317. break;
  318. }
  319. mutex_lock(&lock);
  320. aiocb->ret = ret;
  321. mutex_unlock(&lock);
  322. posix_aio_notify_event();
  323. }
  324. cur_threads--;
  325. mutex_unlock(&lock);
  326. return NULL;
  327. }
  328. static void do_spawn_thread(void)
  329. {
  330. sigset_t set, oldset;
  331. mutex_lock(&lock);
  332. if (!new_threads) {
  333. mutex_unlock(&lock);
  334. return;
  335. }
  336. new_threads--;
  337. pending_threads++;
  338. mutex_unlock(&lock);
  339. /* block all signals */
  340. if (sigfillset(&set)) die("sigfillset");
  341. if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");
  342. thread_create(&thread_id, &attr, aio_thread, NULL);
  343. if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore");
  344. }
  345. static void spawn_thread_bh_fn(void *opaque)
  346. {
  347. do_spawn_thread();
  348. }
  349. static void spawn_thread(void)
  350. {
  351. cur_threads++;
  352. new_threads++;
  353. /* If there are threads being created, they will spawn new workers, so
  354. * we don't spend time creating many threads in a loop holding a mutex or
  355. * starving the current vcpu.
  356. *
  357. * If there are no idle threads, ask the main thread to create one, so we
  358. * inherit the correct affinity instead of the vcpu affinity.
  359. */
  360. if (!pending_threads) {
  361. qemu_bh_schedule(new_thread_bh);
  362. }
  363. }
  364. static void qemu_paio_submit(struct qemu_paiocb *aiocb)
  365. {
  366. aiocb->ret = -EINPROGRESS;
  367. aiocb->active = 0;
  368. mutex_lock(&lock);
  369. if (idle_threads == 0 && cur_threads < max_threads)
  370. spawn_thread();
  371. QTAILQ_INSERT_TAIL(&request_list, aiocb, node);
  372. mutex_unlock(&lock);
  373. cond_signal(&cond);
  374. }
  375. static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
  376. {
  377. ssize_t ret;
  378. mutex_lock(&lock);
  379. ret = aiocb->ret;
  380. mutex_unlock(&lock);
  381. return ret;
  382. }
  383. static int qemu_paio_error(struct qemu_paiocb *aiocb)
  384. {
  385. ssize_t ret = qemu_paio_return(aiocb);
  386. if (ret < 0)
  387. ret = -ret;
  388. else
  389. ret = 0;
  390. return ret;
  391. }
  392. static void posix_aio_read(void *opaque)
  393. {
  394. PosixAioState *s = opaque;
  395. struct qemu_paiocb *acb, **pacb;
  396. int ret;
  397. ssize_t len;
  398. /* read all bytes from signal pipe */
  399. for (;;) {
  400. char bytes[16];
  401. len = read(s->rfd, bytes, sizeof(bytes));
  402. if (len == -1 && errno == EINTR)
  403. continue; /* try again */
  404. if (len == sizeof(bytes))
  405. continue; /* more to read */
  406. break;
  407. }
  408. for(;;) {
  409. pacb = &s->first_aio;
  410. for(;;) {
  411. acb = *pacb;
  412. if (!acb)
  413. return;
  414. ret = qemu_paio_error(acb);
  415. if (ret == ECANCELED) {
  416. /* remove the request */
  417. *pacb = acb->next;
  418. qemu_aio_release(acb);
  419. } else if (ret != EINPROGRESS) {
  420. /* end of aio */
  421. if (ret == 0) {
  422. ret = qemu_paio_return(acb);
  423. if (ret == acb->aio_nbytes)
  424. ret = 0;
  425. else
  426. ret = -EINVAL;
  427. } else {
  428. ret = -ret;
  429. }
  430. trace_paio_complete(acb, acb->common.opaque, ret);
  431. /* remove the request */
  432. *pacb = acb->next;
  433. /* call the callback */
  434. acb->common.cb(acb->common.opaque, ret);
  435. qemu_aio_release(acb);
  436. break;
  437. } else {
  438. pacb = &acb->next;
  439. }
  440. }
  441. }
  442. }
  443. static int posix_aio_flush(void *opaque)
  444. {
  445. PosixAioState *s = opaque;
  446. return !!s->first_aio;
  447. }
  448. static PosixAioState *posix_aio_state;
  449. static void posix_aio_notify_event(void)
  450. {
  451. char byte = 0;
  452. ssize_t ret;
  453. ret = write(posix_aio_state->wfd, &byte, sizeof(byte));
  454. if (ret < 0 && errno != EAGAIN)
  455. die("write()");
  456. }
  457. static void paio_remove(struct qemu_paiocb *acb)
  458. {
  459. struct qemu_paiocb **pacb;
  460. /* remove the callback from the queue */
  461. pacb = &posix_aio_state->first_aio;
  462. for(;;) {
  463. if (*pacb == NULL) {
  464. fprintf(stderr, "paio_remove: aio request not found!\n");
  465. break;
  466. } else if (*pacb == acb) {
  467. *pacb = acb->next;
  468. qemu_aio_release(acb);
  469. break;
  470. }
  471. pacb = &(*pacb)->next;
  472. }
  473. }
  474. static void paio_cancel(BlockDriverAIOCB *blockacb)
  475. {
  476. struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb;
  477. int active = 0;
  478. trace_paio_cancel(acb, acb->common.opaque);
  479. mutex_lock(&lock);
  480. if (!acb->active) {
  481. QTAILQ_REMOVE(&request_list, acb, node);
  482. acb->ret = -ECANCELED;
  483. } else if (acb->ret == -EINPROGRESS) {
  484. active = 1;
  485. }
  486. mutex_unlock(&lock);
  487. if (active) {
  488. /* fail safe: if the aio could not be canceled, we wait for
  489. it */
  490. while (qemu_paio_error(acb) == EINPROGRESS)
  491. ;
  492. }
  493. paio_remove(acb);
  494. }
  495. static AIOPool raw_aio_pool = {
  496. .aiocb_size = sizeof(struct qemu_paiocb),
  497. .cancel = paio_cancel,
  498. };
  499. BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
  500. int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
  501. BlockDriverCompletionFunc *cb, void *opaque, int type)
  502. {
  503. struct qemu_paiocb *acb;
  504. acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
  505. acb->aio_type = type;
  506. acb->aio_fildes = fd;
  507. if (qiov) {
  508. acb->aio_iov = qiov->iov;
  509. acb->aio_niov = qiov->niov;
  510. }
  511. acb->aio_nbytes = nb_sectors * 512;
  512. acb->aio_offset = sector_num * 512;
  513. acb->next = posix_aio_state->first_aio;
  514. posix_aio_state->first_aio = acb;
  515. trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
  516. qemu_paio_submit(acb);
  517. return &acb->common;
  518. }
  519. BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
  520. unsigned long int req, void *buf,
  521. BlockDriverCompletionFunc *cb, void *opaque)
  522. {
  523. struct qemu_paiocb *acb;
  524. acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
  525. acb->aio_type = QEMU_AIO_IOCTL;
  526. acb->aio_fildes = fd;
  527. acb->aio_offset = 0;
  528. acb->aio_ioctl_buf = buf;
  529. acb->aio_ioctl_cmd = req;
  530. acb->next = posix_aio_state->first_aio;
  531. posix_aio_state->first_aio = acb;
  532. qemu_paio_submit(acb);
  533. return &acb->common;
  534. }
  535. int paio_init(void)
  536. {
  537. PosixAioState *s;
  538. int fds[2];
  539. int ret;
  540. if (posix_aio_state)
  541. return 0;
  542. s = g_malloc(sizeof(PosixAioState));
  543. s->first_aio = NULL;
  544. if (qemu_pipe(fds) == -1) {
  545. fprintf(stderr, "failed to create pipe\n");
  546. g_free(s);
  547. return -1;
  548. }
  549. s->rfd = fds[0];
  550. s->wfd = fds[1];
  551. fcntl(s->rfd, F_SETFL, O_NONBLOCK);
  552. fcntl(s->wfd, F_SETFL, O_NONBLOCK);
  553. qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s);
  554. ret = pthread_attr_init(&attr);
  555. if (ret)
  556. die2(ret, "pthread_attr_init");
  557. ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  558. if (ret)
  559. die2(ret, "pthread_attr_setdetachstate");
  560. QTAILQ_INIT(&request_list);
  561. new_thread_bh = qemu_bh_new(spawn_thread_bh_fn, NULL);
  562. posix_aio_state = s;
  563. return 0;
  564. }