posix-aio-compat.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681
  1. /*
  2. * QEMU posix-aio emulation
  3. *
  4. * Copyright IBM, Corp. 2008
  5. *
  6. * Authors:
  7. * Anthony Liguori <aliguori@us.ibm.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2. See
  10. * the COPYING file in the top-level directory.
  11. *
  12. * Contributions after 2012-01-13 are licensed under the terms of the
  13. * GNU GPL, version 2 or (at your option) any later version.
  14. */
#include <sys/ioctl.h>
#include <sys/types.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include "qemu-queue.h"
#include "osdep.h"
#include "sysemu.h"
#include "qemu-common.h"
#include "trace.h"
#include "block_int.h"
#include "block/raw-posix-aio.h"
  31. static void do_spawn_thread(void);
struct qemu_paiocb {
    BlockDriverAIOCB common;    /* must stay first: AIOCBs are cast to this */
    int aio_fildes;             /* file descriptor the request operates on */
    union {
        struct iovec *aio_iov;  /* scatter/gather list for read/write */
        void *aio_ioctl_buf;    /* argument buffer for QEMU_AIO_IOCTL */
    };
    int aio_niov;               /* number of entries in aio_iov */
    size_t aio_nbytes;          /* total request size in bytes */
#define aio_ioctl_cmd   aio_nbytes /* for QEMU_AIO_IOCTL */
    off_t aio_offset;           /* byte offset into the file */

    QTAILQ_ENTRY(qemu_paiocb) node; /* link on the pending request_list */
    int aio_type;               /* QEMU_AIO_* request type and flags */
    ssize_t ret;                /* result: bytes, -errno, or -EINPROGRESS */
    int active;                 /* nonzero once a worker dequeued the request */
    struct qemu_paiocb *next;   /* link on PosixAioState.first_aio */
};
typedef struct PosixAioState {
    int rfd, wfd;                   /* completion-notification pipe ends */
    struct qemu_paiocb *first_aio;  /* singly-linked list of in-flight AIOCBs */
} PosixAioState;
/* Thread-pool state: 'lock' protects request_list and the thread counters
 * below; 'cond' wakes idle workers when a request is queued. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_t thread_id;
static pthread_attr_t attr;     /* detached-thread attributes, see paio_init() */
static int max_threads = 64;    /* upper bound on pool size */
static int cur_threads = 0;     /* workers alive or being created */
static int idle_threads = 0;    /* workers blocked in cond_timedwait() */
static int new_threads = 0;     /* backlog of threads we need to create */
static int pending_threads = 0; /* threads created but not running yet */
static QEMUBH *new_thread_bh;   /* bottom half that spawns workers from the main thread */
static QTAILQ_HEAD(, qemu_paiocb) request_list;

#ifdef CONFIG_PREADV
static int preadv_present = 1;
#else
static int preadv_present = 0;
#endif
  69. static void die2(int err, const char *what)
  70. {
  71. fprintf(stderr, "%s failed: %s\n", what, strerror(err));
  72. abort();
  73. }
  74. static void die(const char *what)
  75. {
  76. die2(errno, what);
  77. }
  78. static void mutex_lock(pthread_mutex_t *mutex)
  79. {
  80. int ret = pthread_mutex_lock(mutex);
  81. if (ret) die2(ret, "pthread_mutex_lock");
  82. }
  83. static void mutex_unlock(pthread_mutex_t *mutex)
  84. {
  85. int ret = pthread_mutex_unlock(mutex);
  86. if (ret) die2(ret, "pthread_mutex_unlock");
  87. }
  88. static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
  89. struct timespec *ts)
  90. {
  91. int ret = pthread_cond_timedwait(cond, mutex, ts);
  92. if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
  93. return ret;
  94. }
  95. static void cond_signal(pthread_cond_t *cond)
  96. {
  97. int ret = pthread_cond_signal(cond);
  98. if (ret) die2(ret, "pthread_cond_signal");
  99. }
  100. static void thread_create(pthread_t *thread, pthread_attr_t *attr,
  101. void *(*start_routine)(void*), void *arg)
  102. {
  103. int ret = pthread_create(thread, attr, start_routine, arg);
  104. if (ret) die2(ret, "pthread_create");
  105. }
  106. static ssize_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb)
  107. {
  108. int ret;
  109. ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf);
  110. if (ret == -1)
  111. return -errno;
  112. /*
  113. * This looks weird, but the aio code only considers a request
  114. * successful if it has written the full number of bytes.
  115. *
  116. * Now we overload aio_nbytes as aio_ioctl_cmd for the ioctl command,
  117. * so in fact we return the ioctl command here to make posix_aio_read()
  118. * happy..
  119. */
  120. return aiocb->aio_nbytes;
  121. }
  122. static ssize_t handle_aiocb_flush(struct qemu_paiocb *aiocb)
  123. {
  124. int ret;
  125. ret = qemu_fdatasync(aiocb->aio_fildes);
  126. if (ret == -1)
  127. return -errno;
  128. return 0;
  129. }
#ifdef CONFIG_PREADV

/* Thin wrappers around the host preadv/pwritev syscalls. */
static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

/* Host lacks preadv/pwritev: return -ENOSYS so handle_aiocb_rw() falls
 * back to the linearizing bounce-buffer path. */
static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif
  153. static ssize_t handle_aiocb_rw_vector(struct qemu_paiocb *aiocb)
  154. {
  155. ssize_t len;
  156. do {
  157. if (aiocb->aio_type & QEMU_AIO_WRITE)
  158. len = qemu_pwritev(aiocb->aio_fildes,
  159. aiocb->aio_iov,
  160. aiocb->aio_niov,
  161. aiocb->aio_offset);
  162. else
  163. len = qemu_preadv(aiocb->aio_fildes,
  164. aiocb->aio_iov,
  165. aiocb->aio_niov,
  166. aiocb->aio_offset);
  167. } while (len == -1 && errno == EINTR);
  168. if (len == -1)
  169. return -errno;
  170. return len;
  171. }
/*
 * Read/writes the data to/from a given linear buffer.
 *
 * Returns the number of bytes handled or -errno in case of an error. Short
 * reads are only returned if the end of the file is reached.
 */
static ssize_t handle_aiocb_rw_linear(struct qemu_paiocb *aiocb, char *buf)
{
    ssize_t offset = 0;
    ssize_t len;

    while (offset < aiocb->aio_nbytes) {
        if (aiocb->aio_type & QEMU_AIO_WRITE)
            len = pwrite(aiocb->aio_fildes,
                         (const char *)buf + offset,
                         aiocb->aio_nbytes - offset,
                         aiocb->aio_offset + offset);
        else
            len = pread(aiocb->aio_fildes,
                        buf + offset,
                        aiocb->aio_nbytes - offset,
                        aiocb->aio_offset + offset);

        if (len == -1 && errno == EINTR)
            continue;           /* interrupted: retry the same chunk */
        else if (len == -1) {
            offset = -errno;    /* hard error: return -errno */
            break;
        } else if (len == 0)
            break;              /* EOF: return the short byte count */

        offset += len;
    }

    return offset;
}
/*
 * Dispatch a read/write request using the fastest safe strategy:
 * plain pread/pwrite for a single aligned buffer, preadv/pwritev for
 * aligned vectors when available, and a linearizing bounce buffer
 * otherwise.  Returns bytes transferred or -errno.
 */
static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
{
    ssize_t nbytes;
    char *buf;

    if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
        /*
         * If there is just a single buffer, and it is properly aligned
         * we can just use plain pread/pwrite without any problems.
         */
        if (aiocb->aio_niov == 1)
            return handle_aiocb_rw_linear(aiocb, aiocb->aio_iov->iov_base);

        /*
         * We have more than one iovec, and all are properly aligned.
         *
         * Try preadv/pwritev first and fall back to linearizing the
         * buffer if it's not supported.
         */
        if (preadv_present) {
            nbytes = handle_aiocb_rw_vector(aiocb);
            if (nbytes == aiocb->aio_nbytes)
                return nbytes;
            if (nbytes < 0 && nbytes != -ENOSYS)
                return nbytes;
            /* -ENOSYS: remember the syscall is missing and fall through */
            preadv_present = 0;
        }

        /*
         * XXX(hch): short read/write. no easy way to handle the remainder
         * using these interfaces. For now retry using plain
         * pread/pwrite?
         */
    }

    /*
     * Ok, we have to do it the hard way, copy all segments into
     * a single aligned buffer.
     */
    buf = qemu_blockalign(aiocb->common.bs, aiocb->aio_nbytes);
    if (aiocb->aio_type & QEMU_AIO_WRITE) {
        /* Gather the caller's iovecs into the bounce buffer. */
        char *p = buf;
        int i;

        for (i = 0; i < aiocb->aio_niov; ++i) {
            memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
            p += aiocb->aio_iov[i].iov_len;
        }
    }

    nbytes = handle_aiocb_rw_linear(aiocb, buf);
    if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
        /* Scatter the read data back into the caller's iovecs. */
        char *p = buf;
        size_t count = aiocb->aio_nbytes, copy;
        int i;

        for (i = 0; i < aiocb->aio_niov && count; ++i) {
            copy = count;
            if (copy > aiocb->aio_iov[i].iov_len)
                copy = aiocb->aio_iov[i].iov_len;
            memcpy(aiocb->aio_iov[i].iov_base, p, copy);
            p += copy;
            count -= copy;
        }
    }
    qemu_vfree(buf);

    return nbytes;
}
  265. static void posix_aio_notify_event(void);
/*
 * Worker thread main loop: dequeue requests from request_list, execute
 * them synchronously, publish the result and wake the iothread.  A
 * worker retires after roughly 10 seconds with an empty queue.
 */
static void *aio_thread(void *unused)
{
    mutex_lock(&lock);
    pending_threads--;
    mutex_unlock(&lock);
    /* Create the next backlogged worker, if any (see spawn_thread()). */
    do_spawn_thread();

    while (1) {
        struct qemu_paiocb *aiocb;
        ssize_t ret = 0;
        qemu_timeval tv;
        struct timespec ts;

        /* Absolute deadline for the idle timeout: now + 10s. */
        qemu_gettimeofday(&tv);
        ts.tv_sec = tv.tv_sec + 10;
        ts.tv_nsec = 0;

        mutex_lock(&lock);

        while (QTAILQ_EMPTY(&request_list) &&
               !(ret == ETIMEDOUT)) {
            idle_threads++;
            ret = cond_timedwait(&cond, &lock, &ts);
            idle_threads--;
        }

        if (QTAILQ_EMPTY(&request_list))
            break;      /* idle timeout expired: retire this worker */

        aiocb = QTAILQ_FIRST(&request_list);
        QTAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->active = 1;  /* from now on paio_cancel() must wait for us */
        mutex_unlock(&lock);

        switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
        case QEMU_AIO_READ:
            ret = handle_aiocb_rw(aiocb);
            if (ret >= 0 && ret < aiocb->aio_nbytes && aiocb->common.bs->growable) {
                /* A short read means that we have reached EOF. Pad the buffer
                 * with zeros for bytes after EOF. */
                QEMUIOVector qiov;

                qemu_iovec_init_external(&qiov, aiocb->aio_iov,
                                         aiocb->aio_niov);
                qemu_iovec_memset_skip(&qiov, 0, aiocb->aio_nbytes - ret, ret);

                ret = aiocb->aio_nbytes;
            }
            break;
        case QEMU_AIO_WRITE:
            ret = handle_aiocb_rw(aiocb);
            break;
        case QEMU_AIO_FLUSH:
            ret = handle_aiocb_flush(aiocb);
            break;
        case QEMU_AIO_IOCTL:
            ret = handle_aiocb_ioctl(aiocb);
            break;
        default:
            fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
            ret = -EINVAL;
            break;
        }

        mutex_lock(&lock);
        aiocb->ret = ret;       /* publish the result under the lock */
        mutex_unlock(&lock);

        /* Kick the iothread so posix_aio_read() reaps this request. */
        posix_aio_notify_event();
    }

    cur_threads--;
    mutex_unlock(&lock);
    return NULL;
}
  329. static void do_spawn_thread(void)
  330. {
  331. sigset_t set, oldset;
  332. mutex_lock(&lock);
  333. if (!new_threads) {
  334. mutex_unlock(&lock);
  335. return;
  336. }
  337. new_threads--;
  338. pending_threads++;
  339. mutex_unlock(&lock);
  340. /* block all signals */
  341. if (sigfillset(&set)) die("sigfillset");
  342. if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");
  343. thread_create(&thread_id, &attr, aio_thread, NULL);
  344. if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore");
  345. }
/* Bottom half: create backlogged worker threads from the main thread so
 * they inherit its CPU affinity (see spawn_thread()). */
static void spawn_thread_bh_fn(void *opaque)
{
    do_spawn_thread();
}
/*
 * Account for a new worker and arrange for its creation.  Called with
 * 'lock' held (from qemu_paio_submit()).
 */
static void spawn_thread(void)
{
    cur_threads++;
    new_threads++;
    /* If there are threads being created, they will spawn new workers, so
     * we don't spend time creating many threads in a loop holding a mutex or
     * starving the current vcpu.
     *
     * If there are no idle threads, ask the main thread to create one, so we
     * inherit the correct affinity instead of the vcpu affinity.
     */
    if (!pending_threads) {
        qemu_bh_schedule(new_thread_bh);
    }
}
/*
 * Enqueue 'aiocb' on the worker request list, growing the pool first if
 * no worker is idle, then wake one worker.
 */
static void qemu_paio_submit(struct qemu_paiocb *aiocb)
{
    aiocb->ret = -EINPROGRESS;  /* marks the request as still running */
    aiocb->active = 0;
    mutex_lock(&lock);
    if (idle_threads == 0 && cur_threads < max_threads)
        spawn_thread();
    QTAILQ_INSERT_TAIL(&request_list, aiocb, node);
    mutex_unlock(&lock);
    cond_signal(&cond);
}
  376. static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
  377. {
  378. ssize_t ret;
  379. mutex_lock(&lock);
  380. ret = aiocb->ret;
  381. mutex_unlock(&lock);
  382. return ret;
  383. }
  384. static int qemu_paio_error(struct qemu_paiocb *aiocb)
  385. {
  386. ssize_t ret = qemu_paio_return(aiocb);
  387. if (ret < 0)
  388. ret = -ret;
  389. else
  390. ret = 0;
  391. return ret;
  392. }
/*
 * iothread handler for the notification pipe: drain the pipe, then walk
 * the completion list firing the callback of every finished request.
 */
static void posix_aio_read(void *opaque)
{
    PosixAioState *s = opaque;
    struct qemu_paiocb *acb, **pacb;
    int ret;
    ssize_t len;

    /* read all bytes from signal pipe */
    for (;;) {
        char bytes[16];

        len = read(s->rfd, bytes, sizeof(bytes));
        if (len == -1 && errno == EINTR)
            continue; /* try again */
        if (len == sizeof(bytes))
            continue; /* more to read */
        break;
    }

    for(;;) {
        pacb = &s->first_aio;
        for(;;) {
            acb = *pacb;
            if (!acb)
                return;     /* reached the end of the list: all reaped */

            ret = qemu_paio_error(acb);
            if (ret == ECANCELED) {
                /* remove the request */
                *pacb = acb->next;
                qemu_aio_release(acb);
            } else if (ret != EINPROGRESS) {
                /* end of aio */
                if (ret == 0) {
                    ret = qemu_paio_return(acb);

                    /* only a full-length transfer counts as success */
                    if (ret == acb->aio_nbytes)
                        ret = 0;
                    else
                        ret = -EINVAL;
                } else {
                    ret = -ret;
                }

                trace_paio_complete(acb, acb->common.opaque, ret);

                /* remove the request */
                *pacb = acb->next;
                /* call the callback */
                acb->common.cb(acb->common.opaque, ret);

                /* restart from the head: the callback may have changed
                 * the list */
                qemu_aio_release(acb);
                break;
            } else {
                pacb = &acb->next;
            }
        }
    }
}
  444. static int posix_aio_flush(void *opaque)
  445. {
  446. PosixAioState *s = opaque;
  447. return !!s->first_aio;
  448. }
  449. static PosixAioState *posix_aio_state;
  450. static void posix_aio_notify_event(void)
  451. {
  452. char byte = 0;
  453. ssize_t ret;
  454. ret = write(posix_aio_state->wfd, &byte, sizeof(byte));
  455. if (ret < 0 && errno != EAGAIN)
  456. die("write()");
  457. }
  458. static void paio_remove(struct qemu_paiocb *acb)
  459. {
  460. struct qemu_paiocb **pacb;
  461. /* remove the callback from the queue */
  462. pacb = &posix_aio_state->first_aio;
  463. for(;;) {
  464. if (*pacb == NULL) {
  465. fprintf(stderr, "paio_remove: aio request not found!\n");
  466. break;
  467. } else if (*pacb == acb) {
  468. *pacb = acb->next;
  469. qemu_aio_release(acb);
  470. break;
  471. }
  472. pacb = &(*pacb)->next;
  473. }
  474. }
/*
 * Cancel callback for raw_aio_pool.  A still-queued request is unlinked
 * and marked -ECANCELED; if a worker already dequeued it we busy-wait
 * until it completes (the in-flight syscall cannot be aborted), then
 * drop the request from the completion list.
 */
static void paio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb;
    int active = 0;

    trace_paio_cancel(acb, acb->common.opaque);

    mutex_lock(&lock);
    if (!acb->active) {
        /* not picked up by a worker yet: cancel outright */
        QTAILQ_REMOVE(&request_list, acb, node);
        acb->ret = -ECANCELED;
    } else if (acb->ret == -EINPROGRESS) {
        active = 1;
    }
    mutex_unlock(&lock);

    if (active) {
        /* fail safe: if the aio could not be canceled, we wait for
           it */
        while (qemu_paio_error(acb) == EINPROGRESS)
            ;
    }

    paio_remove(acb);
}
/* AIOCB pool for this emulation layer; cancellation goes through paio_cancel. */
static AIOPool raw_aio_pool = {
    .aiocb_size = sizeof(struct qemu_paiocb),
    .cancel = paio_cancel,
};
  500. BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
  501. int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
  502. BlockDriverCompletionFunc *cb, void *opaque, int type)
  503. {
  504. struct qemu_paiocb *acb;
  505. acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
  506. acb->aio_type = type;
  507. acb->aio_fildes = fd;
  508. if (qiov) {
  509. acb->aio_iov = qiov->iov;
  510. acb->aio_niov = qiov->niov;
  511. }
  512. acb->aio_nbytes = nb_sectors * 512;
  513. acb->aio_offset = sector_num * 512;
  514. acb->next = posix_aio_state->first_aio;
  515. posix_aio_state->first_aio = acb;
  516. trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
  517. qemu_paio_submit(acb);
  518. return &acb->common;
  519. }
  520. BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
  521. unsigned long int req, void *buf,
  522. BlockDriverCompletionFunc *cb, void *opaque)
  523. {
  524. struct qemu_paiocb *acb;
  525. acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
  526. acb->aio_type = QEMU_AIO_IOCTL;
  527. acb->aio_fildes = fd;
  528. acb->aio_offset = 0;
  529. acb->aio_ioctl_buf = buf;
  530. acb->aio_ioctl_cmd = req;
  531. acb->next = posix_aio_state->first_aio;
  532. posix_aio_state->first_aio = acb;
  533. qemu_paio_submit(acb);
  534. return &acb->common;
  535. }
/*
 * One-time initialisation of the POSIX AIO emulation: create the
 * non-blocking notification pipe, register its read end with the aio
 * layer, set up detached worker-thread attributes and the spawn bottom
 * half.  Idempotent.  Returns 0 on success, -1 if the pipe cannot be
 * created.
 */
int paio_init(void)
{
    PosixAioState *s;
    int fds[2];
    int ret;

    if (posix_aio_state)
        return 0;       /* already initialised */

    s = g_malloc(sizeof(PosixAioState));
    s->first_aio = NULL;
    if (qemu_pipe(fds) == -1) {
        fprintf(stderr, "failed to create pipe\n");
        g_free(s);
        return -1;
    }

    s->rfd = fds[0];
    s->wfd = fds[1];

    /* NOTE(review): fcntl() return values are ignored here — presumably
     * safe on a freshly created pipe, but worth confirming. */
    fcntl(s->rfd, F_SETFL, O_NONBLOCK);
    fcntl(s->wfd, F_SETFL, O_NONBLOCK);

    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s);

    ret = pthread_attr_init(&attr);
    if (ret)
        die2(ret, "pthread_attr_init");

    /* Workers are detached: nobody joins them when they retire. */
    ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (ret)
        die2(ret, "pthread_attr_setdetachstate");

    QTAILQ_INIT(&request_list);
    new_thread_bh = qemu_bh_new(spawn_thread_bh_fn, NULL);

    posix_aio_state = s;
    return 0;
}