/* posix-aio-compat.c */
  1. /*
  2. * QEMU posix-aio emulation
  3. *
  4. * Copyright IBM, Corp. 2008
  5. *
  6. * Authors:
  7. * Anthony Liguori <aliguori@us.ibm.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2. See
  10. * the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include <sys/ioctl.h>
  14. #include <sys/types.h>
  15. #include <pthread.h>
  16. #include <unistd.h>
  17. #include <errno.h>
  18. #include <time.h>
  19. #include <string.h>
  20. #include <stdlib.h>
  21. #include <stdio.h>
  22. #include "qemu-queue.h"
  23. #include "osdep.h"
  24. #include "sysemu.h"
  25. #include "qemu-common.h"
  26. #include "trace.h"
  27. #include "block_int.h"
  28. #include "block/raw-posix-aio.h"
/*
 * One in-flight AIO request.  Queued on the global request_list until a
 * worker thread picks it up, and chained on PosixAioState.first_aio until
 * its completion is reaped by posix_aio_process_queue().
 */
struct qemu_paiocb {
    BlockDriverAIOCB common;        /* generic AIOCB header; must be first */
    int aio_fildes;                 /* file descriptor to operate on */
    union {
        struct iovec *aio_iov;      /* scatter/gather list (read/write) */
        void *aio_ioctl_buf;        /* ioctl argument buffer (QEMU_AIO_IOCTL) */
    };
    int aio_niov;                   /* number of entries in aio_iov */
    size_t aio_nbytes;              /* total byte count of the request */
#define aio_ioctl_cmd aio_nbytes /* for QEMU_AIO_IOCTL */
    int ev_signo;                   /* signal raised on completion (SIGUSR2) */
    off_t aio_offset;               /* file offset for read/write */
    QTAILQ_ENTRY(qemu_paiocb) node; /* link in the pending request_list */
    int aio_type;                   /* QEMU_AIO_* type and flag bits */
    ssize_t ret;                    /* result; -EINPROGRESS while pending */
    int active;                     /* set once a worker thread has taken it */
    struct qemu_paiocb *next;       /* link in PosixAioState.first_aio */
    int async_context_id;           /* async context that issued the request */
};
/*
 * Completion-notification state: worker threads wake the main loop by
 * writing a byte to wfd (via the SIGUSR2 handler); the main loop drains
 * rfd and walks first_aio to reap finished requests.
 */
typedef struct PosixAioState {
    int rfd, wfd;                    /* read/write ends of the signal pipe */
    struct qemu_paiocb *first_aio;   /* singly linked list of submitted AIOCBs */
} PosixAioState;
/* Thread-pool state; mutable fields below are protected by 'lock'. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER; /* "work available" */
static pthread_t thread_id;
static pthread_attr_t attr;         /* detached-thread attrs, set in paio_init */
static int max_threads = 64;        /* upper bound on pool size */
static int cur_threads = 0;         /* worker threads currently alive */
static int idle_threads = 0;        /* workers blocked in cond_timedwait */
static QTAILQ_HEAD(, qemu_paiocb) request_list;  /* pending requests */

/* Non-zero while preadv/pwritev are believed usable; cleared at runtime
 * if the kernel reports ENOSYS (see handle_aiocb_rw). */
#ifdef CONFIG_PREADV
static int preadv_present = 1;
#else
static int preadv_present = 0;
#endif
/* Report that 'what' failed with error code 'err', then abort. */
static void die2(int err, const char *what)
{
    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
    abort();
}
/* Like die2(), taking the error code from errno. */
static void die(const char *what)
{
    die2(errno, what);
}
  74. static void mutex_lock(pthread_mutex_t *mutex)
  75. {
  76. int ret = pthread_mutex_lock(mutex);
  77. if (ret) die2(ret, "pthread_mutex_lock");
  78. }
  79. static void mutex_unlock(pthread_mutex_t *mutex)
  80. {
  81. int ret = pthread_mutex_unlock(mutex);
  82. if (ret) die2(ret, "pthread_mutex_unlock");
  83. }
  84. static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
  85. struct timespec *ts)
  86. {
  87. int ret = pthread_cond_timedwait(cond, mutex, ts);
  88. if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
  89. return ret;
  90. }
  91. static void cond_signal(pthread_cond_t *cond)
  92. {
  93. int ret = pthread_cond_signal(cond);
  94. if (ret) die2(ret, "pthread_cond_signal");
  95. }
  96. static void thread_create(pthread_t *thread, pthread_attr_t *attr,
  97. void *(*start_routine)(void*), void *arg)
  98. {
  99. int ret = pthread_create(thread, attr, start_routine, arg);
  100. if (ret) die2(ret, "pthread_create");
  101. }
  102. static ssize_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb)
  103. {
  104. int ret;
  105. ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf);
  106. if (ret == -1)
  107. return -errno;
  108. /*
  109. * This looks weird, but the aio code only consideres a request
  110. * successful if it has written the number full number of bytes.
  111. *
  112. * Now we overload aio_nbytes as aio_ioctl_cmd for the ioctl command,
  113. * so in fact we return the ioctl command here to make posix_aio_read()
  114. * happy..
  115. */
  116. return aiocb->aio_nbytes;
  117. }
  118. static ssize_t handle_aiocb_flush(struct qemu_paiocb *aiocb)
  119. {
  120. int ret;
  121. ret = qemu_fdatasync(aiocb->aio_fildes);
  122. if (ret == -1)
  123. return -errno;
  124. return 0;
  125. }
#ifdef CONFIG_PREADV

/* Thin wrappers over the native vectored positional I/O syscalls. */
static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

/* Stubs for hosts without preadv/pwritev: report ENOSYS so callers fall
 * back to the bounce-buffer path (see handle_aiocb_rw). */
static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif
  149. static ssize_t handle_aiocb_rw_vector(struct qemu_paiocb *aiocb)
  150. {
  151. size_t offset = 0;
  152. ssize_t len;
  153. do {
  154. if (aiocb->aio_type & QEMU_AIO_WRITE)
  155. len = qemu_pwritev(aiocb->aio_fildes,
  156. aiocb->aio_iov,
  157. aiocb->aio_niov,
  158. aiocb->aio_offset + offset);
  159. else
  160. len = qemu_preadv(aiocb->aio_fildes,
  161. aiocb->aio_iov,
  162. aiocb->aio_niov,
  163. aiocb->aio_offset + offset);
  164. } while (len == -1 && errno == EINTR);
  165. if (len == -1)
  166. return -errno;
  167. return len;
  168. }
/*
 * Transfer aiocb->aio_nbytes bytes between 'buf' and the file with
 * pread/pwrite, looping until the full count is done, EOF is hit, or an
 * error occurs.  Returns the number of bytes transferred (may be short on
 * EOF) or -errno on failure.
 */
static ssize_t handle_aiocb_rw_linear(struct qemu_paiocb *aiocb, char *buf)
{
    ssize_t offset = 0;   /* progress so far; doubles as the return value */
    ssize_t len;

    while (offset < aiocb->aio_nbytes) {
        if (aiocb->aio_type & QEMU_AIO_WRITE)
            len = pwrite(aiocb->aio_fildes,
                         (const char *)buf + offset,
                         aiocb->aio_nbytes - offset,
                         aiocb->aio_offset + offset);
        else
            len = pread(aiocb->aio_fildes,
                        buf + offset,
                        aiocb->aio_nbytes - offset,
                        aiocb->aio_offset + offset);
        if (len == -1 && errno == EINTR)
            continue;             /* interrupted: retry the same chunk */
        else if (len == -1) {
            offset = -errno;      /* error: overload offset as -errno */
            break;
        } else if (len == 0)
            break;                /* EOF: return the short count */
        offset += len;
    }
    return offset;
}
/*
 * Execute a read or write request, picking the cheapest strategy the
 * request's alignment and iovec shape allow: direct pread/pwrite for a
 * single aligned buffer, preadv/pwritev for aligned vectors, and a
 * linearizing bounce buffer otherwise.  Returns bytes transferred or a
 * negative errno.
 */
static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
{
    ssize_t nbytes;
    char *buf;

    if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
        /*
         * If there is just a single buffer, and it is properly aligned
         * we can just use plain pread/pwrite without any problems.
         */
        if (aiocb->aio_niov == 1)
            return handle_aiocb_rw_linear(aiocb, aiocb->aio_iov->iov_base);

        /*
         * We have more than one iovec, and all are properly aligned.
         *
         * Try preadv/pwritev first and fall back to linearizing the
         * buffer if it's not supported.
         */
        if (preadv_present) {
            nbytes = handle_aiocb_rw_vector(aiocb);
            if (nbytes == aiocb->aio_nbytes)
                return nbytes;
            if (nbytes < 0 && nbytes != -ENOSYS)
                return nbytes;
            /* -ENOSYS: the host lacks the syscalls; disable them globally */
            preadv_present = 0;
        }

        /*
         * XXX(hch): short read/write. no easy way to handle the reminder
         * using these interfaces. For now retry using plain
         * pread/pwrite?
         */
    }

    /*
     * Ok, we have to do it the hard way, copy all segments into
     * a single aligned buffer.
     */
    buf = qemu_blockalign(aiocb->common.bs, aiocb->aio_nbytes);
    if (aiocb->aio_type & QEMU_AIO_WRITE) {
        /* writes: gather the iovec segments into the bounce buffer */
        char *p = buf;
        int i;

        for (i = 0; i < aiocb->aio_niov; ++i) {
            memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
            p += aiocb->aio_iov[i].iov_len;
        }
    }

    nbytes = handle_aiocb_rw_linear(aiocb, buf);
    if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
        /* reads: scatter the bounce buffer back into the caller's iovec */
        char *p = buf;
        size_t count = aiocb->aio_nbytes, copy;
        int i;

        for (i = 0; i < aiocb->aio_niov && count; ++i) {
            copy = count;
            if (copy > aiocb->aio_iov[i].iov_len)
                copy = aiocb->aio_iov[i].iov_len;
            memcpy(aiocb->aio_iov[i].iov_base, p, copy);
            p += copy;
            count -= copy;
        }
    }
    qemu_vfree(buf);

    return nbytes;
}
/*
 * Worker thread main loop.  Pops requests off request_list and executes
 * them; if no work arrives within 10 seconds the thread exits, shrinking
 * the pool.  Results are published in aiocb->ret under 'lock' and the
 * main thread is notified by raising aiocb->ev_signo (workers run with
 * all signals blocked, see spawn_thread, so delivery goes elsewhere).
 */
static void *aio_thread(void *unused)
{
    pid_t pid;

    pid = getpid();

    while (1) {
        struct qemu_paiocb *aiocb;
        ssize_t ret = 0;
        qemu_timeval tv;
        struct timespec ts;

        /* absolute deadline for the idle wait: now + 10s */
        qemu_gettimeofday(&tv);
        ts.tv_sec = tv.tv_sec + 10;
        ts.tv_nsec = 0;

        mutex_lock(&lock);

        /* wait for work until the deadline passes */
        while (QTAILQ_EMPTY(&request_list) &&
               !(ret == ETIMEDOUT)) {
            idle_threads++;
            ret = cond_timedwait(&cond, &lock, &ts);
            idle_threads--;
        }

        if (QTAILQ_EMPTY(&request_list))
            break;   /* timed out with no work; exit (still holding lock) */

        aiocb = QTAILQ_FIRST(&request_list);
        QTAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->active = 1;   /* from here on paio_cancel() must wait for us */
        mutex_unlock(&lock);

        /* execute the request outside the lock */
        switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
        case QEMU_AIO_READ:
        case QEMU_AIO_WRITE:
            ret = handle_aiocb_rw(aiocb);
            break;
        case QEMU_AIO_FLUSH:
            ret = handle_aiocb_flush(aiocb);
            break;
        case QEMU_AIO_IOCTL:
            ret = handle_aiocb_ioctl(aiocb);
            break;
        default:
            fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
            ret = -EINVAL;
            break;
        }

        /* publish the result under the lock */
        mutex_lock(&lock);
        aiocb->ret = ret;
        mutex_unlock(&lock);

        /* wake the main thread via the completion signal */
        if (kill(pid, aiocb->ev_signo)) die("kill failed");
    }

    cur_threads--;        /* 'lock' is still held from the break above */
    mutex_unlock(&lock);

    return NULL;
}
  306. static void spawn_thread(void)
  307. {
  308. sigset_t set, oldset;
  309. cur_threads++;
  310. /* block all signals */
  311. if (sigfillset(&set)) die("sigfillset");
  312. if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");
  313. thread_create(&thread_id, &attr, aio_thread, NULL);
  314. if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore");
  315. }
  316. static void qemu_paio_submit(struct qemu_paiocb *aiocb)
  317. {
  318. aiocb->ret = -EINPROGRESS;
  319. aiocb->active = 0;
  320. mutex_lock(&lock);
  321. if (idle_threads == 0 && cur_threads < max_threads)
  322. spawn_thread();
  323. QTAILQ_INSERT_TAIL(&request_list, aiocb, node);
  324. mutex_unlock(&lock);
  325. cond_signal(&cond);
  326. }
  327. static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
  328. {
  329. ssize_t ret;
  330. mutex_lock(&lock);
  331. ret = aiocb->ret;
  332. mutex_unlock(&lock);
  333. return ret;
  334. }
  335. static int qemu_paio_error(struct qemu_paiocb *aiocb)
  336. {
  337. ssize_t ret = qemu_paio_return(aiocb);
  338. if (ret < 0)
  339. ret = -ret;
  340. else
  341. ret = 0;
  342. return ret;
  343. }
/*
 * Reap finished requests belonging to the current async context: invoke
 * their callbacks and unlink them from s->first_aio.  After every
 * completed request the scan restarts from the list head, because the
 * callback may have added or removed requests.  Returns non-zero if at
 * least one request was reaped.
 */
static int posix_aio_process_queue(void *opaque)
{
    PosixAioState *s = opaque;
    struct qemu_paiocb *acb, **pacb;
    int ret;
    int result = 0;
    int async_context_id = get_async_context_id();

    for(;;) {
        pacb = &s->first_aio;
        for(;;) {
            acb = *pacb;
            if (!acb)
                return result;   /* reached the end of the list: done */

            /* we're only interested in requests in the right context */
            if (acb->async_context_id != async_context_id) {
                pacb = &acb->next;
                continue;
            }

            ret = qemu_paio_error(acb);
            if (ret == ECANCELED) {
                /* remove the request (no callback for cancelled requests) */
                *pacb = acb->next;
                qemu_aio_release(acb);
                result = 1;
            } else if (ret != EINPROGRESS) {
                /* end of aio */
                if (ret == 0) {
                    ret = qemu_paio_return(acb);
                    /* only a full-length transfer counts as success */
                    if (ret == acb->aio_nbytes)
                        ret = 0;
                    else
                        ret = -EINVAL;
                } else {
                    ret = -ret;
                }

                trace_paio_complete(acb, acb->common.opaque, ret);

                /* remove the request */
                *pacb = acb->next;
                /* call the callback */
                acb->common.cb(acb->common.opaque, ret);
                qemu_aio_release(acb);
                result = 1;
                break;   /* restart the scan from the head of the list */
            } else {
                pacb = &acb->next;   /* still in progress: skip it */
            }
        }
    }

    return result;   /* not reached: the inner loop returns directly */
}
  394. static void posix_aio_read(void *opaque)
  395. {
  396. PosixAioState *s = opaque;
  397. ssize_t len;
  398. /* read all bytes from signal pipe */
  399. for (;;) {
  400. char bytes[16];
  401. len = read(s->rfd, bytes, sizeof(bytes));
  402. if (len == -1 && errno == EINTR)
  403. continue; /* try again */
  404. if (len == sizeof(bytes))
  405. continue; /* more to read */
  406. break;
  407. }
  408. posix_aio_process_queue(s);
  409. }
  410. static int posix_aio_flush(void *opaque)
  411. {
  412. PosixAioState *s = opaque;
  413. return !!s->first_aio;
  414. }
  415. static PosixAioState *posix_aio_state;
/*
 * SIGUSR2 handler (async-signal context), raised by worker threads after
 * they publish a result.  Writes one byte to the completion pipe so the
 * main loop's fd handler (posix_aio_read) runs; EAGAIN just means the
 * pipe is already full, which is harmless since the reader drains it.
 */
static void aio_signal_handler(int signum)
{
    if (posix_aio_state) {
        char byte = 0;
        ssize_t ret;

        ret = write(posix_aio_state->wfd, &byte, sizeof(byte));
        if (ret < 0 && errno != EAGAIN)
            die("write()");
    }

    qemu_service_io();
}
  427. static void paio_remove(struct qemu_paiocb *acb)
  428. {
  429. struct qemu_paiocb **pacb;
  430. /* remove the callback from the queue */
  431. pacb = &posix_aio_state->first_aio;
  432. for(;;) {
  433. if (*pacb == NULL) {
  434. fprintf(stderr, "paio_remove: aio request not found!\n");
  435. break;
  436. } else if (*pacb == acb) {
  437. *pacb = acb->next;
  438. qemu_aio_release(acb);
  439. break;
  440. }
  441. pacb = &(*pacb)->next;
  442. }
  443. }
/*
 * AIOPool .cancel hook.  If the request is still queued, pull it off the
 * request list and mark it ECANCELED; if a worker already took it, spin
 * until the worker publishes a result.  Either way the AIOCB ends up
 * removed from the completion list and released.
 */
static void paio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb;
    int active = 0;

    trace_paio_cancel(acb, acb->common.opaque);

    mutex_lock(&lock);
    if (!acb->active) {
        /* not yet taken by a worker: cancel immediately */
        QTAILQ_REMOVE(&request_list, acb, node);
        acb->ret = -ECANCELED;
    } else if (acb->ret == -EINPROGRESS) {
        /* a worker thread is executing it right now */
        active = 1;
    }
    mutex_unlock(&lock);

    if (active) {
        /* fail safe: if the aio could not be canceled, we wait for
           it */
        while (qemu_paio_error(acb) == EINPROGRESS)
            ;
    }

    paio_remove(acb);
}
/* AIOCB pool for this backend; cancellation goes through paio_cancel(). */
static AIOPool raw_aio_pool = {
    .aiocb_size = sizeof(struct qemu_paiocb),
    .cancel = paio_cancel,
};
/*
 * Submit an asynchronous request on 'fd'.  'type' is a QEMU_AIO_* value;
 * 'qiov' carries the data buffers and may be NULL (e.g. for flushes).
 * 'cb' is invoked with 'opaque' on completion.  Returns the new AIOCB,
 * or NULL if allocation fails.
 */
BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_paiocb *acb;

    acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->aio_type = type;
    acb->aio_fildes = fd;
    acb->ev_signo = SIGUSR2;   /* completion notification signal */
    acb->async_context_id = get_async_context_id();

    if (qiov) {
        acb->aio_iov = qiov->iov;
        acb->aio_niov = qiov->niov;
    }
    acb->aio_nbytes = nb_sectors * 512;   /* sectors are 512 bytes */
    acb->aio_offset = sector_num * 512;

    /* link onto the completion-tracking list */
    acb->next = posix_aio_state->first_aio;
    posix_aio_state->first_aio = acb;

    trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
    qemu_paio_submit(acb);
    return &acb->common;
}
/*
 * Submit an asynchronous ioctl ('req' with argument buffer 'buf') on
 * 'fd'.  'cb' is invoked with 'opaque' on completion.  Returns the new
 * AIOCB, or NULL if allocation fails.
 */
BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    struct qemu_paiocb *acb;

    acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->aio_type = QEMU_AIO_IOCTL;
    acb->aio_fildes = fd;
    acb->ev_signo = SIGUSR2;    /* completion notification signal */
    acb->async_context_id = get_async_context_id();
    acb->aio_offset = 0;
    acb->aio_ioctl_buf = buf;
    acb->aio_ioctl_cmd = req;   /* aio_nbytes is overloaded as the command */

    /* link onto the completion-tracking list */
    acb->next = posix_aio_state->first_aio;
    posix_aio_state->first_aio = acb;

    qemu_paio_submit(acb);
    return &acb->common;
}
  513. int paio_init(void)
  514. {
  515. struct sigaction act;
  516. PosixAioState *s;
  517. int fds[2];
  518. int ret;
  519. if (posix_aio_state)
  520. return 0;
  521. s = qemu_malloc(sizeof(PosixAioState));
  522. sigfillset(&act.sa_mask);
  523. act.sa_flags = 0; /* do not restart syscalls to interrupt select() */
  524. act.sa_handler = aio_signal_handler;
  525. sigaction(SIGUSR2, &act, NULL);
  526. s->first_aio = NULL;
  527. if (qemu_pipe(fds) == -1) {
  528. fprintf(stderr, "failed to create pipe\n");
  529. return -1;
  530. }
  531. s->rfd = fds[0];
  532. s->wfd = fds[1];
  533. fcntl(s->rfd, F_SETFL, O_NONBLOCK);
  534. fcntl(s->wfd, F_SETFL, O_NONBLOCK);
  535. qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush,
  536. posix_aio_process_queue, s);
  537. ret = pthread_attr_init(&attr);
  538. if (ret)
  539. die2(ret, "pthread_attr_init");
  540. ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  541. if (ret)
  542. die2(ret, "pthread_attr_setdetachstate");
  543. QTAILQ_INIT(&request_list);
  544. posix_aio_state = s;
  545. return 0;
  546. }