/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
  24. #include "qemu/osdep.h"
  25. #include "block/block.h"
  26. #include "block/blockjob_int.h"
  27. #include "sysemu/block-backend.h"
  28. #include "qapi/error.h"
  29. #include "qapi/qmp/qdict.h"
  30. #include "qemu/main-loop.h"
  31. #include "iothread.h"
  32. static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
  33. int64_t offset, int64_t bytes,
  34. QEMUIOVector *qiov,
  35. BdrvRequestFlags flags)
  36. {
  37. return 0;
  38. }
  39. static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
  40. int64_t offset, int64_t bytes,
  41. QEMUIOVector *qiov,
  42. BdrvRequestFlags flags)
  43. {
  44. return 0;
  45. }
  46. static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
  47. int64_t offset, int64_t bytes)
  48. {
  49. return 0;
  50. }
  51. static int coroutine_fn
  52. bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
  53. PreallocMode prealloc, BdrvRequestFlags flags,
  54. Error **errp)
  55. {
  56. return 0;
  57. }
  58. static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
  59. bool want_zero,
  60. int64_t offset, int64_t count,
  61. int64_t *pnum, int64_t *map,
  62. BlockDriverState **file)
  63. {
  64. *pnum = count;
  65. return 0;
  66. }
  67. static BlockDriver bdrv_test = {
  68. .format_name = "test",
  69. .instance_size = 1,
  70. .bdrv_co_preadv = bdrv_test_co_preadv,
  71. .bdrv_co_pwritev = bdrv_test_co_pwritev,
  72. .bdrv_co_pdiscard = bdrv_test_co_pdiscard,
  73. .bdrv_co_truncate = bdrv_test_co_truncate,
  74. .bdrv_co_block_status = bdrv_test_co_block_status,
  75. };
  76. static void test_sync_op_pread(BdrvChild *c)
  77. {
  78. uint8_t buf[512];
  79. int ret;
  80. /* Success */
  81. ret = bdrv_pread(c, 0, sizeof(buf), buf, 0);
  82. g_assert_cmpint(ret, ==, 0);
  83. /* Early error: Negative offset */
  84. ret = bdrv_pread(c, -2, sizeof(buf), buf, 0);
  85. g_assert_cmpint(ret, ==, -EIO);
  86. }
  87. static void test_sync_op_pwrite(BdrvChild *c)
  88. {
  89. uint8_t buf[512] = { 0 };
  90. int ret;
  91. /* Success */
  92. ret = bdrv_pwrite(c, 0, sizeof(buf), buf, 0);
  93. g_assert_cmpint(ret, ==, 0);
  94. /* Early error: Negative offset */
  95. ret = bdrv_pwrite(c, -2, sizeof(buf), buf, 0);
  96. g_assert_cmpint(ret, ==, -EIO);
  97. }
  98. static void test_sync_op_blk_pread(BlockBackend *blk)
  99. {
  100. uint8_t buf[512];
  101. int ret;
  102. /* Success */
  103. ret = blk_pread(blk, 0, sizeof(buf), buf, 0);
  104. g_assert_cmpint(ret, ==, 0);
  105. /* Early error: Negative offset */
  106. ret = blk_pread(blk, -2, sizeof(buf), buf, 0);
  107. g_assert_cmpint(ret, ==, -EIO);
  108. }
  109. static void test_sync_op_blk_pwrite(BlockBackend *blk)
  110. {
  111. uint8_t buf[512] = { 0 };
  112. int ret;
  113. /* Success */
  114. ret = blk_pwrite(blk, 0, sizeof(buf), buf, 0);
  115. g_assert_cmpint(ret, ==, 0);
  116. /* Early error: Negative offset */
  117. ret = blk_pwrite(blk, -2, sizeof(buf), buf, 0);
  118. g_assert_cmpint(ret, ==, -EIO);
  119. }
  120. static void test_sync_op_blk_preadv(BlockBackend *blk)
  121. {
  122. uint8_t buf[512];
  123. QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
  124. int ret;
  125. /* Success */
  126. ret = blk_preadv(blk, 0, sizeof(buf), &qiov, 0);
  127. g_assert_cmpint(ret, ==, 0);
  128. /* Early error: Negative offset */
  129. ret = blk_preadv(blk, -2, sizeof(buf), &qiov, 0);
  130. g_assert_cmpint(ret, ==, -EIO);
  131. }
  132. static void test_sync_op_blk_pwritev(BlockBackend *blk)
  133. {
  134. uint8_t buf[512] = { 0 };
  135. QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
  136. int ret;
  137. /* Success */
  138. ret = blk_pwritev(blk, 0, sizeof(buf), &qiov, 0);
  139. g_assert_cmpint(ret, ==, 0);
  140. /* Early error: Negative offset */
  141. ret = blk_pwritev(blk, -2, sizeof(buf), &qiov, 0);
  142. g_assert_cmpint(ret, ==, -EIO);
  143. }
  144. static void test_sync_op_blk_preadv_part(BlockBackend *blk)
  145. {
  146. uint8_t buf[512];
  147. QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
  148. int ret;
  149. /* Success */
  150. ret = blk_preadv_part(blk, 0, sizeof(buf), &qiov, 0, 0);
  151. g_assert_cmpint(ret, ==, 0);
  152. /* Early error: Negative offset */
  153. ret = blk_preadv_part(blk, -2, sizeof(buf), &qiov, 0, 0);
  154. g_assert_cmpint(ret, ==, -EIO);
  155. }
  156. static void test_sync_op_blk_pwritev_part(BlockBackend *blk)
  157. {
  158. uint8_t buf[512] = { 0 };
  159. QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
  160. int ret;
  161. /* Success */
  162. ret = blk_pwritev_part(blk, 0, sizeof(buf), &qiov, 0, 0);
  163. g_assert_cmpint(ret, ==, 0);
  164. /* Early error: Negative offset */
  165. ret = blk_pwritev_part(blk, -2, sizeof(buf), &qiov, 0, 0);
  166. g_assert_cmpint(ret, ==, -EIO);
  167. }
  168. static void test_sync_op_blk_pwrite_compressed(BlockBackend *blk)
  169. {
  170. uint8_t buf[512] = { 0 };
  171. int ret;
  172. /* Late error: Not supported */
  173. ret = blk_pwrite_compressed(blk, 0, sizeof(buf), buf);
  174. g_assert_cmpint(ret, ==, -ENOTSUP);
  175. /* Early error: Negative offset */
  176. ret = blk_pwrite_compressed(blk, -2, sizeof(buf), buf);
  177. g_assert_cmpint(ret, ==, -EIO);
  178. }
  179. static void test_sync_op_blk_pwrite_zeroes(BlockBackend *blk)
  180. {
  181. int ret;
  182. /* Success */
  183. ret = blk_pwrite_zeroes(blk, 0, 512, 0);
  184. g_assert_cmpint(ret, ==, 0);
  185. /* Early error: Negative offset */
  186. ret = blk_pwrite_zeroes(blk, -2, 512, 0);
  187. g_assert_cmpint(ret, ==, -EIO);
  188. }
  189. static void test_sync_op_load_vmstate(BdrvChild *c)
  190. {
  191. uint8_t buf[512];
  192. int ret;
  193. /* Error: Driver does not support snapshots */
  194. ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
  195. g_assert_cmpint(ret, ==, -ENOTSUP);
  196. }
  197. static void test_sync_op_save_vmstate(BdrvChild *c)
  198. {
  199. uint8_t buf[512] = { 0 };
  200. int ret;
  201. /* Error: Driver does not support snapshots */
  202. ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
  203. g_assert_cmpint(ret, ==, -ENOTSUP);
  204. }
  205. static void test_sync_op_pdiscard(BdrvChild *c)
  206. {
  207. int ret;
  208. /* Normal success path */
  209. c->bs->open_flags |= BDRV_O_UNMAP;
  210. ret = bdrv_pdiscard(c, 0, 512);
  211. g_assert_cmpint(ret, ==, 0);
  212. /* Early success: UNMAP not supported */
  213. c->bs->open_flags &= ~BDRV_O_UNMAP;
  214. ret = bdrv_pdiscard(c, 0, 512);
  215. g_assert_cmpint(ret, ==, 0);
  216. /* Early error: Negative offset */
  217. ret = bdrv_pdiscard(c, -2, 512);
  218. g_assert_cmpint(ret, ==, -EIO);
  219. }
  220. static void test_sync_op_blk_pdiscard(BlockBackend *blk)
  221. {
  222. int ret;
  223. /* Early success: UNMAP not supported */
  224. ret = blk_pdiscard(blk, 0, 512);
  225. g_assert_cmpint(ret, ==, 0);
  226. /* Early error: Negative offset */
  227. ret = blk_pdiscard(blk, -2, 512);
  228. g_assert_cmpint(ret, ==, -EIO);
  229. }
  230. static void test_sync_op_truncate(BdrvChild *c)
  231. {
  232. int ret;
  233. /* Normal success path */
  234. ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
  235. g_assert_cmpint(ret, ==, 0);
  236. /* Early error: Negative offset */
  237. ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
  238. g_assert_cmpint(ret, ==, -EINVAL);
  239. /* Error: Read-only image */
  240. c->bs->open_flags &= ~BDRV_O_RDWR;
  241. ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
  242. g_assert_cmpint(ret, ==, -EACCES);
  243. c->bs->open_flags |= BDRV_O_RDWR;
  244. }
  245. static void test_sync_op_blk_truncate(BlockBackend *blk)
  246. {
  247. int ret;
  248. /* Normal success path */
  249. ret = blk_truncate(blk, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
  250. g_assert_cmpint(ret, ==, 0);
  251. /* Early error: Negative offset */
  252. ret = blk_truncate(blk, -2, false, PREALLOC_MODE_OFF, 0, NULL);
  253. g_assert_cmpint(ret, ==, -EINVAL);
  254. }
  255. static void test_sync_op_block_status(BdrvChild *c)
  256. {
  257. int ret;
  258. int64_t n;
  259. /* Normal success path */
  260. ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
  261. g_assert_cmpint(ret, ==, 0);
  262. /* Early success: No driver support */
  263. bdrv_test.bdrv_co_block_status = NULL;
  264. ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
  265. g_assert_cmpint(ret, ==, 1);
  266. /* Early success: bytes = 0 */
  267. ret = bdrv_is_allocated(c->bs, 0, 0, &n);
  268. g_assert_cmpint(ret, ==, 0);
  269. /* Early success: Offset > image size*/
  270. ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
  271. g_assert_cmpint(ret, ==, 0);
  272. }
  273. static void test_sync_op_flush(BdrvChild *c)
  274. {
  275. int ret;
  276. /* Normal success path */
  277. ret = bdrv_flush(c->bs);
  278. g_assert_cmpint(ret, ==, 0);
  279. /* Early success: Read-only image */
  280. c->bs->open_flags &= ~BDRV_O_RDWR;
  281. ret = bdrv_flush(c->bs);
  282. g_assert_cmpint(ret, ==, 0);
  283. c->bs->open_flags |= BDRV_O_RDWR;
  284. }
  285. static void test_sync_op_blk_flush(BlockBackend *blk)
  286. {
  287. BlockDriverState *bs = blk_bs(blk);
  288. int ret;
  289. /* Normal success path */
  290. ret = blk_flush(blk);
  291. g_assert_cmpint(ret, ==, 0);
  292. /* Early success: Read-only image */
  293. bs->open_flags &= ~BDRV_O_RDWR;
  294. ret = blk_flush(blk);
  295. g_assert_cmpint(ret, ==, 0);
  296. bs->open_flags |= BDRV_O_RDWR;
  297. }
  298. static void test_sync_op_check(BdrvChild *c)
  299. {
  300. BdrvCheckResult result;
  301. int ret;
  302. /* Error: Driver does not implement check */
  303. ret = bdrv_check(c->bs, &result, 0);
  304. g_assert_cmpint(ret, ==, -ENOTSUP);
  305. }
  306. static void test_sync_op_activate(BdrvChild *c)
  307. {
  308. /* Early success: Image is not inactive */
  309. bdrv_activate(c->bs, NULL);
  310. }
  311. typedef struct SyncOpTest {
  312. const char *name;
  313. void (*fn)(BdrvChild *c);
  314. void (*blkfn)(BlockBackend *blk);
  315. } SyncOpTest;
  316. const SyncOpTest sync_op_tests[] = {
  317. {
  318. .name = "/sync-op/pread",
  319. .fn = test_sync_op_pread,
  320. .blkfn = test_sync_op_blk_pread,
  321. }, {
  322. .name = "/sync-op/pwrite",
  323. .fn = test_sync_op_pwrite,
  324. .blkfn = test_sync_op_blk_pwrite,
  325. }, {
  326. .name = "/sync-op/preadv",
  327. .fn = NULL,
  328. .blkfn = test_sync_op_blk_preadv,
  329. }, {
  330. .name = "/sync-op/pwritev",
  331. .fn = NULL,
  332. .blkfn = test_sync_op_blk_pwritev,
  333. }, {
  334. .name = "/sync-op/preadv_part",
  335. .fn = NULL,
  336. .blkfn = test_sync_op_blk_preadv_part,
  337. }, {
  338. .name = "/sync-op/pwritev_part",
  339. .fn = NULL,
  340. .blkfn = test_sync_op_blk_pwritev_part,
  341. }, {
  342. .name = "/sync-op/pwrite_compressed",
  343. .fn = NULL,
  344. .blkfn = test_sync_op_blk_pwrite_compressed,
  345. }, {
  346. .name = "/sync-op/pwrite_zeroes",
  347. .fn = NULL,
  348. .blkfn = test_sync_op_blk_pwrite_zeroes,
  349. }, {
  350. .name = "/sync-op/load_vmstate",
  351. .fn = test_sync_op_load_vmstate,
  352. }, {
  353. .name = "/sync-op/save_vmstate",
  354. .fn = test_sync_op_save_vmstate,
  355. }, {
  356. .name = "/sync-op/pdiscard",
  357. .fn = test_sync_op_pdiscard,
  358. .blkfn = test_sync_op_blk_pdiscard,
  359. }, {
  360. .name = "/sync-op/truncate",
  361. .fn = test_sync_op_truncate,
  362. .blkfn = test_sync_op_blk_truncate,
  363. }, {
  364. .name = "/sync-op/block_status",
  365. .fn = test_sync_op_block_status,
  366. }, {
  367. .name = "/sync-op/flush",
  368. .fn = test_sync_op_flush,
  369. .blkfn = test_sync_op_blk_flush,
  370. }, {
  371. .name = "/sync-op/check",
  372. .fn = test_sync_op_check,
  373. }, {
  374. .name = "/sync-op/activate",
  375. .fn = test_sync_op_activate,
  376. },
  377. };
  378. /* Test synchronous operations that run in a different iothread, so we have to
  379. * poll for the coroutine there to return. */
  380. static void test_sync_op(const void *opaque)
  381. {
  382. const SyncOpTest *t = opaque;
  383. IOThread *iothread = iothread_new();
  384. AioContext *ctx = iothread_get_aio_context(iothread);
  385. BlockBackend *blk;
  386. BlockDriverState *bs;
  387. BdrvChild *c;
  388. blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
  389. bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
  390. bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
  391. blk_insert_bs(blk, bs, &error_abort);
  392. c = QLIST_FIRST(&bs->parents);
  393. blk_set_aio_context(blk, ctx, &error_abort);
  394. aio_context_acquire(ctx);
  395. if (t->fn) {
  396. t->fn(c);
  397. }
  398. if (t->blkfn) {
  399. t->blkfn(blk);
  400. }
  401. blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
  402. aio_context_release(ctx);
  403. bdrv_unref(bs);
  404. blk_unref(blk);
  405. }
  406. typedef struct TestBlockJob {
  407. BlockJob common;
  408. bool should_complete;
  409. int n;
  410. } TestBlockJob;
  411. static int test_job_prepare(Job *job)
  412. {
  413. g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
  414. return 0;
  415. }
  416. static int coroutine_fn test_job_run(Job *job, Error **errp)
  417. {
  418. TestBlockJob *s = container_of(job, TestBlockJob, common.job);
  419. job_transition_to_ready(&s->common.job);
  420. while (!s->should_complete) {
  421. s->n++;
  422. g_assert(qemu_get_current_aio_context() == job->aio_context);
  423. /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
  424. * emulate some actual activity (probably some I/O) here so that the
  425. * drain involved in AioContext switches has to wait for this activity
  426. * to stop. */
  427. qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
  428. job_pause_point(&s->common.job);
  429. }
  430. g_assert(qemu_get_current_aio_context() == job->aio_context);
  431. return 0;
  432. }
  433. static void test_job_complete(Job *job, Error **errp)
  434. {
  435. TestBlockJob *s = container_of(job, TestBlockJob, common.job);
  436. s->should_complete = true;
  437. }
  438. BlockJobDriver test_job_driver = {
  439. .job_driver = {
  440. .instance_size = sizeof(TestBlockJob),
  441. .free = block_job_free,
  442. .user_resume = block_job_user_resume,
  443. .run = test_job_run,
  444. .complete = test_job_complete,
  445. .prepare = test_job_prepare,
  446. },
  447. };
  448. static void test_attach_blockjob(void)
  449. {
  450. IOThread *iothread = iothread_new();
  451. AioContext *ctx = iothread_get_aio_context(iothread);
  452. BlockBackend *blk;
  453. BlockDriverState *bs;
  454. TestBlockJob *tjob;
  455. blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
  456. bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
  457. blk_insert_bs(blk, bs, &error_abort);
  458. tjob = block_job_create("job0", &test_job_driver, NULL, bs,
  459. 0, BLK_PERM_ALL,
  460. 0, 0, NULL, NULL, &error_abort);
  461. job_start(&tjob->common.job);
  462. while (tjob->n == 0) {
  463. aio_poll(qemu_get_aio_context(), false);
  464. }
  465. blk_set_aio_context(blk, ctx, &error_abort);
  466. tjob->n = 0;
  467. while (tjob->n == 0) {
  468. aio_poll(qemu_get_aio_context(), false);
  469. }
  470. aio_context_acquire(ctx);
  471. blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
  472. aio_context_release(ctx);
  473. tjob->n = 0;
  474. while (tjob->n == 0) {
  475. aio_poll(qemu_get_aio_context(), false);
  476. }
  477. blk_set_aio_context(blk, ctx, &error_abort);
  478. tjob->n = 0;
  479. while (tjob->n == 0) {
  480. aio_poll(qemu_get_aio_context(), false);
  481. }
  482. WITH_JOB_LOCK_GUARD() {
  483. job_complete_sync_locked(&tjob->common.job, &error_abort);
  484. }
  485. aio_context_acquire(ctx);
  486. blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
  487. aio_context_release(ctx);
  488. bdrv_unref(bs);
  489. blk_unref(blk);
  490. }
  491. /*
  492. * Test that changing the AioContext for one node in a tree (here through blk)
  493. * changes all other nodes as well:
  494. *
  495. * blk
  496. * |
  497. * | bs_verify [blkverify]
  498. * | / \
  499. * | / \
  500. * bs_a [bdrv_test] bs_b [bdrv_test]
  501. *
  502. */
  503. static void test_propagate_basic(void)
  504. {
  505. IOThread *iothread = iothread_new();
  506. AioContext *ctx = iothread_get_aio_context(iothread);
  507. AioContext *main_ctx;
  508. BlockBackend *blk;
  509. BlockDriverState *bs_a, *bs_b, *bs_verify;
  510. QDict *options;
  511. /*
  512. * Create bs_a and its BlockBackend. We cannot take the RESIZE
  513. * permission because blkverify will not share it on the test
  514. * image.
  515. */
  516. blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
  517. BLK_PERM_ALL);
  518. bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
  519. blk_insert_bs(blk, bs_a, &error_abort);
  520. /* Create bs_b */
  521. bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);
  522. /* Create blkverify filter that references both bs_a and bs_b */
  523. options = qdict_new();
  524. qdict_put_str(options, "driver", "blkverify");
  525. qdict_put_str(options, "test", "bs_a");
  526. qdict_put_str(options, "raw", "bs_b");
  527. bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
  528. /* Switch the AioContext */
  529. blk_set_aio_context(blk, ctx, &error_abort);
  530. g_assert(blk_get_aio_context(blk) == ctx);
  531. g_assert(bdrv_get_aio_context(bs_a) == ctx);
  532. g_assert(bdrv_get_aio_context(bs_verify) == ctx);
  533. g_assert(bdrv_get_aio_context(bs_b) == ctx);
  534. /* Switch the AioContext back */
  535. main_ctx = qemu_get_aio_context();
  536. aio_context_acquire(ctx);
  537. blk_set_aio_context(blk, main_ctx, &error_abort);
  538. aio_context_release(ctx);
  539. g_assert(blk_get_aio_context(blk) == main_ctx);
  540. g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
  541. g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
  542. g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
  543. bdrv_unref(bs_verify);
  544. bdrv_unref(bs_b);
  545. bdrv_unref(bs_a);
  546. blk_unref(blk);
  547. }
  548. /*
  549. * Test that diamonds in the graph don't lead to endless recursion:
  550. *
  551. * blk
  552. * |
  553. * bs_verify [blkverify]
  554. * / \
  555. * / \
  556. * bs_b [raw] bs_c[raw]
  557. * \ /
  558. * \ /
  559. * bs_a [bdrv_test]
  560. */
  561. static void test_propagate_diamond(void)
  562. {
  563. IOThread *iothread = iothread_new();
  564. AioContext *ctx = iothread_get_aio_context(iothread);
  565. AioContext *main_ctx;
  566. BlockBackend *blk;
  567. BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
  568. QDict *options;
  569. /* Create bs_a */
  570. bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
  571. /* Create bs_b and bc_c */
  572. options = qdict_new();
  573. qdict_put_str(options, "driver", "raw");
  574. qdict_put_str(options, "file", "bs_a");
  575. qdict_put_str(options, "node-name", "bs_b");
  576. bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
  577. options = qdict_new();
  578. qdict_put_str(options, "driver", "raw");
  579. qdict_put_str(options, "file", "bs_a");
  580. qdict_put_str(options, "node-name", "bs_c");
  581. bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
  582. /* Create blkverify filter that references both bs_b and bs_c */
  583. options = qdict_new();
  584. qdict_put_str(options, "driver", "blkverify");
  585. qdict_put_str(options, "test", "bs_b");
  586. qdict_put_str(options, "raw", "bs_c");
  587. bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
  588. /*
  589. * Do not take the RESIZE permission: This would require the same
  590. * from bs_c and thus from bs_a; however, blkverify will not share
  591. * it on bs_b, and thus it will not be available for bs_a.
  592. */
  593. blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
  594. BLK_PERM_ALL);
  595. blk_insert_bs(blk, bs_verify, &error_abort);
  596. /* Switch the AioContext */
  597. blk_set_aio_context(blk, ctx, &error_abort);
  598. g_assert(blk_get_aio_context(blk) == ctx);
  599. g_assert(bdrv_get_aio_context(bs_verify) == ctx);
  600. g_assert(bdrv_get_aio_context(bs_a) == ctx);
  601. g_assert(bdrv_get_aio_context(bs_b) == ctx);
  602. g_assert(bdrv_get_aio_context(bs_c) == ctx);
  603. /* Switch the AioContext back */
  604. main_ctx = qemu_get_aio_context();
  605. aio_context_acquire(ctx);
  606. blk_set_aio_context(blk, main_ctx, &error_abort);
  607. aio_context_release(ctx);
  608. g_assert(blk_get_aio_context(blk) == main_ctx);
  609. g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
  610. g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
  611. g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
  612. g_assert(bdrv_get_aio_context(bs_c) == main_ctx);
  613. blk_unref(blk);
  614. bdrv_unref(bs_verify);
  615. bdrv_unref(bs_c);
  616. bdrv_unref(bs_b);
  617. bdrv_unref(bs_a);
  618. }
  619. static void test_propagate_mirror(void)
  620. {
  621. IOThread *iothread = iothread_new();
  622. AioContext *ctx = iothread_get_aio_context(iothread);
  623. AioContext *main_ctx = qemu_get_aio_context();
  624. BlockDriverState *src, *target, *filter;
  625. BlockBackend *blk;
  626. Job *job;
  627. Error *local_err = NULL;
  628. /* Create src and target*/
  629. src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
  630. target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
  631. &error_abort);
  632. /* Start a mirror job */
  633. mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
  634. MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
  635. BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
  636. false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
  637. &error_abort);
  638. WITH_JOB_LOCK_GUARD() {
  639. job = job_get_locked("job0");
  640. }
  641. filter = bdrv_find_node("filter_node");
  642. /* Change the AioContext of src */
  643. bdrv_try_change_aio_context(src, ctx, NULL, &error_abort);
  644. g_assert(bdrv_get_aio_context(src) == ctx);
  645. g_assert(bdrv_get_aio_context(target) == ctx);
  646. g_assert(bdrv_get_aio_context(filter) == ctx);
  647. g_assert(job->aio_context == ctx);
  648. /* Change the AioContext of target */
  649. aio_context_acquire(ctx);
  650. bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
  651. aio_context_release(ctx);
  652. g_assert(bdrv_get_aio_context(src) == main_ctx);
  653. g_assert(bdrv_get_aio_context(target) == main_ctx);
  654. g_assert(bdrv_get_aio_context(filter) == main_ctx);
  655. /* With a BlockBackend on src, changing target must fail */
  656. blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
  657. blk_insert_bs(blk, src, &error_abort);
  658. bdrv_try_change_aio_context(target, ctx, NULL, &local_err);
  659. error_free_or_abort(&local_err);
  660. g_assert(blk_get_aio_context(blk) == main_ctx);
  661. g_assert(bdrv_get_aio_context(src) == main_ctx);
  662. g_assert(bdrv_get_aio_context(target) == main_ctx);
  663. g_assert(bdrv_get_aio_context(filter) == main_ctx);
  664. /* ...unless we explicitly allow it */
  665. aio_context_acquire(ctx);
  666. blk_set_allow_aio_context_change(blk, true);
  667. bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);
  668. aio_context_release(ctx);
  669. g_assert(blk_get_aio_context(blk) == ctx);
  670. g_assert(bdrv_get_aio_context(src) == ctx);
  671. g_assert(bdrv_get_aio_context(target) == ctx);
  672. g_assert(bdrv_get_aio_context(filter) == ctx);
  673. job_cancel_sync_all();
  674. aio_context_acquire(ctx);
  675. blk_set_aio_context(blk, main_ctx, &error_abort);
  676. bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
  677. aio_context_release(ctx);
  678. blk_unref(blk);
  679. bdrv_unref(src);
  680. bdrv_unref(target);
  681. }
  682. static void test_attach_second_node(void)
  683. {
  684. IOThread *iothread = iothread_new();
  685. AioContext *ctx = iothread_get_aio_context(iothread);
  686. AioContext *main_ctx = qemu_get_aio_context();
  687. BlockBackend *blk;
  688. BlockDriverState *bs, *filter;
  689. QDict *options;
  690. blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
  691. bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
  692. blk_insert_bs(blk, bs, &error_abort);
  693. options = qdict_new();
  694. qdict_put_str(options, "driver", "raw");
  695. qdict_put_str(options, "file", "base");
  696. filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
  697. g_assert(blk_get_aio_context(blk) == ctx);
  698. g_assert(bdrv_get_aio_context(bs) == ctx);
  699. g_assert(bdrv_get_aio_context(filter) == ctx);
  700. aio_context_acquire(ctx);
  701. blk_set_aio_context(blk, main_ctx, &error_abort);
  702. aio_context_release(ctx);
  703. g_assert(blk_get_aio_context(blk) == main_ctx);
  704. g_assert(bdrv_get_aio_context(bs) == main_ctx);
  705. g_assert(bdrv_get_aio_context(filter) == main_ctx);
  706. bdrv_unref(filter);
  707. bdrv_unref(bs);
  708. blk_unref(blk);
  709. }
  710. static void test_attach_preserve_blk_ctx(void)
  711. {
  712. IOThread *iothread = iothread_new();
  713. AioContext *ctx = iothread_get_aio_context(iothread);
  714. BlockBackend *blk;
  715. BlockDriverState *bs;
  716. blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
  717. bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
  718. bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
  719. /* Add node to BlockBackend that has an iothread context assigned */
  720. blk_insert_bs(blk, bs, &error_abort);
  721. g_assert(blk_get_aio_context(blk) == ctx);
  722. g_assert(bdrv_get_aio_context(bs) == ctx);
  723. /* Remove the node again */
  724. aio_context_acquire(ctx);
  725. blk_remove_bs(blk);
  726. aio_context_release(ctx);
  727. g_assert(blk_get_aio_context(blk) == ctx);
  728. g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());
  729. /* Re-attach the node */
  730. blk_insert_bs(blk, bs, &error_abort);
  731. g_assert(blk_get_aio_context(blk) == ctx);
  732. g_assert(bdrv_get_aio_context(bs) == ctx);
  733. aio_context_acquire(ctx);
  734. blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
  735. aio_context_release(ctx);
  736. bdrv_unref(bs);
  737. blk_unref(blk);
  738. }
  739. int main(int argc, char **argv)
  740. {
  741. int i;
  742. bdrv_init();
  743. qemu_init_main_loop(&error_abort);
  744. g_test_init(&argc, &argv, NULL);
  745. for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
  746. const SyncOpTest *t = &sync_op_tests[i];
  747. g_test_add_data_func(t->name, t, test_sync_op);
  748. }
  749. g_test_add_func("/attach/blockjob", test_attach_blockjob);
  750. g_test_add_func("/attach/second_node", test_attach_second_node);
  751. g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
  752. g_test_add_func("/propagate/basic", test_propagate_basic);
  753. g_test_add_func("/propagate/diamond", test_propagate_diamond);
  754. g_test_add_func("/propagate/mirror", test_propagate_mirror);
  755. return g_test_run();
  756. }