/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qemu/main-loop.h"
#include "iothread.h"
/*
 * Dummy read/write implementation shared by .bdrv_co_preadv and
 * .bdrv_co_pwritev below: performs no I/O and always reports success.
 */
static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
                                          uint64_t offset, uint64_t bytes,
                                          QEMUIOVector *qiov, int flags)
{
    return 0;
}
/* Dummy discard implementation: accepts any request and reports success. */
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int bytes)
{
    return 0;
}
/* Dummy truncate implementation: pretends any resize always succeeds. */
static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, Error **errp)
{
    return 0;
}
/*
 * Dummy block-status implementation: reports the whole queried range at once
 * (*pnum = count) with status 0, i.e. no allocation flags set.
 */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}
/*
 * Minimal test block driver whose callbacks all succeed immediately; serves
 * as the leaf node in the iothread tests below.  Note it intentionally
 * implements neither snapshots nor .bdrv_check, which some tests rely on to
 * provoke -ENOTSUP.
 */
static BlockDriver bdrv_test = {
    .format_name          = "test",
    .instance_size        = 1,
    .bdrv_co_preadv       = bdrv_test_co_prwv,
    .bdrv_co_pwritev      = bdrv_test_co_prwv,
    .bdrv_co_pdiscard     = bdrv_test_co_pdiscard,
    .bdrv_co_truncate     = bdrv_test_co_truncate,
    .bdrv_co_block_status = bdrv_test_co_block_status,
};
/* Synchronous bdrv_pread(): success path and an early error (bad offset). */
static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
/* Synchronous bdrv_pwrite(): success path and an early error (bad offset). */
static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
/* Same as test_sync_op_pread(), but through the BlockBackend layer. */
static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
/* Same as test_sync_op_pwrite(), but through the BlockBackend layer. */
static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, -EIO);
}
/*
 * Synchronous bdrv_load_vmstate(): bdrv_test has no snapshot support, so the
 * call must fail with -ENOTSUP.
 */
static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
/*
 * Synchronous bdrv_save_vmstate(): bdrv_test has no snapshot support, so the
 * call must fail with -ENOTSUP.
 */
static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
/*
 * Synchronous bdrv_pdiscard(): exercise the full path (BDRV_O_UNMAP set),
 * the early-return path (BDRV_O_UNMAP cleared) and an early error.
 */
static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
/*
 * Same as test_sync_op_pdiscard() through the BlockBackend layer.  Runs after
 * the BdrvChild variant, which leaves BDRV_O_UNMAP cleared, so only the
 * early-return and early-error paths are exercised here.
 */
static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
/*
 * Synchronous bdrv_truncate(): success, early parameter error, and the
 * read-only error path.  Read-only state is restored before returning so
 * subsequent tests see a writable node again.
 */
static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}
  169. static void test_sync_op_block_status(BdrvChild *c)
  170. {
  171. int ret;
  172. int64_t n;
  173. /* Normal success path */
  174. ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
  175. g_assert_cmpint(ret, ==, 0);
  176. /* Early success: No driver support */
  177. bdrv_test.bdrv_co_block_status = NULL;
  178. ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
  179. g_assert_cmpint(ret, ==, 1);
  180. /* Early success: bytes = 0 */
  181. ret = bdrv_is_allocated(c->bs, 0, 0, &n);
  182. g_assert_cmpint(ret, ==, 0);
  183. /* Early success: Offset > image size*/
  184. ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
  185. g_assert_cmpint(ret, ==, 0);
  186. }
/*
 * Synchronous bdrv_flush(): success path and the read-only early return.
 * Read-only state is restored before returning.
 */
static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}
/* Same as test_sync_op_flush(), but through the BlockBackend layer. */
static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->read_only = true;
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->read_only = false;
    bs->open_flags |= BDRV_O_RDWR;
}
/*
 * Synchronous bdrv_check(): bdrv_test does not implement .bdrv_co_check, so
 * the call must fail with -ENOTSUP.
 */
static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
/*
 * Synchronous bdrv_invalidate_cache(): the node is active, so this is only
 * checked not to deadlock/crash; there is no return value to assert on.
 */
static void test_sync_op_invalidate_cache(BdrvChild *c)
{
    /* Early success: Image is not inactive */
    bdrv_invalidate_cache(c->bs, NULL);
}
/*
 * One synchronous operation to exercise from an iothread: @fn runs it at the
 * BdrvChild level, the optional @blkfn runs it through the BlockBackend.
 */
typedef struct SyncOpTest {
    const char *name;                   /* g_test registration path */
    void (*fn)(BdrvChild *c);           /* node-level variant (required) */
    void (*blkfn)(BlockBackend *blk);   /* BlockBackend variant (optional) */
} SyncOpTest;
  234. const SyncOpTest sync_op_tests[] = {
  235. {
  236. .name = "/sync-op/pread",
  237. .fn = test_sync_op_pread,
  238. .blkfn = test_sync_op_blk_pread,
  239. }, {
  240. .name = "/sync-op/pwrite",
  241. .fn = test_sync_op_pwrite,
  242. .blkfn = test_sync_op_blk_pwrite,
  243. }, {
  244. .name = "/sync-op/load_vmstate",
  245. .fn = test_sync_op_load_vmstate,
  246. }, {
  247. .name = "/sync-op/save_vmstate",
  248. .fn = test_sync_op_save_vmstate,
  249. }, {
  250. .name = "/sync-op/pdiscard",
  251. .fn = test_sync_op_pdiscard,
  252. .blkfn = test_sync_op_blk_pdiscard,
  253. }, {
  254. .name = "/sync-op/truncate",
  255. .fn = test_sync_op_truncate,
  256. }, {
  257. .name = "/sync-op/block_status",
  258. .fn = test_sync_op_block_status,
  259. }, {
  260. .name = "/sync-op/flush",
  261. .fn = test_sync_op_flush,
  262. .blkfn = test_sync_op_blk_flush,
  263. }, {
  264. .name = "/sync-op/check",
  265. .fn = test_sync_op_check,
  266. }, {
  267. .name = "/sync-op/invalidate_cache",
  268. .fn = test_sync_op_invalidate_cache,
  269. },
  270. };
/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    /* Build a 64 kB test node attached to a BlockBackend in the main ctx */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);
    c = QLIST_FIRST(&bs->parents);

    /* Move the whole tree into the iothread, then run the operation while
     * holding that context's lock, as a caller from the main thread must */
    blk_set_aio_context(blk, ctx, &error_abort);
    aio_context_acquire(ctx);
    t->fn(c);
    if (t->blkfn) {
        t->blkfn(blk);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
/*
 * Job state for test_attach_blockjob: @n is bumped on every loop iteration
 * so the test can observe forward progress; @should_complete terminates the
 * run loop once set by the .complete callback.
 */
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;
    int n;
} TestBlockJob;
/* .prepare callback: must always be invoked in the main AioContext. */
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}
/*
 * Job main loop: spins (incrementing s->n) until .complete sets
 * should_complete, asserting on every iteration that the coroutine runs in
 * the job's current AioContext — including right after context switches.
 */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}
/* .complete callback: tell test_job_run()'s loop to finish. */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
  329. BlockJobDriver test_job_driver = {
  330. .job_driver = {
  331. .instance_size = sizeof(TestBlockJob),
  332. .free = block_job_free,
  333. .user_resume = block_job_user_resume,
  334. .run = test_job_run,
  335. .complete = test_job_complete,
  336. .prepare = test_job_prepare,
  337. },
  338. };
/*
 * Check that a running block job keeps making progress (tjob->n advancing)
 * while its node's AioContext is repeatedly switched between the main
 * context and an iothread, and that it can be completed from either side.
 */
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    /* Job runs in the main context first; wait for one iteration */
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Move to the iothread and verify the job still makes progress */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Back to the main context (lock held, as required when the node
     * currently lives in another context) */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* And once more into the iothread before completing the job there */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    aio_context_acquire(ctx);
    job_complete_sync(&tjob->common.job, &error_abort);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /               \
 *   |  /                 \
 *  bs_a [bdrv_test]     bs_b [bdrv_test]
 *
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /* Create bs_a and its BlockBackend */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext; setting it on blk must reach every node */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back (lock held: nodes are in the iothread) */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}
/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *      /                \
 *   bs_b [raw]         bs_c [raw]
 *      \                /
 *       \              /
 *       bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c, both raw filters on top of bs_a */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext: must reach bs_a through both branches, once */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back (lock held: nodes are in the iothread) */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}
/*
 * Check AioContext propagation across a mirror job: switching any involved
 * node (source or target) must move source, target, the implicit filter node
 * and the job itself together — unless a parent that does not allow context
 * changes (a plain BlockBackend) blocks the switch.
 */
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target */
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);
    job = job_get("job0");
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src: everything must follow */
    bdrv_try_set_aio_context(src, ctx, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target: must propagate back to src, too */
    aio_context_acquire(ctx);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_set_aio_context(target, ctx, &local_err);
    g_assert(local_err);
    error_free(local_err);

    /* Nothing may have moved after the failed attempt */
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    aio_context_acquire(ctx);
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_set_aio_context(target, ctx, &error_abort);
    aio_context_release(ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    /* Tear down: stop the job, return everything to the main context */
    job_cancel_sync_all();

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}
/*
 * Check that a node opened while its parent tree already lives in an
 * iothread (a raw filter on top of "base") inherits that AioContext, and
 * follows later switches back to the main context.
 */
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    /* blk and bs start out in the iothread context */
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    /* Open a raw filter referencing "base" after the context switch */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");
    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    /* Moving blk back must also move the filter node */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}
/*
 * Check that a BlockBackend keeps its own (iothread) AioContext when its
 * node is removed — the node falls back to the main context — and that
 * re-inserting a node pulls it into the BlockBackend's context again.
 */
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again */
    aio_context_acquire(ctx);
    blk_remove_bs(blk);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Return to the main context for clean teardown */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
/*
 * Test entry point: initialize the block layer and main loop, register one
 * GTest case per sync_op_tests entry plus the attach/propagate tests, then
 * run them all in this process.
 */
int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    /* One /sync-op/... test per table entry, all driven by test_sync_op() */
    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}