test-block-iothread.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898
  1. /*
  2. * Block tests for iothreads
  3. *
  4. * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "block/block.h"
  26. #include "block/block_int-global-state.h"
  27. #include "block/blockjob_int.h"
  28. #include "sysemu/block-backend.h"
  29. #include "qapi/error.h"
  30. #include "qapi/qmp/qdict.h"
  31. #include "qemu/main-loop.h"
  32. #include "iothread.h"
/* Dummy read: accepts any in-bounds request and reports success without
 * touching @qiov. The test driver has no data backing it. */
static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    return 0;
}
/* Dummy write: discards the data and reports success. */
static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes,
                                             QEMUIOVector *qiov,
                                             BdrvRequestFlags flags)
{
    return 0;
}
/* Dummy discard: no-op success, so bdrv_pdiscard() takes the driver path. */
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int64_t bytes)
{
    return 0;
}
/* Dummy truncate: reports success without changing any size state. */
static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, BdrvRequestFlags flags,
                      Error **errp)
{
    return 0;
}
/* Dummy block-status: reports the whole queried range as one extent
 * (*pnum = count) with no flags set, i.e. unallocated. */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}
/*
 * Minimal test block driver: every callback succeeds without doing real I/O.
 * Note: not const — test_sync_op_block_status() temporarily clears
 * .bdrv_co_block_status at runtime.
 */
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,    /* smallest non-zero opaque state */

    .bdrv_co_preadv         = bdrv_test_co_preadv,
    .bdrv_co_pwritev        = bdrv_test_co_pwritev,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};
  77. static void test_sync_op_pread(BdrvChild *c)
  78. {
  79. uint8_t buf[512];
  80. int ret;
  81. /* Success */
  82. ret = bdrv_pread(c, 0, sizeof(buf), buf, 0);
  83. g_assert_cmpint(ret, ==, 0);
  84. /* Early error: Negative offset */
  85. ret = bdrv_pread(c, -2, sizeof(buf), buf, 0);
  86. g_assert_cmpint(ret, ==, -EIO);
  87. }
  88. static void test_sync_op_pwrite(BdrvChild *c)
  89. {
  90. uint8_t buf[512] = { 0 };
  91. int ret;
  92. /* Success */
  93. ret = bdrv_pwrite(c, 0, sizeof(buf), buf, 0);
  94. g_assert_cmpint(ret, ==, 0);
  95. /* Early error: Negative offset */
  96. ret = bdrv_pwrite(c, -2, sizeof(buf), buf, 0);
  97. g_assert_cmpint(ret, ==, -EIO);
  98. }
  99. static void test_sync_op_blk_pread(BlockBackend *blk)
  100. {
  101. uint8_t buf[512];
  102. int ret;
  103. /* Success */
  104. ret = blk_pread(blk, 0, sizeof(buf), buf, 0);
  105. g_assert_cmpint(ret, ==, 0);
  106. /* Early error: Negative offset */
  107. ret = blk_pread(blk, -2, sizeof(buf), buf, 0);
  108. g_assert_cmpint(ret, ==, -EIO);
  109. }
  110. static void test_sync_op_blk_pwrite(BlockBackend *blk)
  111. {
  112. uint8_t buf[512] = { 0 };
  113. int ret;
  114. /* Success */
  115. ret = blk_pwrite(blk, 0, sizeof(buf), buf, 0);
  116. g_assert_cmpint(ret, ==, 0);
  117. /* Early error: Negative offset */
  118. ret = blk_pwrite(blk, -2, sizeof(buf), buf, 0);
  119. g_assert_cmpint(ret, ==, -EIO);
  120. }
  121. static void test_sync_op_blk_preadv(BlockBackend *blk)
  122. {
  123. uint8_t buf[512];
  124. QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
  125. int ret;
  126. /* Success */
  127. ret = blk_preadv(blk, 0, sizeof(buf), &qiov, 0);
  128. g_assert_cmpint(ret, ==, 0);
  129. /* Early error: Negative offset */
  130. ret = blk_preadv(blk, -2, sizeof(buf), &qiov, 0);
  131. g_assert_cmpint(ret, ==, -EIO);
  132. }
  133. static void test_sync_op_blk_pwritev(BlockBackend *blk)
  134. {
  135. uint8_t buf[512] = { 0 };
  136. QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
  137. int ret;
  138. /* Success */
  139. ret = blk_pwritev(blk, 0, sizeof(buf), &qiov, 0);
  140. g_assert_cmpint(ret, ==, 0);
  141. /* Early error: Negative offset */
  142. ret = blk_pwritev(blk, -2, sizeof(buf), &qiov, 0);
  143. g_assert_cmpint(ret, ==, -EIO);
  144. }
  145. static void test_sync_op_blk_preadv_part(BlockBackend *blk)
  146. {
  147. uint8_t buf[512];
  148. QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
  149. int ret;
  150. /* Success */
  151. ret = blk_preadv_part(blk, 0, sizeof(buf), &qiov, 0, 0);
  152. g_assert_cmpint(ret, ==, 0);
  153. /* Early error: Negative offset */
  154. ret = blk_preadv_part(blk, -2, sizeof(buf), &qiov, 0, 0);
  155. g_assert_cmpint(ret, ==, -EIO);
  156. }
  157. static void test_sync_op_blk_pwritev_part(BlockBackend *blk)
  158. {
  159. uint8_t buf[512] = { 0 };
  160. QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
  161. int ret;
  162. /* Success */
  163. ret = blk_pwritev_part(blk, 0, sizeof(buf), &qiov, 0, 0);
  164. g_assert_cmpint(ret, ==, 0);
  165. /* Early error: Negative offset */
  166. ret = blk_pwritev_part(blk, -2, sizeof(buf), &qiov, 0, 0);
  167. g_assert_cmpint(ret, ==, -EIO);
  168. }
  169. static void test_sync_op_blk_pwrite_compressed(BlockBackend *blk)
  170. {
  171. uint8_t buf[512] = { 0 };
  172. int ret;
  173. /* Late error: Not supported */
  174. ret = blk_pwrite_compressed(blk, 0, sizeof(buf), buf);
  175. g_assert_cmpint(ret, ==, -ENOTSUP);
  176. /* Early error: Negative offset */
  177. ret = blk_pwrite_compressed(blk, -2, sizeof(buf), buf);
  178. g_assert_cmpint(ret, ==, -EIO);
  179. }
  180. static void test_sync_op_blk_pwrite_zeroes(BlockBackend *blk)
  181. {
  182. int ret;
  183. /* Success */
  184. ret = blk_pwrite_zeroes(blk, 0, 512, 0);
  185. g_assert_cmpint(ret, ==, 0);
  186. /* Early error: Negative offset */
  187. ret = blk_pwrite_zeroes(blk, -2, 512, 0);
  188. g_assert_cmpint(ret, ==, -EIO);
  189. }
  190. static void test_sync_op_load_vmstate(BdrvChild *c)
  191. {
  192. uint8_t buf[512];
  193. int ret;
  194. /* Error: Driver does not support snapshots */
  195. ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
  196. g_assert_cmpint(ret, ==, -ENOTSUP);
  197. }
  198. static void test_sync_op_save_vmstate(BdrvChild *c)
  199. {
  200. uint8_t buf[512] = { 0 };
  201. int ret;
  202. /* Error: Driver does not support snapshots */
  203. ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
  204. g_assert_cmpint(ret, ==, -ENOTSUP);
  205. }
/*
 * Synchronous bdrv_pdiscard(): exercised with and without BDRV_O_UNMAP set
 * on the node, plus the early error path for a negative offset.
 */
static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path: with BDRV_O_UNMAP, the driver callback runs */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported, request is silently a no-op */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
  221. static void test_sync_op_blk_pdiscard(BlockBackend *blk)
  222. {
  223. int ret;
  224. /* Early success: UNMAP not supported */
  225. ret = blk_pdiscard(blk, 0, 512);
  226. g_assert_cmpint(ret, ==, 0);
  227. /* Early error: Negative offset */
  228. ret = blk_pdiscard(blk, -2, 512);
  229. g_assert_cmpint(ret, ==, -EIO);
  230. }
/*
 * Synchronous bdrv_truncate(): success path, early -EINVAL on a negative
 * size, and -EACCES on a read-only node (RDWR flag is cleared for that case
 * and restored afterwards).
 */
static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    /* Restore writability for any tests that run after this one */
    c->bs->open_flags |= BDRV_O_RDWR;
}
  246. static void test_sync_op_blk_truncate(BlockBackend *blk)
  247. {
  248. int ret;
  249. /* Normal success path */
  250. ret = blk_truncate(blk, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
  251. g_assert_cmpint(ret, ==, 0);
  252. /* Early error: Negative offset */
  253. ret = blk_truncate(blk, -2, false, PREALLOC_MODE_OFF, 0, NULL);
  254. g_assert_cmpint(ret, ==, -EINVAL);
  255. }
  256. /* Disable TSA to make bdrv_test.bdrv_co_block_status writable */
  257. static void TSA_NO_TSA test_sync_op_block_status(BdrvChild *c)
  258. {
  259. int ret;
  260. int64_t n;
  261. /* Normal success path */
  262. ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
  263. g_assert_cmpint(ret, ==, 0);
  264. /* Early success: No driver support */
  265. bdrv_test.bdrv_co_block_status = NULL;
  266. ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
  267. g_assert_cmpint(ret, ==, 1);
  268. /* Early success: bytes = 0 */
  269. ret = bdrv_is_allocated(c->bs, 0, 0, &n);
  270. g_assert_cmpint(ret, ==, 0);
  271. /* Early success: Offset > image size*/
  272. ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
  273. g_assert_cmpint(ret, ==, 0);
  274. }
/*
 * Synchronous bdrv_flush(): succeeds on a writable node, and is an early
 * no-op success on a read-only node (RDWR flag cleared and restored).
 */
static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Restore writability for any tests that run after this one */
    c->bs->open_flags |= BDRV_O_RDWR;
}
/*
 * blk_flush(): same as test_sync_op_flush(), but driven through the
 * BlockBackend API; the read-only case is forced by clearing BDRV_O_RDWR
 * on the attached node.
 */
static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Restore writability for any tests that run after this one */
    bs->open_flags |= BDRV_O_RDWR;
}
  300. static void test_sync_op_check(BdrvChild *c)
  301. {
  302. BdrvCheckResult result;
  303. int ret;
  304. /* Error: Driver does not implement check */
  305. ret = bdrv_check(c->bs, &result, 0);
  306. g_assert_cmpint(ret, ==, -ENOTSUP);
  307. }
/*
 * bdrv_activate() on an already-active image: must return early without
 * error. Takes the main-loop graph read lock as bdrv_activate() requires.
 */
static void test_sync_op_activate(BdrvChild *c)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /* Early success: Image is not inactive */
    bdrv_activate(c->bs, NULL);
}
/*
 * One synchronous-operation test case: a g_test path plus the node-level
 * (BdrvChild) and backend-level (BlockBackend) variants of the test.
 * Either function pointer may be NULL when no such variant exists.
 */
typedef struct SyncOpTest {
    const char *name;                   /* g_test case path */
    void (*fn)(BdrvChild *c);           /* node-level variant, or NULL */
    void (*blkfn)(BlockBackend *blk);   /* backend-level variant, or NULL */
} SyncOpTest;
  320. const SyncOpTest sync_op_tests[] = {
  321. {
  322. .name = "/sync-op/pread",
  323. .fn = test_sync_op_pread,
  324. .blkfn = test_sync_op_blk_pread,
  325. }, {
  326. .name = "/sync-op/pwrite",
  327. .fn = test_sync_op_pwrite,
  328. .blkfn = test_sync_op_blk_pwrite,
  329. }, {
  330. .name = "/sync-op/preadv",
  331. .fn = NULL,
  332. .blkfn = test_sync_op_blk_preadv,
  333. }, {
  334. .name = "/sync-op/pwritev",
  335. .fn = NULL,
  336. .blkfn = test_sync_op_blk_pwritev,
  337. }, {
  338. .name = "/sync-op/preadv_part",
  339. .fn = NULL,
  340. .blkfn = test_sync_op_blk_preadv_part,
  341. }, {
  342. .name = "/sync-op/pwritev_part",
  343. .fn = NULL,
  344. .blkfn = test_sync_op_blk_pwritev_part,
  345. }, {
  346. .name = "/sync-op/pwrite_compressed",
  347. .fn = NULL,
  348. .blkfn = test_sync_op_blk_pwrite_compressed,
  349. }, {
  350. .name = "/sync-op/pwrite_zeroes",
  351. .fn = NULL,
  352. .blkfn = test_sync_op_blk_pwrite_zeroes,
  353. }, {
  354. .name = "/sync-op/load_vmstate",
  355. .fn = test_sync_op_load_vmstate,
  356. }, {
  357. .name = "/sync-op/save_vmstate",
  358. .fn = test_sync_op_save_vmstate,
  359. }, {
  360. .name = "/sync-op/pdiscard",
  361. .fn = test_sync_op_pdiscard,
  362. .blkfn = test_sync_op_blk_pdiscard,
  363. }, {
  364. .name = "/sync-op/truncate",
  365. .fn = test_sync_op_truncate,
  366. .blkfn = test_sync_op_blk_truncate,
  367. }, {
  368. .name = "/sync-op/block_status",
  369. .fn = test_sync_op_block_status,
  370. }, {
  371. .name = "/sync-op/flush",
  372. .fn = test_sync_op_flush,
  373. .blkfn = test_sync_op_blk_flush,
  374. }, {
  375. .name = "/sync-op/check",
  376. .fn = test_sync_op_check,
  377. }, {
  378. .name = "/sync-op/activate",
  379. .fn = test_sync_op_activate,
  380. },
  381. };
/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    GLOBAL_STATE_CODE();

    /* Build blk -> bs with the test driver and a 64k virtual size */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);

    /* The parent list is graph state, so read it under the graph lock;
     * the only parent is the BdrvChild created by blk_insert_bs() above */
    bdrv_graph_rdlock_main_loop();
    c = QLIST_FIRST(&bs->parents);
    bdrv_graph_rdunlock_main_loop();

    /* Run both test variants while blk lives in the iothread's context */
    blk_set_aio_context(blk, ctx, &error_abort);
    if (t->fn) {
        t->fn(c);
    }
    if (t->blkfn) {
        t->blkfn(blk);
    }
    /* Move back to the main context before tearing down */
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);
}
/* State of the dummy block job used by test_attach_blockjob(). */
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;   /* set by .complete to end the run loop */
    int n;                  /* loop counter; the test polls it for progress */
} TestBlockJob;
/* .prepare callback: job completion callbacks must run in the main loop
 * context regardless of which AioContext the job ran in. */
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}
/*
 * Job coroutine: transitions to ready, then loops (incrementing s->n as a
 * visible sign of activity) until .complete sets should_complete. Asserts
 * on every iteration that it is still running in the job's AioContext,
 * including after the job has been moved between contexts.
 */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}
/* .complete callback: signals the run loop in test_job_run() to finish. */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
  443. BlockJobDriver test_job_driver = {
  444. .job_driver = {
  445. .instance_size = sizeof(TestBlockJob),
  446. .free = block_job_free,
  447. .user_resume = block_job_user_resume,
  448. .run = test_job_run,
  449. .complete = test_job_complete,
  450. .prepare = test_job_prepare,
  451. },
  452. };
/*
 * Start a block job on a node, then move the node between the main context
 * and an iothread context (twice in each direction). After each switch,
 * reset tjob->n and poll until it advances again, proving the job keeps
 * making progress in the new AioContext. Finally complete the job from the
 * main loop.
 */
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    /* Wait until the job has run at least once in the main context */
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Switch to the iothread and wait for renewed progress */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Back to the main context */
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* And to the iothread once more */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    WITH_JOB_LOCK_GUARD() {
        job_complete_sync_locked(&tjob->common.job, &error_abort);
    }

    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);
}
/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /               \
 *   |  /                 \
 *  bs_a [bdrv_test]     bs_b [bdrv_test]
 *
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /*
     * Create bs_a and its BlockBackend. We cannot take the RESIZE
     * permission because blkverify will not share it on the test
     * image.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext through blk; bs_b is only reachable via
     * bs_verify's parent link to bs_a, yet must be switched too */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}
/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *      /                \
 *   bs_b [raw]         bs_c [raw]
 *      \                /
 *       \              /
 *       bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c, both raw filters on top of the shared bs_a */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /*
     * Do not take the RESIZE permission: This would require the same
     * from bs_c and thus from bs_a; however, blkverify will not share
     * it on bs_b, and thus it will not be available for bs_a.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext; every node of the diamond must follow */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}
/*
 * Test AioContext propagation across a mirror job: changing the context of
 * src or target must move the whole job graph (src, target, filter node and
 * the job itself), must fail when a BlockBackend on src does not allow
 * context changes, and must succeed again once it does.
 */
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target */
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);

    WITH_JOB_LOCK_GUARD() {
        job = job_get_locked("job0");
    }
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src; target, filter and job must follow */
    bdrv_try_change_aio_context(src, ctx, NULL, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target; src and filter must follow back */
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);
    bdrv_try_change_aio_context(target, ctx, NULL, &local_err);
    error_free_or_abort(&local_err);

    /* Nothing may have moved after the failed attempt */
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    /* Tear down: stop the job and move everything back to the main loop */
    job_cancel_sync_all();

    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}
/*
 * Attach a second node (a raw filter opened on top of "base") to a tree
 * whose BlockBackend already lives in an iothread context: the new node
 * must inherit that context on open, and follow a later switch back to
 * the main context.
 */
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    /* blk is created directly in the iothread's context */
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* The freshly opened filter must already be in the iothread context */
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}
/*
 * Removing a node from a BlockBackend that lives in an iothread context
 * must keep the backend's context while the node falls back to the main
 * context; re-inserting the node must pull it into the backend's context
 * again.
 */
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again: blk keeps ctx, bs reverts to the main loop */
    blk_remove_bs(blk);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node: bs follows blk into the iothread context */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    bdrv_unref(bs);
    blk_unref(blk);
}
  724. int main(int argc, char **argv)
  725. {
  726. int i;
  727. bdrv_init();
  728. qemu_init_main_loop(&error_abort);
  729. g_test_init(&argc, &argv, NULL);
  730. for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
  731. const SyncOpTest *t = &sync_op_tests[i];
  732. g_test_add_data_func(t->name, t, test_sync_op);
  733. }
  734. g_test_add_func("/attach/blockjob", test_attach_blockjob);
  735. g_test_add_func("/attach/second_node", test_attach_second_node);
  736. g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
  737. g_test_add_func("/propagate/basic", test_propagate_basic);
  738. g_test_add_func("/propagate/diamond", test_propagate_diamond);
  739. g_test_add_func("/propagate/mirror", test_propagate_mirror);
  740. return g_test_run();
  741. }