block-migration.c
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "qemu-timer.h"
#include "block-migration.h"
#include "migration.h"
#include "blockdev.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
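
/*
 * Illustrative sizing note, assuming typical values rather than anything
 * guaranteed by these headers: with 512-byte sectors (BDRV_SECTOR_BITS == 9)
 * and BDRV_SECTORS_PER_DIRTY_CHUNK == 2048, each migrated chunk is
 *
 *     BLOCK_SIZE = 2048 << 9 = 1 MiB
 *
 * so every BLK_MIG_FLAG_DEVICE_BLOCK message carries 1 MiB of disk data.
 */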
typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t cur_dirty;
    int64_t completed_sectors;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    unsigned long *aio_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int prev_progress;
    int bulk_completed;
    long double total_time;
    long double prev_time_offset;
    int reads;
} BlkMigState;
static BlkMigState block_mig_state;

static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}
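
/*
 * For reference, the on-the-wire layout produced by blk_send() above,
 * which block_load() parses on the destination:
 *
 *     be64   (sector << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_DEVICE_BLOCK
 *     u8     strlen(device_name)
 *     bytes  device_name (not NUL-terminated)
 *     bytes  BLOCK_SIZE bytes of chunk data
 */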
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}
static inline long double compute_read_bwidth(void)
{
    assert(block_mig_state.total_time != 0);
    return (block_mig_state.reads / block_mig_state.total_time) * BLOCK_SIZE;
}
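
/*
 * Units, as implied by blk_mig_read_cb() below: 'reads' counts completed
 * BLOCK_SIZE reads and 'total_time' accumulates qemu_get_clock_ns() deltas,
 * so the result is bytes per nanosecond.  is_stage2_completed() divides a
 * byte count by this value to estimate the remaining transfer time.
 */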
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}
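
/*
 * Worked example of the bitmap indexing above, assuming a 64-bit host and
 * BDRV_SECTORS_PER_DIRTY_CHUNK == 2048: sector 4096 lives in chunk
 * 4096 / 2048 = 2, which maps to aio_bitmap word idx = 2 / 64 = 0,
 * bit = 2 % 64 = 2.  One bit therefore tracks one in-flight chunk.
 */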
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}
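
/*
 * The arithmetic above is a rounded-up division: with one bit per chunk,
 * the bitmap needs ceil(total_sectors / (BDRV_SECTORS_PER_DIRTY_CHUNK * 8))
 * bytes.  E.g. (illustrative only, assuming 2048-sector chunks) a 10 GiB
 * disk has 20971520 sectors, hence 10240 chunks and a 1280-byte bitmap.
 */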
static void blk_mig_read_cb(void *opaque, int ret)
{
    long double curr_time = qemu_get_clock_ns(rt_clock);
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    block_mig_state.reads++;
    block_mig_state.total_time += (curr_time - block_mig_state.prev_time_offset);
    block_mig_state.prev_time_offset = curr_time;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}
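
/*
 * Lifecycle of the three counters, as used throughout this file: a block
 * is 'submitted' when its bdrv_aio_readv() is issued, moves to 'read_done'
 * here in the completion callback, and finally to 'transferred' once
 * flush_blks() has written it to the migration stream.
 */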
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    if (block_mig_state.submitted == 0) {
        block_mig_state.prev_time_offset = qemu_get_clock_ns(rt_clock);
    }

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);
    block_mig_state.submitted++;

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);
}
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}
static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        drive_get_ref(drive_get_by_blockdev(bs));
        bdrv_set_in_use(bs, 1);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bs->device_name);
        } else {
            DPRINTF("Start full migration for %s\n", bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}
static void init_blk_migration(QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.total_time = 0;
    block_mig_state.reads = 0;

    bdrv_iterate(init_blk_migration_it, NULL);
}
static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}
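
/*
 * Note that a progress message reuses the 64-bit address field: the percent
 * value is shifted by BDRV_SECTOR_BITS exactly like a sector number, and
 * block_load() recovers it with the same shift before printing it.
 */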
static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        if (bmds_aio_inflight(bmds, sector)) {
            bdrv_drain_all();
        }
        if (bdrv_get_dirty(bmds->bs, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                if (block_mig_state.submitted == 0) {
                    block_mig_state.prev_time_offset = qemu_get_clock_ns(rt_clock);
                }

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    qemu_file_set_error(f, ret);
    g_free(blk->buf);
    g_free(blk);
    return 0;
}
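
/*
 * The is_async flag distinguishes the two call sites: block_save_iterate()
 * passes 1 while the guest is still running, so dirty chunks are read with
 * bdrv_aio_readv() and flushed later; block_save_complete() passes 0 once
 * the VM is stopped and reads synchronously with bdrv_read().
 */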
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (mig_save_device_dirty(f, bmds, is_async) == 0) {
            ret = 1;
            break;
        }
    }

    return ret;
}
static void flush_blks(QEMUFile* f)
{
    BlkMigBlock *blk;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f, blk->ret);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}
static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs);
    }

    return dirty * BLOCK_SIZE;
}
static int is_stage2_completed(void)
{
    int64_t remaining_dirty;
    long double bwidth;

    if (block_mig_state.bulk_completed == 1) {

        remaining_dirty = get_remaining_dirty();
        if (remaining_dirty == 0) {
            return 1;
        }

        bwidth = compute_read_bwidth();

        if ((remaining_dirty / bwidth) <=
            migrate_max_downtime()) {
            /* finish stage2 because we think that we can finish the
               remaining work within max_downtime */
            return 1;
        }
    }

    return 0;
}
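
/*
 * Illustrative numbers (not from the source): since bwidth is in bytes per
 * nanosecond, 64 MiB of remaining dirty chunks read at 0.2 bytes/ns
 * (~200 MB/s) give 67108864 / 0.2 ≈ 3.4e8 ns, roughly 335 ms.  Stage 2 ends
 * when that estimate drops below migrate_max_downtime(), which is expected
 * to be in the same nanosecond units.
 */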
static void blk_mig_cleanup(void)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    set_dirty_tracking(0);

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        drive_put_ref(drive_get_by_blockdev(bmds->bs));
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
}
static void block_migration_cancel(void *opaque)
{
    blk_mig_cleanup();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    init_blk_migration(f);

    /* start tracking dirty blocks */
    set_dirty_tracking(1);

    flush_blks(f);
    ret = qemu_file_get_error(f);
    if (ret) {
        blk_mig_cleanup();
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}
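
/*
 * The three save handlers map onto the live-migration stages: setup runs
 * once and turns on dirty tracking, iterate streams data while the guest
 * keeps running (stage 2), and complete drains the remainder with the VM
 * stopped (stage 3).  Each of them terminates its section with
 * BLK_MIG_FLAG_EOS.
 */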
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    flush_blks(f);
    ret = qemu_file_get_error(f);
    if (ret) {
        blk_mig_cleanup();
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
        } else {
            if (blk_mig_save_dirty_block(f, 1) == 0) {
                /* no more dirty blocks */
                break;
            }
        }
    }

    flush_blks(f);
    ret = qemu_file_get_error(f);
    if (ret) {
        blk_mig_cleanup();
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return is_stage2_completed();
}
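
/*
 * The rate-control loop above bounds the amount of data read but not yet
 * written to the stream: it stops submitting new reads once
 * (submitted + read_done) * BLOCK_SIZE reaches the QEMUFile rate limit,
 * so at most one rate-limit window of chunks is buffered in memory.
 */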
static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    flush_blks(f);
    ret = qemu_file_get_error(f);
    if (ret) {
        blk_mig_cleanup();
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk save is completed and
       all async reads have completed */
    assert(block_mig_state.submitted == 0);

    while (blk_mig_save_dirty_block(f, 0) != 0) {
        /* Do nothing */
    }
    blk_mig_cleanup();

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    ret = qemu_file_get_error(f);
    if (ret) {
        return ret;
    }

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            buf = g_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            ret = bdrv_write(bs, addr, buf, nr_sectors);

            g_free(buf);
            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}
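
/*
 * Note the dual use of addr on the destination side: for a DEVICE_BLOCK
 * message it is the starting sector of the chunk, while for a PROGRESS
 * message the same field carries the completion percentage.
 */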
static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}
SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete = block_save_complete,
    .load_state = block_load,
    .cancel = block_migration_cancel,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}
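
/*
 * This registers the "block" savevm section at version 1.  block_is_active()
 * keeps the section idle unless block migration was requested: blk_enable
 * and shared_base come in through MigrationParams, which the migrate command
 * sets via its -b (full block migration) and -i (incremental, shared base)
 * options.
 */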