block-migration.c

/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "qemu-timer.h"
#include "monitor.h"
#include "block-migration.h"
#include "migration.h"
#include "blockdev.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

#define MAX_IS_ALLOCATED_SEARCH 65536
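
/*
 * Stream format, as written by blk_send() and parsed by block_load() below:
 * each record starts with a be64 whose low bits hold the flags above and
 * whose upper bits hold the sector number (shifted by BDRV_SECTOR_BITS).
 * A DEVICE_BLOCK record is followed by a one-byte device-name length, the
 * name itself, and a fixed BLOCK_SIZE data payload; a PROGRESS record
 * carries the percentage in the sector field; EOS terminates the section.
 */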

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t cur_dirty;
    int64_t completed_sectors;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    unsigned long *aio_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int prev_progress;
    int bulk_completed;
    long double total_time;
    long double prev_time_offset;
    int reads;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

static inline long double compute_read_bwidth(void)
{
    assert(block_mig_state.total_time != 0);
    return (block_mig_state.reads / block_mig_state.total_time) * BLOCK_SIZE;
}
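
/*
 * Note on units: total_time accumulates qemu_get_clock_ns() deltas in
 * blk_mig_read_cb(), and reads counts completed chunk reads (each up to
 * BLOCK_SIZE bytes), so the value returned above is in bytes per
 * nanosecond.  is_stage2_completed() divides the remaining dirty bytes by
 * it to estimate how long the final synchronous pass would take, comparing
 * against migrate_max_downtime(), which is likewise in nanoseconds.
 */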

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}
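
/*
 * The aio_bitmap holds one bit per BDRV_SECTORS_PER_DIRTY_CHUNK-sector
 * chunk, set while an asynchronous read of that chunk is in flight
 * (bmds_set_aio_inflight(..., 1) on submit, ..., 0 in the completion
 * callback).  mig_save_device_dirty() consults it and drains pending I/O
 * with qemu_aio_flush() before touching a chunk that is still being read.
 */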

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}
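
/*
 * Worked example of the size calculation above (a sketch, assuming the
 * usual BDRV_SECTORS_PER_DIRTY_CHUNK value of 2048): an 8 GiB image has
 * 16M sectors, i.e. 8192 chunks; the rounded-up division by sectors per
 * bitmap byte (2048 * 8) yields a 1024-byte allocation, one bit per chunk.
 */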

static void blk_mig_read_cb(void *opaque, int ret)
{
    long double curr_time = qemu_get_clock_ns(rt_clock);
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    block_mig_state.reads++;
    block_mig_state.total_time += (curr_time - block_mig_state.prev_time_offset);
    block_mig_state.prev_time_offset = curr_time;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    if (block_mig_state.submitted == 0) {
        block_mig_state.prev_time_offset = qemu_get_clock_ns(rt_clock);
    }

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);
    if (!blk->aiocb) {
        goto error;
    }
    block_mig_state.submitted++;

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", cur_sector);
    qemu_file_set_error(f, -EIO);
    g_free(blk->buf);
    g_free(blk);
    return 0;
}
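
/*
 * Return convention: mig_save_device_bulk() returns 1 once the bulk cursor
 * has reached the end of the device and 0 while more chunks remain.  Each
 * call queues at most one BLOCK_SIZE read; blk_mig_read_cb() later moves
 * the completed block onto blk_list for flush_blks() to send.
 */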

static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    Monitor *mon = opaque;
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        drive_get_ref(drive_get_by_blockdev(bs));
        bdrv_set_in_use(bs, 1);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            monitor_printf(mon, "Start migration for %s with shared base "
                                "image\n",
                           bs->device_name);
        } else {
            monitor_printf(mon, "Start full migration for %s\n",
                           bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.total_time = 0;
    block_mig_state.reads = 0;

    bdrv_iterate(init_blk_migration_it, mon);
}

static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        monitor_printf(mon, "Completed %d %%\r", progress);
        monitor_flush(mon);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

static int mig_save_device_dirty(Monitor *mon, QEMUFile *f,
                                 BlkMigDevState *bmds, int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        if (bmds_aio_inflight(bmds, sector)) {
            qemu_aio_flush();
        }
        if (bdrv_get_dirty(bmds->bs, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                if (block_mig_state.submitted == 0) {
                    block_mig_state.prev_time_offset = qemu_get_clock_ns(rt_clock);
                }

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);
                if (!blk->aiocb) {
                    goto error;
                }
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector);
    qemu_file_set_error(f, ret);
    g_free(blk->buf);
    g_free(blk);
    return 0;
}

static int blk_mig_save_dirty_block(Monitor *mon, QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
            ret = 1;
            break;
        }
    }

    return ret;
}

static void flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f, blk->ret);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs);
    }

    return dirty * BLOCK_SIZE;
}

static int is_stage2_completed(void)
{
    int64_t remaining_dirty;
    long double bwidth;

    if (block_mig_state.bulk_completed == 1) {

        remaining_dirty = get_remaining_dirty();
        if (remaining_dirty == 0) {
            return 1;
        }

        bwidth = compute_read_bwidth();

        if ((remaining_dirty / bwidth) <=
            migrate_max_downtime()) {
            /* finish stage2 because we think we can finish the remaining
               work below max_downtime */
            return 1;
        }
    }

    return 0;
}
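
/*
 * Worked example of the test above (hypothetical numbers, assuming a
 * BLOCK_SIZE of 1 MiB): 100 chunk reads completed in 0.5 s give a bwidth
 * of (100 / 5e8 ns) * 1 MiB, roughly 0.21 bytes/ns, so 64 MiB of remaining
 * dirty data needs about 0.32 s.  Against the default 30 ms max downtime,
 * stage 2 would keep iterating.
 */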

static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    set_dirty_tracking(0);

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        drive_put_ref(drive_get_by_blockdev(bmds->bs));
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }

    monitor_printf(mon, "\n");
}

static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    int ret;

    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);

        /* start tracking dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    ret = qemu_file_get_error(f);
    if (ret) {
        blk_mig_cleanup(mon);
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    if (stage == 2) {
        /* control the rate of transfer */
        while ((block_mig_state.submitted +
                block_mig_state.read_done) * BLOCK_SIZE <
               qemu_file_get_rate_limit(f)) {
            if (block_mig_state.bulk_completed == 0) {
                /* first finish the bulk phase */
                if (blk_mig_save_bulked_block(mon, f) == 0) {
                    /* finished saving bulk on all devices */
                    block_mig_state.bulk_completed = 1;
                }
            } else {
                if (blk_mig_save_dirty_block(mon, f, 1) == 0) {
                    /* no more dirty blocks */
                    break;
                }
            }
        }

        flush_blks(f);

        ret = qemu_file_get_error(f);
        if (ret) {
            blk_mig_cleanup(mon);
            return ret;
        }
    }

    if (stage == 3) {
        /* we know for sure that the bulk save is completed and
           all async reads have completed */
        assert(block_mig_state.submitted == 0);

        while (blk_mig_save_dirty_block(mon, f, 0) != 0);
        blk_mig_cleanup(mon);

        /* report completion */
        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

        ret = qemu_file_get_error(f);
        if (ret) {
            return ret;
        }

        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}
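
/*
 * Stage protocol, as driven by the savevm live loop: stage 1 enumerates
 * the writable block devices and enables dirty tracking; stage 2 runs
 * repeatedly, streaming bulk chunks and then dirty chunks under the rate
 * limit until is_stage2_completed() estimates the residue fits within max
 * downtime; stage 3 runs with the VM stopped and drains the remaining
 * dirty chunks synchronously.  A negative stage indicates the migration
 * was cancelled and only triggers cleanup.
 */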

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            buf = g_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            ret = bdrv_write(bs, addr, buf, nr_sectors);

            g_free(buf);
            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live(NULL, "block", 0, 1, block_set_params,
                         block_save_live, NULL, block_load, &block_mig_state);
}
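
/*
 * Registration note: the section is saved under the id "block" with
 * version 1.  block_set_params() receives the migrate command's block
 * options (blk_enable from -b, shared_base from -i), and the NULL handler
 * slot is the non-live save_state callback, which block migration does
 * not need.
 */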