/* migration.c */
  1. /*
  2. * QEMU live migration
  3. *
  4. * Copyright IBM, Corp. 2008
  5. *
  6. * Authors:
  7. * Anthony Liguori <aliguori@us.ibm.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2. See
  10. * the COPYING file in the top-level directory.
  11. *
  12. * Contributions after 2012-01-13 are licensed under the terms of the
  13. * GNU GPL, version 2 or (at your option) any later version.
  14. */
  15. #include "qemu-common.h"
  16. #include "migration/migration.h"
  17. #include "monitor/monitor.h"
  18. #include "migration/qemu-file.h"
  19. #include "sysemu/sysemu.h"
  20. #include "block/block.h"
  21. #include "qemu/sockets.h"
  22. #include "migration/block.h"
  23. #include "qemu/thread.h"
  24. #include "qmp-commands.h"
/* Define DEBUG_MIGRATION to enable per-event tracing on stdout. */
//#define DEBUG_MIGRATION

#ifdef DEBUG_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("migration: " fmt, ## __VA_ARGS__); } while (0)
#else
/* Compiles to nothing; arguments are not evaluated when tracing is off. */
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
/* Migration state machine: a migration starts in SETUP, moves to ACTIVE
 * once the transport is connected, and ends in COMPLETED, CANCELLED or
 * ERROR. */
enum {
    MIG_STATE_ERROR,
    MIG_STATE_SETUP,
    MIG_STATE_CANCELLED,
    MIG_STATE_ACTIVE,
    MIG_STATE_COMPLETED,
};

#define MAX_THROTTLE (32 << 20) /* Migration speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY 100
/* Number of BUFFER_DELAY slots per second; used to convert a bytes/sec
 * bandwidth limit into a per-slot transfer budget. */
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
/* Listeners interested in migration state transitions; notified by the
 * migrate_fd_* helpers below. */
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* When we add fault tolerance, we could have several
   migrations at once. For now we don't need to add
   dynamic creation of migration */

/*
 * Return the single global MigrationState.  The instance lives in a
 * function-local static, so it is zero-initialized except for the
 * designated fields and is never freed.
 */
MigrationState *migrate_get_current(void)
{
    static MigrationState current_migration = {
        .state = MIG_STATE_SETUP,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
    };
    return &current_migration;
}
  61. void qemu_start_incoming_migration(const char *uri, Error **errp)
  62. {
  63. const char *p;
  64. if (strstart(uri, "tcp:", &p))
  65. tcp_start_incoming_migration(p, errp);
  66. #if !defined(WIN32)
  67. else if (strstart(uri, "exec:", &p))
  68. exec_start_incoming_migration(p, errp);
  69. else if (strstart(uri, "unix:", &p))
  70. unix_start_incoming_migration(p, errp);
  71. else if (strstart(uri, "fd:", &p))
  72. fd_start_incoming_migration(p, errp);
  73. #endif
  74. else {
  75. error_setg(errp, "unknown migration protocol: %s", uri);
  76. }
  77. }
  78. static void process_incoming_migration_co(void *opaque)
  79. {
  80. QEMUFile *f = opaque;
  81. int ret;
  82. ret = qemu_loadvm_state(f);
  83. qemu_fclose(f);
  84. if (ret < 0) {
  85. fprintf(stderr, "load of migration failed\n");
  86. exit(0);
  87. }
  88. qemu_announce_self();
  89. DPRINTF("successfully loaded vm state\n");
  90. bdrv_clear_incoming_migration_all();
  91. /* Make sure all file formats flush their mutable metadata */
  92. bdrv_invalidate_cache_all();
  93. if (autostart) {
  94. vm_start();
  95. } else {
  96. runstate_set(RUN_STATE_PAUSED);
  97. }
  98. }
  99. void process_incoming_migration(QEMUFile *f)
  100. {
  101. Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
  102. int fd = qemu_get_fd(f);
  103. assert(fd != -1);
  104. qemu_set_nonblock(fd);
  105. qemu_coroutine_enter(co, f);
  106. }
/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve. It is an internal measure. All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 30000000;

/* Return the maximum tolerated guest pause, in nanoseconds.
 * Adjustable at runtime via qmp_migrate_set_downtime(). */
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}
  116. MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
  117. {
  118. MigrationCapabilityStatusList *head = NULL;
  119. MigrationCapabilityStatusList *caps;
  120. MigrationState *s = migrate_get_current();
  121. int i;
  122. for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
  123. if (head == NULL) {
  124. head = g_malloc0(sizeof(*caps));
  125. caps = head;
  126. } else {
  127. caps->next = g_malloc0(sizeof(*caps));
  128. caps = caps->next;
  129. }
  130. caps->value =
  131. g_malloc(sizeof(*caps->value));
  132. caps->value->capability = i;
  133. caps->value->state = s->enabled_capabilities[i];
  134. }
  135. return head;
  136. }
  137. static void get_xbzrle_cache_stats(MigrationInfo *info)
  138. {
  139. if (migrate_use_xbzrle()) {
  140. info->has_xbzrle_cache = true;
  141. info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
  142. info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
  143. info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
  144. info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
  145. info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
  146. info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
  147. }
  148. }
  149. MigrationInfo *qmp_query_migrate(Error **errp)
  150. {
  151. MigrationInfo *info = g_malloc0(sizeof(*info));
  152. MigrationState *s = migrate_get_current();
  153. switch (s->state) {
  154. case MIG_STATE_SETUP:
  155. /* no migration has happened ever */
  156. break;
  157. case MIG_STATE_ACTIVE:
  158. info->has_status = true;
  159. info->status = g_strdup("active");
  160. info->has_total_time = true;
  161. info->total_time = qemu_get_clock_ms(rt_clock)
  162. - s->total_time;
  163. info->has_expected_downtime = true;
  164. info->expected_downtime = s->expected_downtime;
  165. info->has_ram = true;
  166. info->ram = g_malloc0(sizeof(*info->ram));
  167. info->ram->transferred = ram_bytes_transferred();
  168. info->ram->remaining = ram_bytes_remaining();
  169. info->ram->total = ram_bytes_total();
  170. info->ram->duplicate = dup_mig_pages_transferred();
  171. info->ram->normal = norm_mig_pages_transferred();
  172. info->ram->normal_bytes = norm_mig_bytes_transferred();
  173. info->ram->dirty_pages_rate = s->dirty_pages_rate;
  174. if (blk_mig_active()) {
  175. info->has_disk = true;
  176. info->disk = g_malloc0(sizeof(*info->disk));
  177. info->disk->transferred = blk_mig_bytes_transferred();
  178. info->disk->remaining = blk_mig_bytes_remaining();
  179. info->disk->total = blk_mig_bytes_total();
  180. }
  181. get_xbzrle_cache_stats(info);
  182. break;
  183. case MIG_STATE_COMPLETED:
  184. get_xbzrle_cache_stats(info);
  185. info->has_status = true;
  186. info->status = g_strdup("completed");
  187. info->total_time = s->total_time;
  188. info->has_downtime = true;
  189. info->downtime = s->downtime;
  190. info->has_ram = true;
  191. info->ram = g_malloc0(sizeof(*info->ram));
  192. info->ram->transferred = ram_bytes_transferred();
  193. info->ram->remaining = 0;
  194. info->ram->total = ram_bytes_total();
  195. info->ram->duplicate = dup_mig_pages_transferred();
  196. info->ram->normal = norm_mig_pages_transferred();
  197. info->ram->normal_bytes = norm_mig_bytes_transferred();
  198. break;
  199. case MIG_STATE_ERROR:
  200. info->has_status = true;
  201. info->status = g_strdup("failed");
  202. break;
  203. case MIG_STATE_CANCELLED:
  204. info->has_status = true;
  205. info->status = g_strdup("cancelled");
  206. break;
  207. }
  208. return info;
  209. }
  210. void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
  211. Error **errp)
  212. {
  213. MigrationState *s = migrate_get_current();
  214. MigrationCapabilityStatusList *cap;
  215. if (s->state == MIG_STATE_ACTIVE) {
  216. error_set(errp, QERR_MIGRATION_ACTIVE);
  217. return;
  218. }
  219. for (cap = params; cap; cap = cap->next) {
  220. s->enabled_capabilities[cap->value->capability] = cap->value->state;
  221. }
  222. }
  223. /* shared migration helpers */
/*
 * Close the outgoing stream, if one is open.
 *
 * Returns the qemu_fclose() result (negative on failure), or 0 when
 * there was nothing to close.  Note the ordering: s->file must stay
 * valid across qemu_fclose(), because closing the buffered file
 * re-enters this module (buffered_close reads s->file).  The transport
 * fd is expected to have been closed by that path already
 * (buffered_close -> migrate_fd_close sets s->fd = -1), hence the
 * assertion.
 */
static int migrate_fd_cleanup(MigrationState *s)
{
    int ret = 0;
    if (s->file) {
        DPRINTF("closing file\n");
        ret = qemu_fclose(s->file);
        s->file = NULL;
    }
    assert(s->fd == -1);
    return ret;
}
/*
 * Put the migration into the ERROR state, notify listeners of the
 * transition, then release the outgoing stream.
 */
void migrate_fd_error(MigrationState *s)
{
    DPRINTF("setting error state\n");
    s->state = MIG_STATE_ERROR;
    notifier_list_notify(&migration_state_notifiers, s);
    migrate_fd_cleanup(s);
}
  242. static void migrate_fd_completed(MigrationState *s)
  243. {
  244. DPRINTF("setting completed state\n");
  245. if (migrate_fd_cleanup(s) < 0) {
  246. s->state = MIG_STATE_ERROR;
  247. } else {
  248. s->state = MIG_STATE_COMPLETED;
  249. runstate_set(RUN_STATE_POSTMIGRATE);
  250. }
  251. notifier_list_notify(&migration_state_notifiers, s);
  252. }
  253. static ssize_t migrate_fd_put_buffer(MigrationState *s, const void *data,
  254. size_t size)
  255. {
  256. ssize_t ret;
  257. if (s->state != MIG_STATE_ACTIVE) {
  258. return -EIO;
  259. }
  260. do {
  261. ret = s->write(s, data, size);
  262. } while (ret == -1 && ((s->get_error(s)) == EINTR));
  263. if (ret == -1)
  264. ret = -(s->get_error(s));
  265. return ret;
  266. }
  267. static void migrate_fd_cancel(MigrationState *s)
  268. {
  269. if (s->state != MIG_STATE_ACTIVE)
  270. return;
  271. DPRINTF("cancelling migration\n");
  272. s->state = MIG_STATE_CANCELLED;
  273. notifier_list_notify(&migration_state_notifiers, s);
  274. qemu_savevm_state_cancel();
  275. migrate_fd_cleanup(s);
  276. }
  277. int migrate_fd_close(MigrationState *s)
  278. {
  279. int rc = 0;
  280. if (s->fd != -1) {
  281. rc = s->close(s);
  282. s->fd = -1;
  283. }
  284. return rc;
  285. }
/* Register @notify to be called on every migration state transition. */
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}
/* Unregister a notifier added with add_migration_state_change_notifier(). */
void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}
/* True while a migration is in flight (ACTIVE state). */
bool migration_is_active(MigrationState *s)
{
    return s->state == MIG_STATE_ACTIVE;
}
/* True once a migration has completed successfully. */
bool migration_has_finished(MigrationState *s)
{
    return s->state == MIG_STATE_COMPLETED;
}
/* True when a migration ended without completing: cancelled or errored. */
bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIG_STATE_CANCELLED ||
            s->state == MIG_STATE_ERROR);
}
  307. static MigrationState *migrate_init(const MigrationParams *params)
  308. {
  309. MigrationState *s = migrate_get_current();
  310. int64_t bandwidth_limit = s->bandwidth_limit;
  311. bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
  312. int64_t xbzrle_cache_size = s->xbzrle_cache_size;
  313. memcpy(enabled_capabilities, s->enabled_capabilities,
  314. sizeof(enabled_capabilities));
  315. memset(s, 0, sizeof(*s));
  316. s->bandwidth_limit = bandwidth_limit;
  317. s->params = *params;
  318. memcpy(s->enabled_capabilities, enabled_capabilities,
  319. sizeof(enabled_capabilities));
  320. s->xbzrle_cache_size = xbzrle_cache_size;
  321. s->bandwidth_limit = bandwidth_limit;
  322. s->state = MIG_STATE_SETUP;
  323. s->total_time = qemu_get_clock_ms(rt_clock);
  324. return s;
  325. }
/* Reasons that currently forbid starting a migration; most recent first. */
static GSList *migration_blockers;

/* Forbid migration for as long as @reason stands; the caller retains
 * ownership of the Error and must remove it with migrate_del_blocker(). */
void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

/* Remove a blocker previously installed with migrate_add_blocker(). */
void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
  335. void qmp_migrate(const char *uri, bool has_blk, bool blk,
  336. bool has_inc, bool inc, bool has_detach, bool detach,
  337. Error **errp)
  338. {
  339. Error *local_err = NULL;
  340. MigrationState *s = migrate_get_current();
  341. MigrationParams params;
  342. const char *p;
  343. params.blk = blk;
  344. params.shared = inc;
  345. if (s->state == MIG_STATE_ACTIVE) {
  346. error_set(errp, QERR_MIGRATION_ACTIVE);
  347. return;
  348. }
  349. if (qemu_savevm_state_blocked(errp)) {
  350. return;
  351. }
  352. if (migration_blockers) {
  353. *errp = error_copy(migration_blockers->data);
  354. return;
  355. }
  356. s = migrate_init(&params);
  357. if (strstart(uri, "tcp:", &p)) {
  358. tcp_start_outgoing_migration(s, p, &local_err);
  359. #if !defined(WIN32)
  360. } else if (strstart(uri, "exec:", &p)) {
  361. exec_start_outgoing_migration(s, p, &local_err);
  362. } else if (strstart(uri, "unix:", &p)) {
  363. unix_start_outgoing_migration(s, p, &local_err);
  364. } else if (strstart(uri, "fd:", &p)) {
  365. fd_start_outgoing_migration(s, p, &local_err);
  366. #endif
  367. } else {
  368. error_set(errp, QERR_INVALID_PARAMETER_VALUE, "uri", "a valid migration protocol");
  369. return;
  370. }
  371. if (local_err) {
  372. migrate_fd_error(s);
  373. error_propagate(errp, local_err);
  374. return;
  375. }
  376. }
/* QMP migrate_cancel: abort the current migration, if any. */
void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}
  381. void qmp_migrate_set_cache_size(int64_t value, Error **errp)
  382. {
  383. MigrationState *s = migrate_get_current();
  384. /* Check for truncation */
  385. if (value != (size_t)value) {
  386. error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
  387. "exceeding address space");
  388. return;
  389. }
  390. s->xbzrle_cache_size = xbzrle_cache_resize(value);
  391. }
/* QMP query-migrate-cache-size: report the XBZRLE cache size in bytes. */
int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}
  396. void qmp_migrate_set_speed(int64_t value, Error **errp)
  397. {
  398. MigrationState *s;
  399. if (value < 0) {
  400. value = 0;
  401. }
  402. s = migrate_get_current();
  403. s->bandwidth_limit = value;
  404. qemu_file_set_rate_limit(s->file, s->bandwidth_limit);
  405. }
  406. void qmp_migrate_set_downtime(double value, Error **errp)
  407. {
  408. value *= 1e9;
  409. value = MAX(0, MIN(UINT64_MAX, value));
  410. max_downtime = (uint64_t)value;
  411. }
/* Non-zero when the XBZRLE capability is enabled on the current state. */
int migrate_use_xbzrle(void)
{
    MigrationState *s;
    s = migrate_get_current();
    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}
/* Current XBZRLE cache size, in bytes. */
int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;
    s = migrate_get_current();
    return s->xbzrle_cache_size;
}
  424. /* migration thread support */
  425. static ssize_t buffered_flush(MigrationState *s)
  426. {
  427. size_t offset = 0;
  428. ssize_t ret = 0;
  429. DPRINTF("flushing %zu byte(s) of data\n", s->buffer_size);
  430. while (s->bytes_xfer < s->xfer_limit && offset < s->buffer_size) {
  431. size_t to_send = MIN(s->buffer_size - offset, s->xfer_limit - s->bytes_xfer);
  432. ret = migrate_fd_put_buffer(s, s->buffer + offset, to_send);
  433. if (ret <= 0) {
  434. DPRINTF("error flushing data, %zd\n", ret);
  435. break;
  436. } else {
  437. DPRINTF("flushed %zd byte(s)\n", ret);
  438. offset += ret;
  439. s->bytes_xfer += ret;
  440. }
  441. }
  442. DPRINTF("flushed %zu of %zu byte(s)\n", offset, s->buffer_size);
  443. memmove(s->buffer, s->buffer + offset, s->buffer_size - offset);
  444. s->buffer_size -= offset;
  445. if (ret < 0) {
  446. return ret;
  447. }
  448. return offset;
  449. }
  450. static int buffered_put_buffer(void *opaque, const uint8_t *buf,
  451. int64_t pos, int size)
  452. {
  453. MigrationState *s = opaque;
  454. ssize_t error;
  455. DPRINTF("putting %d bytes at %" PRId64 "\n", size, pos);
  456. error = qemu_file_get_error(s->file);
  457. if (error) {
  458. DPRINTF("flush when error, bailing: %s\n", strerror(-error));
  459. return error;
  460. }
  461. if (size <= 0) {
  462. return size;
  463. }
  464. if (size > (s->buffer_capacity - s->buffer_size)) {
  465. DPRINTF("increasing buffer capacity from %zu by %zu\n",
  466. s->buffer_capacity, size + 1024);
  467. s->buffer_capacity += size + 1024;
  468. s->buffer = g_realloc(s->buffer, s->buffer_capacity);
  469. }
  470. memcpy(s->buffer + s->buffer_size, buf, size);
  471. s->buffer_size += size;
  472. return size;
  473. }
  474. static int buffered_close(void *opaque)
  475. {
  476. MigrationState *s = opaque;
  477. ssize_t ret = 0;
  478. int ret2;
  479. DPRINTF("closing\n");
  480. s->xfer_limit = INT_MAX;
  481. while (!qemu_file_get_error(s->file) && s->buffer_size) {
  482. ret = buffered_flush(s);
  483. if (ret < 0) {
  484. break;
  485. }
  486. }
  487. ret2 = migrate_fd_close(s);
  488. if (ret >= 0) {
  489. ret = ret2;
  490. }
  491. s->complete = true;
  492. return ret;
  493. }
/* QEMUFileOps get_fd hook: expose the underlying transport fd. */
static int buffered_get_fd(void *opaque)
{
    MigrationState *s = opaque;
    return s->fd;
}
  499. /*
  500. * The meaning of the return values is:
  501. * 0: We can continue sending
  502. * 1: Time to stop
  503. * negative: There has been an error
  504. */
  505. static int buffered_rate_limit(void *opaque)
  506. {
  507. MigrationState *s = opaque;
  508. int ret;
  509. ret = qemu_file_get_error(s->file);
  510. if (ret) {
  511. return ret;
  512. }
  513. if (s->bytes_xfer >= s->xfer_limit) {
  514. return 1;
  515. }
  516. return 0;
  517. }
  518. static int64_t buffered_set_rate_limit(void *opaque, int64_t new_rate)
  519. {
  520. MigrationState *s = opaque;
  521. if (qemu_file_get_error(s->file)) {
  522. goto out;
  523. }
  524. if (new_rate > SIZE_MAX) {
  525. new_rate = SIZE_MAX;
  526. }
  527. s->xfer_limit = new_rate / XFER_LIMIT_RATIO;
  528. out:
  529. return s->xfer_limit;
  530. }
/* QEMUFileOps get_rate_limit hook: report the per-slot transfer budget. */
static int64_t buffered_get_rate_limit(void *opaque)
{
    MigrationState *s = opaque;
    return s->xfer_limit;
}
/*
 * Outgoing migration worker thread.
 *
 * Drives savevm: begin, then iterate while the estimated remaining data
 * (pending_size) exceeds what we believe can be sent within the allowed
 * downtime (max_size), then stop the VM and send the final state.  All
 * savevm calls are made under the iothread lock; the lock is dropped
 * for throttling, sleeping and flushing.  On any error the migration is
 * flipped to the ERROR state before the thread exits.
 */
static void *buffered_file_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t initial_time = qemu_get_clock_ms(rt_clock);
    /* Bytes we estimate can be transferred within max_downtime; 0 forces
     * at least one iteration before completion is considered. */
    int64_t max_size = 0;
    bool last_round = false;
    int ret;

    qemu_mutex_lock_iothread();
    DPRINTF("beginning savevm\n");
    ret = qemu_savevm_state_begin(s->file, &s->params);
    if (ret < 0) {
        DPRINTF("failed, %d\n", ret);
        qemu_mutex_unlock_iothread();
        goto out;
    }
    qemu_mutex_unlock_iothread();

    while (true) {
        int64_t current_time = qemu_get_clock_ms(rt_clock);
        uint64_t pending_size;

        qemu_mutex_lock_iothread();
        if (s->state != MIG_STATE_ACTIVE) {
            /* Cancelled or errored from another thread. */
            DPRINTF("put_ready returning because of non-active state\n");
            qemu_mutex_unlock_iothread();
            break;
        }
        if (s->complete) {
            /* buffered_close() ran; the stream has been drained. */
            qemu_mutex_unlock_iothread();
            break;
        }
        if (s->bytes_xfer < s->xfer_limit) {
            DPRINTF("iterate\n");
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            DPRINTF("pending size %lu max %lu\n", pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                /* Too much left to converge: send another round. */
                ret = qemu_savevm_state_iterate(s->file);
                if (ret < 0) {
                    qemu_mutex_unlock_iothread();
                    break;
                }
            } else {
                /* Small enough to finish within the downtime budget:
                 * stop the guest and send the final device state. */
                int old_vm_running = runstate_is_running();
                int64_t start_time, end_time;
                DPRINTF("done iterating\n");
                start_time = qemu_get_clock_ms(rt_clock);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                if (old_vm_running) {
                    vm_stop(RUN_STATE_FINISH_MIGRATE);
                } else {
                    vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                }
                ret = qemu_savevm_state_complete(s->file);
                if (ret < 0) {
                    qemu_mutex_unlock_iothread();
                    break;
                } else {
                    migrate_fd_completed(s);
                }
                end_time = qemu_get_clock_ms(rt_clock);
                /* total_time held the start timestamp until now. */
                s->total_time = end_time - s->total_time;
                s->downtime = end_time - start_time;
                if (s->state != MIG_STATE_COMPLETED) {
                    /* Completion failed in cleanup: resume the guest. */
                    if (old_vm_running) {
                        vm_start();
                    }
                }
                last_round = true;
            }
        }
        qemu_mutex_unlock_iothread();

        /* Once per BUFFER_DELAY: recompute bandwidth and the amount we
         * can afford to leave for the final (downtime) phase. */
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = s->bytes_xfer;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;
            DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64
                    " bandwidth %g max_size %" PRId64 "\n",
                    transferred_bytes, time_spent, bandwidth, max_size);
            s->bytes_xfer = 0;
            initial_time = current_time;
        }
        /* Budget exhausted: sleep out the rest of this slot. */
        if (!last_round && (s->bytes_xfer >= s->xfer_limit)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
        ret = buffered_flush(s);
        if (ret < 0) {
            break;
        }
    }

out:
    if (ret < 0) {
        migrate_fd_error(s);
    }
    g_free(s->buffer);
    return NULL;
}
/* QEMUFile backend that stages and rate-limits the outgoing stream. */
static const QEMUFileOps buffered_file_ops = {
    .get_fd = buffered_get_fd,
    .put_buffer = buffered_put_buffer,
    .close = buffered_close,
    .rate_limit = buffered_rate_limit,
    .get_rate_limit = buffered_get_rate_limit,
    .set_rate_limit = buffered_set_rate_limit,
};
  640. void migrate_fd_connect(MigrationState *s)
  641. {
  642. s->state = MIG_STATE_ACTIVE;
  643. s->bytes_xfer = 0;
  644. s->buffer = NULL;
  645. s->buffer_size = 0;
  646. s->buffer_capacity = 0;
  647. s->xfer_limit = s->bandwidth_limit / XFER_LIMIT_RATIO;
  648. s->complete = false;
  649. s->file = qemu_fopen_ops(s, &buffered_file_ops);
  650. qemu_thread_create(&s->thread, buffered_file_thread, s,
  651. QEMU_THREAD_DETACHED);
  652. notifier_list_notify(&migration_state_notifiers, s);
  653. }