migration.c

/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "monitor/monitor.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qemu/sockets.h"
#include "migration/block.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"

enum {
    MIG_STATE_ERROR = -1,
    MIG_STATE_NONE,
    MIG_STATE_SETUP,
    MIG_STATE_CANCELLING,
    MIG_STATE_CANCELLED,
    MIG_STATE_ACTIVE,
    MIG_STATE_COMPLETED,
};

#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

MigrationState *migrate_get_current(void)
{
    static MigrationState current_migration = {
        .state = MIG_STATE_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
    };

    return &current_migration;
}

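/*
 * Destination side: parse the migration URI and hand off to the
 * transport-specific handler (tcp, rdma, exec, unix or fd) that starts
 * reading the incoming stream.
 */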
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    if (strstart(uri, "tcp:", &p))
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    else if (strstart(uri, "rdma:", &p))
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    else if (strstart(uri, "exec:", &p))
        exec_start_incoming_migration(p, errp);
    else if (strstart(uri, "unix:", &p))
        unix_start_incoming_migration(p, errp);
    else if (strstart(uri, "fd:", &p))
        fd_start_incoming_migration(p, errp);
#endif
    else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

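/*
 * Coroutine run on the destination: load the whole VM state from the
 * incoming stream, flush block-driver metadata, then either start the
 * guest (autostart) or leave it paused.
 */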
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    int ret;

    ret = qemu_loadvm_state(f);
    qemu_fclose(f);
    free_xbzrle_decoded_buf();
    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        exit(EXIT_FAILURE);
    }
    qemu_announce_self();

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        qerror_report_err(local_err);
        error_free(local_err);
        exit(EXIT_FAILURE);
    }

    if (autostart) {
        vm_start();
    } else {
        runstate_set(RUN_STATE_PAUSED);
    }
}

void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    assert(fd != -1);
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}

/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve. It is an internal measure. All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 300000000;

uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

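/*
 * Build the list returned by the 'query-migrate-capabilities' command:
 * one entry per known capability with its currently configured state.
 */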
MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value = g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

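/*
 * Fill in the MigrationInfo returned by 'query-migrate'.  Which fields are
 * present depends on the current state: RAM, disk and XBZRLE statistics are
 * only reported while a migration is active or once it has completed.
 */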
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIG_STATE_NONE:
        /* no migration has happened ever */
        break;
    case MIG_STATE_SETUP:
        info->has_status = true;
        info->status = g_strdup("setup");
        info->has_total_time = false;
        break;
    case MIG_STATE_ACTIVE:
    case MIG_STATE_CANCELLING:
        info->has_status = true;
        info->status = g_strdup("active");
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIG_STATE_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->status = g_strdup("completed");
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIG_STATE_ERROR:
        info->has_status = true;
        info->status = g_strdup("failed");
        break;
    case MIG_STATE_CANCELLED:
        info->has_status = true;
        info->status = g_strdup("cancelled");
        break;
    }

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP) {
        error_set(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}

/* shared migration helpers */

static void migrate_set_state(MigrationState *s, int old_state, int new_state)
{
    /* atomic_cmpxchg() returns the value previously held in s->state; the
     * transition only took place if that value was old_state. */
    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
    }
}

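/*
 * Bottom half scheduled when the migration thread exits: join the thread,
 * close the migration file, cancel any unfinished savevm state and complete
 * the CANCELLING -> CANCELLED transition, then notify state-change listeners.
 */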
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        qemu_fclose(s->file);
        s->file = NULL;
    }

    assert(s->state != MIG_STATE_ACTIVE);

    if (s->state != MIG_STATE_COMPLETED) {
        qemu_savevm_state_cancel();
        if (s->state == MIG_STATE_CANCELLING) {
            migrate_set_state(s, MIG_STATE_CANCELLING, MIG_STATE_CANCELLED);
        }
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->file == NULL);
    s->state = MIG_STATE_ERROR;
    trace_migrate_set_state(MIG_STATE_ERROR);
    notifier_list_notify(&migration_state_notifiers, s);
}

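/*
 * Request cancellation of an outgoing migration.  Only SETUP and ACTIVE may
 * move to CANCELLING here; the migration thread and the cleanup bottom half
 * then finish the transition to CANCELLED.
 */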
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;

    trace_migrate_fd_cancel();

    do {
        old_state = s->state;
        if (old_state != MIG_STATE_SETUP && old_state != MIG_STATE_ACTIVE) {
            break;
        }
        migrate_set_state(s, old_state, MIG_STATE_CANCELLING);
    } while (s->state != MIG_STATE_CANCELLING);
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIG_STATE_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIG_STATE_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIG_STATE_CANCELLED ||
            s->state == MIG_STATE_ERROR);
}

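/*
 * Reset the global MigrationState for a new outgoing migration while
 * preserving the user-configured settings (capabilities, bandwidth limit,
 * XBZRLE cache size) across the memset().
 */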
static MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));

    memset(s, 0, sizeof(*s));
    s->params = *params;
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->bandwidth_limit = bandwidth_limit;
    s->state = MIG_STATE_SETUP;
    trace_migrate_set_state(MIG_STATE_SETUP);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

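/*
 * Handler for the 'migrate' command: reject the request if a migration is
 * already in progress or blocked, reset the migration state, then dispatch
 * on the URI scheme to the matching outgoing transport.
 */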
void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP ||
        s->state == MIG_STATE_CANCELLING) {
        error_set(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (qemu_savevm_state_blocked(errp)) {
        return;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return;
    }

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                  "a valid migration protocol");
        s->state = MIG_STATE_ERROR;
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                  "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                  "exceeds guest ram size ");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                  "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

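/*
 * Set the migration bandwidth limit in bytes per second.  The QEMUFile rate
 * limit is applied per BUFFER_DELAY window, hence the division by
 * XFER_LIMIT_RATIO.
 */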
void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->file) {
        qemu_file_set_rate_limit(s->file,
                                 s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

bool migrate_rdma_pin_all(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */

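/*
 * Main loop of an outgoing migration, run in its own thread: iterate the
 * savevm handlers while the guest keeps running, measure throughput over
 * BUFFER_DELAY windows to estimate how much data can be sent within
 * max_downtime, and once the remaining dirty data fits that budget stop the
 * guest and send the final state.  The cleanup bottom half is scheduled on
 * exit.
 */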
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    bool old_vm_running = false;

    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(s, MIG_STATE_SETUP, MIG_STATE_ACTIVE);

    while (s->state == MIG_STATE_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            trace_migrate_pending(pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                qemu_savevm_state_iterate(s->file);
            } else {
                int ret;

                qemu_mutex_lock_iothread();
                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                old_vm_running = runstate_is_running();

                ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                if (ret >= 0) {
                    qemu_file_set_rate_limit(s->file, INT64_MAX);
                    qemu_savevm_state_complete(s->file);
                }
                qemu_mutex_unlock_iothread();

                if (ret < 0) {
                    migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
                    break;
                }

                if (!qemu_file_get_error(s->file)) {
                    migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_COMPLETED);
                    break;
                }
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* if we haven't sent anything, we don't want to recalculate
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time) * 1000);
        }
    }

    qemu_mutex_lock_iothread();
    if (s->state == MIG_STATE_COMPLETED) {
        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        uint64_t transferred_bytes = qemu_ftell(s->file);
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (old_vm_running) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    return NULL;
}

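/*
 * Called once the outgoing transport is set up: seed the expected downtime,
 * apply the bandwidth limit, create the cleanup bottom half, notify
 * listeners and spawn the migration thread.
 */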
void migrate_fd_connect(MigrationState *s)
{
    s->state = MIG_STATE_SETUP;
    trace_migrate_set_state(MIG_STATE_SETUP);

    /* This is a best 1st approximation. ns to ms */
    s->expected_downtime = max_downtime / 1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
}