/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "migration/postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
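
/*
 * Note: with BUFFER_DELAY = 100 ms there are XFER_LIMIT_RATIO = 10 buffer
 * periods per second, so the per-period byte budget passed to
 * qemu_file_set_rate_limit() below works out to bandwidth_limit / 10.
 */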

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression.*/
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ..., 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
                DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL,
        .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
                DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT,
    };
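
    /*
     * The mutex can't be initialised as part of the static initialiser
     * above, so do it on first use; this assumes the first call happens
     * before any migration threads exist, so the 'once' flag itself
     * needs no locking.
     */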
    if (!once) {
        qemu_mutex_init(&current_migration.src_page_req_mutex);
        once = true;
    }

    return &current_migration;
}

/* For incoming */
static MigrationIncomingState *mis_current;

MigrationIncomingState *migration_incoming_get_current(void)
{
    return mis_current;
}

MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
{
    mis_current = g_new0(MigrationIncomingState, 1);
    mis_current->from_src_file = f;
    QLIST_INIT(&mis_current->loadvm_handlers);
    qemu_mutex_init(&mis_current->rp_mutex);
    qemu_event_init(&mis_current->main_thread_load_event, false);

    return mis_current;
}

void migration_incoming_state_destroy(void)
{
    qemu_event_destroy(&mis_current->main_thread_load_event);
    loadvm_free_handlers(mis_current);
    g_free(mis_current);
    mis_current = NULL;
}

typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */
    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */
    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE_MAX,
                        -1, &local_err);
    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We use the runstate locally even when none is received */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
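    /* Wire layout so far: bytes 0-7 hold the big-endian start address and
     * bytes 8-11 the big-endian length; if a RAMBlock name is given, byte
     * 12 carries its length and the name itself follows. */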
    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    MigrationIncomingState *mis;
    PostcopyState ps;
    int ret;

    mis = migration_incoming_state_new(f);
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_generate_event(MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();
    migration_incoming_state_destroy();

    if (ret < 0) {
        migrate_generate_event(MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        migrate_generate_event(MIGRATION_STATUS_FAILED);
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart.  Any other state is set with
       runstate_set. */
    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_generate_event(MIGRATION_STATUS_COMPLETED);
}

void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    assert(fd != -1);
    migrate_decompress_threads_create();
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
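    /* Framing: a big-endian 16-bit message type, a big-endian 16-bit
     * payload length, then 'len' bytes of payload; the source side parses
     * exactly this in source_return_path_thread(). */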
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

/* Amount of nanoseconds we are willing to wait for migration downtime.
 * The choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve.  It is an internal measure.  All user-visible
 * units must be in seconds. */
static uint64_t max_downtime = 300000000;

uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value = g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
    params->x_cpu_throttle_initial =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
    params->x_cpu_throttle_increment =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;
    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_x_cpu_throttle_percentage = true;
            info->x_cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_X_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads,
                                bool has_x_cpu_throttle_initial,
                                int64_t x_cpu_throttle_initial,
                                bool has_x_cpu_throttle_increment,
                                int64_t x_cpu_throttle_increment, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_x_cpu_throttle_initial &&
            (x_cpu_throttle_initial < 1 || x_cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        /* Bail out so the invalid value is not silently applied below */
        return;
    }
    if (has_x_cpu_throttle_increment &&
            (x_cpu_throttle_increment < 1 || x_cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                                                    decompress_threads;
    }
    if (has_x_cpu_throttle_initial) {
        s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
                                                    x_cpu_throttle_initial;
    }
    if (has_x_cpu_throttle_increment) {
        s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
                                                    x_cpu_throttle_increment;
    }
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * We don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

static void migrate_set_state(MigrationState *s, int old_state, int new_state)
{
    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}
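
/*
 * Note the cmpxchg semantics above: the transition (and its event) only
 * happens if the state is still 'old_state' at that moment, so a racing
 * cancel or failure simply wins and this call becomes a no-op.
 */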

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    flush_page_queue(s);

    if (s->file) {
        trace_migrate_fd_cleanup();
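        /*
         * Drop the iothread lock while joining: migration_thread takes the
         * lock itself on its way out, so joining with it held would
         * deadlock.
         */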
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->file);
        s->file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(s, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->file == NULL);
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, causing the rp thread to exit */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
    int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    int compress_thread_count =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    int decompress_thread_count =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
    int x_cpu_throttle_initial =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
    int x_cpu_throttle_increment =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));
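
    /*
     * Everything not saved into the locals above is deliberately wiped by
     * the memset; the saved values are restored immediately afterwards.
     */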
    memset(s, 0, sizeof(*s));
    s->params = *params;
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
               compress_thread_count;
    s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
               decompress_thread_count;
    s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
                x_cpu_throttle_initial;
    s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
                x_cpu_throttle_increment;
    s->bandwidth_limit = bandwidth_limit;
    migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    QSIMPLEQ_INIT(&s->src_page_requests);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        /* Bail out rather than starting a second incoming migration */
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;
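    /* Roughly: 'blk' requests a full copy of the block devices, while
     * 'inc' requests an incremental copy assuming shared base storage;
     * both feed the block-migration parameters used by migrate_init(). */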

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (qemu_savevm_state_blocked(errp)) {
        return;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return;
    }

    /* We are starting a new migration, so we want to start in a clean
       state.  This change is only needed if previous migration
       failed/was cancelled.  We don't use migrate_set_state() because
       we are setting the initial state, not changing it. */
    s->state = MIGRATION_STATUS_NONE;

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->file) {
        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
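    /* 'value' arrives in seconds; it is stored clamped to [0, UINT64_MAX]
     * as nanoseconds, matching the unit of max_downtime above. */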
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */

/*
 * Something bad happened to the RP stream, mark an error.
 * The caller shall print or trace something to indicate why.
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};
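
/*
 * Every return-path message starts with a 4-byte header: a big-endian
 * 16-bit type and a big-endian 16-bit payload length.  The length is
 * validated against the table above (-1 meaning variable-length) before
 * the payload is read in source_return_path_thread() below.
 */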

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps - 1) ||
       (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(ms, rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    const int max_len = 512;
    uint8_t buf[max_len];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > max_len) {
            error_report("RP: Received '%s' message (0x%04x) with"
                         " incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = be64_to_cpup((uint64_t *)buf);
            len = be32_to_cpup((uint32_t *)(buf + 8));
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */
            if (header_len >= expected_len) {
                start = be64_to_cpup((uint64_t *)buf);
                len = be32_to_cpup((uint32_t *)(buf + 8));
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (rp && qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{
    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    const QEMUSizedBuffer *qsb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    migrate_set_state(ms, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->file, true);

    /*
     * In the 'finish migrate' state, with the io-lock held, everything
     * should be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw away any pages it's already
     * received that are now dirty.
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * Send the rest of the state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here.
     */
    qemu_file_set_rate_limit(ms->file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    QEMUFile *fb = qemu_bufopen("w", NULL);
    if (!fb) {
        error_report("Failed to create buffered file");
        goto fail;
    }

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */
    qsb = qemu_buf_get(fb);

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->file, qsb)) {
        goto fail_closefb;
    }
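    /*
     * qemu_savevm_send_packaged() prefixes the blob with its length, so
     * the destination can pull the entire device state off the stream
     * before it starts processing it.
     */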
    qemu_fclose(fb);
    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->file, 4);

    ret = qemu_file_get_error(ms->file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(ms, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(ms, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->file, INT64_MAX);
                qemu_savevm_state_complete_precopy(s->file, false);
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     * Postcopy opens rp if enabled (even if it's not activated)
     */
    if (migrate_postcopy_ram()) {
        int rp_error;
        trace_migration_completion_postcopy_end_before_rp();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_completion_postcopy_end_after_rp(rp_error);
        if (rp_error) {
            goto fail;
        }
    }

    if (qemu_file_get_error(s->file)) {
        trace_migration_completion_file_err();
        goto fail;
    }

    migrate_set_state(s, current_active_state, MIGRATION_STATUS_COMPLETED);
    return;

fail:
    migrate_set_state(s, current_active_state, MIGRATION_STATUS_FAILED);
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;
    bool entered_postcopy = false;
    /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
    enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;

    rcu_register_thread();

    qemu_savevm_state_header(s->file);

    if (migrate_postcopy_ram()) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->file, 1);

        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->file);
    }

    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    current_active_state = MIGRATION_STATUS_ACTIVE;
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE);

    trace_migration_thread_setup_complete();
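
    /*
     * Main transfer loop: keep iterating while a significant amount of
     * state remains dirty, switch to postcopy if it has been requested,
     * and complete once the remainder fits in the downtime budget.
     */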
    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            uint64_t pend_post, pend_nonpost;

            qemu_savevm_state_pending(s->file, max_size, &pend_nonpost,
                                      &pend_post);
            pending_size = pend_nonpost + pend_post;
            trace_migrate_pending(pending_size, max_size,
                                  pend_post, pend_nonpost);
            if (pending_size && pending_size >= max_size) {
                /* Still a significant amount to transfer */

                current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                if (migrate_postcopy_ram() &&
                    s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                    pend_nonpost <= max_size &&
                    atomic_read(&s->start_postcopy)) {

                    if (!postcopy_start(s, &old_vm_running)) {
                        current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
                        entered_postcopy = true;
                    }

                    continue;
                }
                /* Just another iteration step */
                qemu_savevm_state_iterate(s->file, entered_postcopy);
            } else {
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, current_active_state,
                                     &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, current_active_state, MIGRATION_STATUS_FAILED);
            trace_migration_thread_file_err();
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
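            /*
             * bandwidth is in bytes/ms and max_downtime in ns; dividing by
             * 1e6 converts ns to ms, so max_size is the number of bytes
             * that can still be transferred within the allowed downtime.
             */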
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* If we haven't sent anything, we don't want to recalculate;
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time) * 1000);
        }
    }

    trace_migration_thread_after_loop();
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    qemu_savevm_state_cleanup();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        uint64_t transferred_bytes = qemu_ftell(s->file);
        s->total_time = end_time - s->total_time;
        if (!entered_postcopy) {
            s->downtime = end_time - start_time;
        }
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (old_vm_running && !entered_postcopy) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}

void migrate_fd_connect(MigrationState *s)
{
    /* A rough first approximation of the expected downtime: ns to ms */
    s->expected_downtime = max_downtime / 1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    /*
     * Open the return path; currently for postcopy but other things might
     * also want it.
     */
    if (migrate_postcopy_ram()) {
        if (open_return_path_on_source(s)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(s, MIGRATION_STATUS_SETUP,
                              MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    s->migration_thread_running = true;
}

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}