/* block-dirty-bitmap.c */

/*
 * Block dirty bitmap postcopy migration
 *
 * Copyright IBM, Corp. 2009
 * Copyright (c) 2016-2017 Virtuozzo International GmbH. All rights reserved.
 *
 * Authors:
 *  Liran Schour <lirans@il.ibm.com>
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 * This file is derived from migration/block.c, so its author and IBM copyright
 * are here, although the content is quite different.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 *                                ***
 *
 * Here postcopy migration of dirty bitmaps is realized. Only QMP-addressable
 * bitmaps are migrated.
 *
 * Bitmap migration implies creating a bitmap with the same name and
 * granularity in the destination QEMU. If a bitmap with the same name
 * already exists on the destination (for the same node), an error is
 * generated.
 *
 * format of migration:
 *
 * # Header (shared for different chunk types)
 * 1, 2 or 4 bytes: flags (see qemu_{get,put}_bitmap_flags())
 * [ 1 byte: node alias size ]   \  flags & DEVICE_NAME
 * [ n bytes: node alias     ]   /
 * [ 1 byte: bitmap alias size ] \  flags & BITMAP_NAME
 * [ n bytes: bitmap alias     ] /
 *
 * # Start of bitmap migration (flags & START)
 * header
 * be32: granularity
 * 1 byte: bitmap flags (corresponds to BdrvDirtyBitmap)
 *   bit 0    - bitmap is enabled
 *   bit 1    - bitmap is persistent
 *   bit 2    - bitmap is autoloading
 *   bits 3-7 - reserved, must be zero
 *
 * # Completion of bitmap migration (flags & COMPLETE)
 * header
 *
 * # Data chunk of bitmap migration
 * header
 * be64: start sector
 * be32: number of sectors
 * [ be64: buffer size ] \ ! (flags & ZEROES)
 * [ n bytes: buffer   ] /
 *
 * The last chunk in the stream should contain flags & EOS. A chunk may omit
 * the device and/or bitmap name, in which case they are assumed to be the
 * same as in the previous chunk.
 */
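/*
 * For example, the stream for a single bitmap typically consists of a START
 * chunk (flags with START, DEVICE_NAME and BITMAP_NAME set, the two aliases,
 * the be32 granularity and one byte of bitmap flags), a number of data
 * chunks, a COMPLETE chunk, and finally a chunk carrying only the EOS flag.
 */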
#include "qemu/osdep.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
#include "sysemu/block-backend.h"
#include "sysemu/runstate.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "migration/misc.h"
#include "migration/migration.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "migration/register.h"
#include "qemu/hbitmap.h"
#include "qemu/cutils.h"
#include "qemu/id.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/clone-visitor.h"
#include "trace.h"

#define CHUNK_SIZE     (1 << 10)

/* Flags occupy one, two or four bytes (Big Endian). The size is determined as
 * follows:
 * in first (most significant) byte bit 8 is clear  -->  one byte
 * in first byte bit 8 is set  -->  two or four bytes, depending on second
 *                                  byte:
 *    | in second byte bit 8 is clear  -->  two bytes
 *    | in second byte bit 8 is set    -->  four bytes
 */
#define DIRTY_BITMAP_MIG_FLAG_EOS           0x01
#define DIRTY_BITMAP_MIG_FLAG_ZEROES        0x02
#define DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME   0x04
#define DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME   0x08
#define DIRTY_BITMAP_MIG_FLAG_START         0x10
#define DIRTY_BITMAP_MIG_FLAG_COMPLETE      0x20
#define DIRTY_BITMAP_MIG_FLAG_BITS          0x40

#define DIRTY_BITMAP_MIG_EXTRA_FLAGS        0x80

#define DIRTY_BITMAP_MIG_START_FLAG_ENABLED          0x01
#define DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT       0x02
/* 0x04 was the "AUTOLOAD" flag in older versions; it is now ignored */
#define DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK    0xf8

/* State of one bitmap during save process */
typedef struct SaveBitmapState {
    /* Written during setup phase. */
    BlockDriverState *bs;
    char *node_alias;
    char *bitmap_alias;
    BdrvDirtyBitmap *bitmap;
    uint64_t total_sectors;
    uint64_t sectors_per_chunk;
    QSIMPLEQ_ENTRY(SaveBitmapState) entry;
    uint8_t flags;

    /* For bulk phase. */
    bool bulk_completed;
    uint64_t cur_sector;
} SaveBitmapState;

/* State of the dirty bitmap migration (DBM) during save process */
typedef struct DBMSaveState {
    QSIMPLEQ_HEAD(, SaveBitmapState) dbms_list;

    bool bulk_completed;
    bool no_bitmaps;

    /* for send_bitmap_bits() */
    BlockDriverState *prev_bs;
    BdrvDirtyBitmap *prev_bitmap;
} DBMSaveState;

typedef struct LoadBitmapState {
    BlockDriverState *bs;
    BdrvDirtyBitmap *bitmap;
    bool migrated;
    bool enabled;
} LoadBitmapState;

/* State of the dirty bitmap migration (DBM) during load process */
typedef struct DBMLoadState {
    uint32_t flags;
    char node_alias[256];
    char bitmap_alias[256];
    char bitmap_name[BDRV_BITMAP_MAX_NAME_SIZE + 1];
    BlockDriverState *bs;
    BdrvDirtyBitmap *bitmap;

    bool before_vm_start_handled; /* set in dirty_bitmap_mig_before_vm_start */

    BitmapMigrationBitmapAlias *bmap_inner;

    /*
     * cancelled
     * Incoming migration is cancelled for some reason. That means that we
     * still should read our chunks from the migration stream, to not affect
     * other migration objects (like RAM), but just ignore them and not touch
     * any bitmaps or nodes.
     */
    bool cancelled;

    GSList *bitmaps;
    QemuMutex lock; /* protect bitmaps */
} DBMLoadState;

typedef struct DBMState {
    DBMSaveState save;
    DBMLoadState load;
} DBMState;
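/* The single global instance, covering both the save and the load side */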
static DBMState dbm_state;

/* For hash tables that map node/bitmap names to aliases */
typedef struct AliasMapInnerNode {
    char *string;
    GHashTable *subtree;
} AliasMapInnerNode;

static void free_alias_map_inner_node(void *amin_ptr)
{
    AliasMapInnerNode *amin = amin_ptr;

    g_free(amin->string);
    g_hash_table_unref(amin->subtree);
    g_free(amin);
}

/**
 * Construct an alias map based on the given QMP structure.
 *
 * (Note that we cannot store such maps in the MigrationParameters
 * object, because that struct is defined by the QAPI schema, which
 * makes it basically impossible to have dicts with arbitrary keys.
 * Therefore, we instead have to construct these maps when migration
 * starts.)
 *
 * @bbm is the block_bitmap_mapping from the migration parameters.
 *
 * If @name_to_alias is true, the returned hash table will map node
 * and bitmap names to their respective aliases (for outgoing
 * migration).
 *
 * If @name_to_alias is false, the returned hash table will map node
 * and bitmap aliases to their respective names (for incoming
 * migration).
 *
 * The hash table maps node names/aliases to AliasMapInnerNode
 * objects, whose .string is the respective node alias/name, and whose
 * .subtree table maps bitmap names/aliases to the respective bitmap
 * alias/name.
 */
static GHashTable *construct_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                       bool name_to_alias,
                                       Error **errp)
{
    GHashTable *alias_map;
    size_t max_node_name_len = sizeof_field(BlockDriverState, node_name) - 1;

    alias_map = g_hash_table_new_full(g_str_hash, g_str_equal,
                                      g_free, free_alias_map_inner_node);

    for (; bbm; bbm = bbm->next) {
        const BitmapMigrationNodeAlias *bmna = bbm->value;
        const BitmapMigrationBitmapAliasList *bmbal;
        AliasMapInnerNode *amin;
        GHashTable *bitmaps_map;
        const char *node_map_from, *node_map_to;
        GDestroyNotify gdn;

        if (!id_wellformed(bmna->alias)) {
            error_setg(errp, "The node alias '%s' is not well-formed",
                       bmna->alias);
            goto fail;
        }

        if (strlen(bmna->alias) > UINT8_MAX) {
            error_setg(errp, "The node alias '%s' is longer than %u bytes",
                       bmna->alias, UINT8_MAX);
            goto fail;
        }

        if (strlen(bmna->node_name) > max_node_name_len) {
            error_setg(errp, "The node name '%s' is longer than %zu bytes",
                       bmna->node_name, max_node_name_len);
            goto fail;
        }

        if (name_to_alias) {
            if (g_hash_table_contains(alias_map, bmna->node_name)) {
                error_setg(errp, "The node name '%s' is mapped twice",
                           bmna->node_name);
                goto fail;
            }

            node_map_from = bmna->node_name;
            node_map_to = bmna->alias;
        } else {
            if (g_hash_table_contains(alias_map, bmna->alias)) {
                error_setg(errp, "The node alias '%s' is used twice",
                           bmna->alias);
                goto fail;
            }

            node_map_from = bmna->alias;
            node_map_to = bmna->node_name;
        }

        gdn = (GDestroyNotify) qapi_free_BitmapMigrationBitmapAlias;
        bitmaps_map = g_hash_table_new_full(g_str_hash, g_str_equal, g_free,
                                            gdn);

        amin = g_new(AliasMapInnerNode, 1);
        *amin = (AliasMapInnerNode){
            .string = g_strdup(node_map_to),
            .subtree = bitmaps_map,
        };

        g_hash_table_insert(alias_map, g_strdup(node_map_from), amin);

        for (bmbal = bmna->bitmaps; bmbal; bmbal = bmbal->next) {
            const BitmapMigrationBitmapAlias *bmba = bmbal->value;
            const char *bmap_map_from;

            if (strlen(bmba->alias) > UINT8_MAX) {
                error_setg(errp,
                           "The bitmap alias '%s' is longer than %u bytes",
                           bmba->alias, UINT8_MAX);
                goto fail;
            }

            if (strlen(bmba->name) > BDRV_BITMAP_MAX_NAME_SIZE) {
                error_setg(errp, "The bitmap name '%s' is longer than %d bytes",
                           bmba->name, BDRV_BITMAP_MAX_NAME_SIZE);
                goto fail;
            }

            if (name_to_alias) {
                bmap_map_from = bmba->name;

                if (g_hash_table_contains(bitmaps_map, bmba->name)) {
                    error_setg(errp, "The bitmap '%s'/'%s' is mapped twice",
                               bmna->node_name, bmba->name);
                    goto fail;
                }
            } else {
                bmap_map_from = bmba->alias;

                if (g_hash_table_contains(bitmaps_map, bmba->alias)) {
                    error_setg(errp, "The bitmap alias '%s'/'%s' is used twice",
                               bmna->alias, bmba->alias);
                    goto fail;
                }
            }

            g_hash_table_insert(bitmaps_map, g_strdup(bmap_map_from),
                                QAPI_CLONE(BitmapMigrationBitmapAlias, bmba));
        }
    }

    return alias_map;

fail:
    g_hash_table_destroy(alias_map);
    return NULL;
}

/**
 * Run construct_alias_map() in both directions to check whether @bbm
 * is valid.
 * (This function is to be used by migration/migration.c to validate
 * the user-specified block-bitmap-mapping migration parameter.)
 *
 * Returns true if and only if the mapping is valid.
 */
bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                      Error **errp)
{
    GHashTable *alias_map;

    alias_map = construct_alias_map(bbm, true, errp);
    if (!alias_map) {
        return false;
    }
    g_hash_table_destroy(alias_map);

    alias_map = construct_alias_map(bbm, false, errp);
    if (!alias_map) {
        return false;
    }
    g_hash_table_destroy(alias_map);

    return true;
}
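/*
 * Read the chunk flags, which occupy one, two or four bytes depending on the
 * EXTRA_FLAGS bit of each leading byte (see the comment above the
 * DIRTY_BITMAP_MIG_FLAG_* definitions).
 */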
static uint32_t qemu_get_bitmap_flags(QEMUFile *f)
{
    uint8_t flags = qemu_get_byte(f);
    if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
        flags = flags << 8 | qemu_get_byte(f);
        if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
            flags = flags << 16 | qemu_get_be16(f);
        }
    }

    return flags;
}

static void qemu_put_bitmap_flags(QEMUFile *f, uint32_t flags)
{
    /* The code currently does not send flags as more than one byte */
    assert(!(flags & (0xffffff00 | DIRTY_BITMAP_MIG_EXTRA_FLAGS)));

    qemu_put_byte(f, flags);
}
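/*
 * Write a chunk header. The node and bitmap aliases are emitted only when
 * they differ from the ones sent in the previous chunk.
 */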
static void send_bitmap_header(QEMUFile *f, DBMSaveState *s,
                               SaveBitmapState *dbms, uint32_t additional_flags)
{
    BlockDriverState *bs = dbms->bs;
    BdrvDirtyBitmap *bitmap = dbms->bitmap;
    uint32_t flags = additional_flags;
    trace_send_bitmap_header_enter();

    if (bs != s->prev_bs) {
        s->prev_bs = bs;
        flags |= DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME;
    }

    if (bitmap != s->prev_bitmap) {
        s->prev_bitmap = bitmap;
        flags |= DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME;
    }

    qemu_put_bitmap_flags(f, flags);

    if (flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
        qemu_put_counted_string(f, dbms->node_alias);
    }

    if (flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
        qemu_put_counted_string(f, dbms->bitmap_alias);
    }
}

static void send_bitmap_start(QEMUFile *f, DBMSaveState *s,
                              SaveBitmapState *dbms)
{
    send_bitmap_header(f, s, dbms, DIRTY_BITMAP_MIG_FLAG_START);
    qemu_put_be32(f, bdrv_dirty_bitmap_granularity(dbms->bitmap));
    qemu_put_byte(f, dbms->flags);
}

static void send_bitmap_complete(QEMUFile *f, DBMSaveState *s,
                                 SaveBitmapState *dbms)
{
    send_bitmap_header(f, s, dbms, DIRTY_BITMAP_MIG_FLAG_COMPLETE);
}
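/*
 * Send one data chunk. If the serialized range is all zeroes, only the
 * ZEROES flag is sent and the buffer itself is omitted.
 */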
static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
                             SaveBitmapState *dbms,
                             uint64_t start_sector, uint32_t nr_sectors)
{
    /* align for buffer_is_zero() */
    uint64_t align = 4 * sizeof(long);
    uint64_t unaligned_size =
        bdrv_dirty_bitmap_serialization_size(
            dbms->bitmap, start_sector << BDRV_SECTOR_BITS,
            (uint64_t)nr_sectors << BDRV_SECTOR_BITS);
    uint64_t buf_size = QEMU_ALIGN_UP(unaligned_size, align);
    uint8_t *buf = g_malloc0(buf_size);
    uint32_t flags = DIRTY_BITMAP_MIG_FLAG_BITS;

    bdrv_dirty_bitmap_serialize_part(
        dbms->bitmap, buf, start_sector << BDRV_SECTOR_BITS,
        (uint64_t)nr_sectors << BDRV_SECTOR_BITS);

    if (buffer_is_zero(buf, buf_size)) {
        g_free(buf);
        buf = NULL;
        flags |= DIRTY_BITMAP_MIG_FLAG_ZEROES;
    }

    trace_send_bitmap_bits(flags, start_sector, nr_sectors, buf_size);

    send_bitmap_header(f, s, dbms, flags);

    qemu_put_be64(f, start_sector);
    qemu_put_be32(f, nr_sectors);

    /*
     * If a block is zero we need to flush here, since the network bandwidth
     * is now a lot higher than the storage device bandwidth. Thus, if we
     * queue zero blocks we slow down the migration.
     */
    if (flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
        qemu_fflush(f);
    } else {
        qemu_put_be64(f, buf_size);
        qemu_put_buffer(f, buf, buf_size);
    }

    g_free(buf);
}

/* Called with iothread lock taken. */
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
{
    SaveBitmapState *dbms;

    while ((dbms = QSIMPLEQ_FIRST(&s->dbms_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&s->dbms_list, entry);
        bdrv_dirty_bitmap_set_busy(dbms->bitmap, false);
        bdrv_unref(dbms->bs);
        g_free(dbms->node_alias);
        g_free(dbms->bitmap_alias);
        g_free(dbms);
    }
}

/* Called with iothread lock taken. */
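/*
 * Add the named bitmaps of @bs that are covered by @alias_map (or all of
 * them when no map is given) to @s->dbms_list, marking each one busy.
 * Returns 0 on success, -1 on error.
 */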
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
                               const char *bs_name, GHashTable *alias_map)
{
    BdrvDirtyBitmap *bitmap;
    SaveBitmapState *dbms;
    GHashTable *bitmap_aliases;
    const char *node_alias, *bitmap_name, *bitmap_alias;
    Error *local_err = NULL;

    /* When an alias map is given, @bs_name must be @bs's node name */
    assert(!alias_map || !strcmp(bs_name, bdrv_get_node_name(bs)));

    FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
        if (bdrv_dirty_bitmap_name(bitmap)) {
            break;
        }
    }
    if (!bitmap) {
        return 0;
    }

    bitmap_name = bdrv_dirty_bitmap_name(bitmap);

    if (!bs_name || strcmp(bs_name, "") == 0) {
        error_report("Bitmap '%s' in unnamed node can't be migrated",
                     bitmap_name);
        return -1;
    }

    if (alias_map) {
        const AliasMapInnerNode *amin = g_hash_table_lookup(alias_map, bs_name);

        if (!amin) {
            /* Skip bitmaps on nodes with no alias */
            return 0;
        }

        node_alias = amin->string;
        bitmap_aliases = amin->subtree;
    } else {
        node_alias = bs_name;
        bitmap_aliases = NULL;
    }

    if (node_alias[0] == '#') {
        error_report("Bitmap '%s' in a node with auto-generated "
                     "name '%s' can't be migrated",
                     bitmap_name, node_alias);
        return -1;
    }

    FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
        BitmapMigrationBitmapAliasTransform *bitmap_transform = NULL;
        bitmap_name = bdrv_dirty_bitmap_name(bitmap);
        if (!bitmap_name) {
            continue;
        }

        if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_DEFAULT, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (bitmap_aliases) {
            BitmapMigrationBitmapAlias *bmap_inner;

            bmap_inner = g_hash_table_lookup(bitmap_aliases, bitmap_name);
            if (!bmap_inner) {
                /* Skip bitmaps with no alias */
                continue;
            }

            bitmap_alias = bmap_inner->alias;
            if (bmap_inner->transform) {
                bitmap_transform = bmap_inner->transform;
            }
        } else {
            if (strlen(bitmap_name) > UINT8_MAX) {
                error_report("Cannot migrate bitmap '%s' on node '%s': "
                             "Name is longer than %u bytes",
                             bitmap_name, bs_name, UINT8_MAX);
                return -1;
            }
            bitmap_alias = bitmap_name;
        }

        bdrv_ref(bs);
        bdrv_dirty_bitmap_set_busy(bitmap, true);

        dbms = g_new0(SaveBitmapState, 1);
        dbms->bs = bs;
        dbms->node_alias = g_strdup(node_alias);
        dbms->bitmap_alias = g_strdup(bitmap_alias);
        dbms->bitmap = bitmap;
        dbms->total_sectors = bdrv_nb_sectors(bs);
        dbms->sectors_per_chunk = CHUNK_SIZE * 8LLU *
            (bdrv_dirty_bitmap_granularity(bitmap) >> BDRV_SECTOR_BITS);
        assert(dbms->sectors_per_chunk != 0);
        if (bdrv_dirty_bitmap_enabled(bitmap)) {
            dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_ENABLED;
        }
        if (bitmap_transform &&
            bitmap_transform->has_persistent) {
            if (bitmap_transform->persistent) {
                dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
            }
        } else {
            if (bdrv_dirty_bitmap_get_persistence(bitmap)) {
                dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
            }
        }

        QSIMPLEQ_INSERT_TAIL(&s->dbms_list, dbms, entry);
    }

    return 0;
}

/* Called with iothread lock taken. */
static int init_dirty_bitmap_migration(DBMSaveState *s)
{
    BlockDriverState *bs;
    SaveBitmapState *dbms;
    GHashTable *handled_by_blk = g_hash_table_new(NULL, NULL);
    BlockBackend *blk;
    const MigrationParameters *mig_params = &migrate_get_current()->parameters;
    GHashTable *alias_map = NULL;

    if (mig_params->has_block_bitmap_mapping) {
        alias_map = construct_alias_map(mig_params->block_bitmap_mapping, true,
                                        &error_abort);
    }

    s->bulk_completed = false;
    s->prev_bs = NULL;
    s->prev_bitmap = NULL;
    s->no_bitmaps = false;

    if (!alias_map) {
        /*
         * Use the block device name for direct (or filtered) children of
         * named block backends.
         */
        for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
            const char *name = blk_name(blk);

            if (!name || strcmp(name, "") == 0) {
                continue;
            }

            bs = blk_bs(blk);

            /* Skip filters without bitmaps */
            while (bs && bs->drv && bs->drv->is_filter &&
                   !bdrv_has_named_bitmaps(bs))
            {
                bs = bdrv_filter_bs(bs);
            }

            if (bs && bs->drv && !bs->drv->is_filter) {
                if (add_bitmaps_to_list(s, bs, name, NULL)) {
                    goto fail;
                }
                g_hash_table_add(handled_by_blk, bs);
            }
        }
    }

    for (bs = bdrv_next_all_states(NULL); bs; bs = bdrv_next_all_states(bs)) {
        if (g_hash_table_contains(handled_by_blk, bs)) {
            continue;
        }

        if (add_bitmaps_to_list(s, bs, bdrv_get_node_name(bs), alias_map)) {
            goto fail;
        }
    }

    /*
     * Set the skip-store flag only here, after all possible failure points,
     * so it never needs to be rolled back.
     */
    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        bdrv_dirty_bitmap_skip_store(dbms->bitmap, true);
    }

    if (QSIMPLEQ_EMPTY(&s->dbms_list)) {
        s->no_bitmaps = true;
    }

    g_hash_table_destroy(handled_by_blk);
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }

    return 0;

fail:
    g_hash_table_destroy(handled_by_blk);
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }
    dirty_bitmap_do_save_cleanup(s);

    return -1;
}

/* Called with no lock taken. */
static void bulk_phase_send_chunk(QEMUFile *f, DBMSaveState *s,
                                  SaveBitmapState *dbms)
{
    uint32_t nr_sectors = MIN(dbms->total_sectors - dbms->cur_sector,
                              dbms->sectors_per_chunk);

    send_bitmap_bits(f, s, dbms, dbms->cur_sector, nr_sectors);

    dbms->cur_sector += nr_sectors;
    if (dbms->cur_sector >= dbms->total_sectors) {
        dbms->bulk_completed = true;
    }
}

/* Called with no lock taken. */
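/*
 * Send bulk data for every bitmap in the list. If @limit is true, stop as
 * soon as the migration rate limit is reached.
 */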
static void bulk_phase(QEMUFile *f, DBMSaveState *s, bool limit)
{
    SaveBitmapState *dbms;

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        while (!dbms->bulk_completed) {
            bulk_phase_send_chunk(f, s, dbms);
            if (limit && qemu_file_rate_limit(f)) {
                return;
            }
        }
    }

    s->bulk_completed = true;
}

/* for SaveVMHandlers */
static void dirty_bitmap_save_cleanup(void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    dirty_bitmap_do_save_cleanup(s);
}
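/*
 * .save_live_iterate handler: during postcopy, keep sending bulk data
 * (rate-limited). Returns nonzero once the bulk phase has completed.
 */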
static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    trace_dirty_bitmap_save_iterate(migration_in_postcopy());

    if (migration_in_postcopy() && !s->bulk_completed) {
        bulk_phase(f, s, true);
    }

    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);

    return s->bulk_completed;
}

/* Called with iothread lock taken. */
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms;
    trace_dirty_bitmap_save_complete_enter();

    if (!s->bulk_completed) {
        bulk_phase(f, s, false);
    }

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        send_bitmap_complete(f, s, dbms);
    }

    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);

    trace_dirty_bitmap_save_complete_finish();

    dirty_bitmap_save_cleanup(opaque);
    return 0;
}

static void dirty_bitmap_state_pending(void *opaque,
                                       uint64_t *must_precopy,
                                       uint64_t *can_postcopy)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms;
    uint64_t pending = 0;

    qemu_mutex_lock_iothread();

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap);
        uint64_t sectors = dbms->bulk_completed ? 0 :
                           dbms->total_sectors - dbms->cur_sector;

        pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran);
    }

    qemu_mutex_unlock_iothread();

    trace_dirty_bitmap_state_pending(pending);

    *can_postcopy += pending;
}

/* First occurrence of this bitmap. It should be created if it doesn't exist */
static int dirty_bitmap_load_start(QEMUFile *f, DBMLoadState *s)
{
    Error *local_err = NULL;
    uint32_t granularity = qemu_get_be32(f);
    uint8_t flags = qemu_get_byte(f);
    LoadBitmapState *b;
    bool persistent;

    if (s->cancelled) {
        return 0;
    }

    if (s->bitmap) {
        error_report("Bitmap with the same name ('%s') already exists on "
                     "destination", bdrv_dirty_bitmap_name(s->bitmap));
        return -EINVAL;
    } else {
        s->bitmap = bdrv_create_dirty_bitmap(s->bs, granularity,
                                             s->bitmap_name, &local_err);
        if (!s->bitmap) {
            error_report_err(local_err);
            return -EINVAL;
        }
    }

    if (flags & DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK) {
        error_report("Unknown flags in migrated dirty bitmap header: %x",
                     flags);
        return -EINVAL;
    }

    if (s->bmap_inner &&
        s->bmap_inner->transform &&
        s->bmap_inner->transform->has_persistent) {
        persistent = s->bmap_inner->transform->persistent;
    } else {
        persistent = flags & DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
    }

    if (persistent) {
        bdrv_dirty_bitmap_set_persistence(s->bitmap, true);
    }

    bdrv_disable_dirty_bitmap(s->bitmap);
    if (flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED) {
        bdrv_dirty_bitmap_create_successor(s->bitmap, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
    } else {
        bdrv_dirty_bitmap_set_busy(s->bitmap, true);
    }

    b = g_new(LoadBitmapState, 1);
    b->bs = s->bs;
    b->bitmap = s->bitmap;
    b->migrated = false;
    b->enabled = flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED;

    s->bitmaps = g_slist_prepend(s->bitmaps, b);

    return 0;
}

/*
 * before_vm_start_handle_item
 *
 * g_slist_foreach helper
 *
 * item is LoadBitmapState*
 * opaque is DBMLoadState*
 */
static void before_vm_start_handle_item(void *item, void *opaque)
{
    DBMLoadState *s = opaque;
    LoadBitmapState *b = item;

    if (b->enabled) {
        if (b->migrated) {
            bdrv_enable_dirty_bitmap(b->bitmap);
        } else {
            bdrv_dirty_bitmap_enable_successor(b->bitmap);
        }
    }

    if (b->migrated) {
        s->bitmaps = g_slist_remove(s->bitmaps, b);
        g_free(b);
    }
}

void dirty_bitmap_mig_before_vm_start(void)
{
    DBMLoadState *s = &dbm_state.load;
    qemu_mutex_lock(&s->lock);

    assert(!s->before_vm_start_handled);
    g_slist_foreach(s->bitmaps, before_vm_start_handle_item, s);
    s->before_vm_start_handled = true;

    qemu_mutex_unlock(&s->lock);
}
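/*
 * Called with s->lock held. Mark the incoming migration as cancelled and
 * release every bitmap that has not finished loading.
 */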
static void cancel_incoming_locked(DBMLoadState *s)
{
    GSList *item;

    if (s->cancelled) {
        return;
    }

    s->cancelled = true;
    s->bs = NULL;
    s->bitmap = NULL;

    /* Drop all unfinished bitmaps */
    for (item = s->bitmaps; item; item = g_slist_next(item)) {
        LoadBitmapState *b = item->data;

        /*
         * Bitmap must be unfinished, as finished bitmaps should already be
         * removed from the list.
         */
        assert(!s->before_vm_start_handled || !b->migrated);
        if (bdrv_dirty_bitmap_has_successor(b->bitmap)) {
            bdrv_reclaim_dirty_bitmap(b->bitmap, &error_abort);
        } else {
            bdrv_dirty_bitmap_set_busy(b->bitmap, false);
        }
        bdrv_release_dirty_bitmap(b->bitmap);
    }

    g_slist_free_full(s->bitmaps, g_free);
    s->bitmaps = NULL;
}

void dirty_bitmap_mig_cancel_outgoing(void)
{
    dirty_bitmap_do_save_cleanup(&dbm_state.save);
}

void dirty_bitmap_mig_cancel_incoming(void)
{
    DBMLoadState *s = &dbm_state.load;

    qemu_mutex_lock(&s->lock);
    cancel_incoming_locked(s);
    qemu_mutex_unlock(&s->lock);
}
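/*
 * Finish loading of the current bitmap: reclaim its successor (or clear its
 * busy flag) and, if the VM has already been started, drop it from the list
 * of pending bitmaps.
 */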
static void dirty_bitmap_load_complete(QEMUFile *f, DBMLoadState *s)
{
    GSList *item;
    trace_dirty_bitmap_load_complete();

    if (s->cancelled) {
        return;
    }

    bdrv_dirty_bitmap_deserialize_finish(s->bitmap);

    if (bdrv_dirty_bitmap_has_successor(s->bitmap)) {
        bdrv_reclaim_dirty_bitmap(s->bitmap, &error_abort);
    } else {
        bdrv_dirty_bitmap_set_busy(s->bitmap, false);
    }

    for (item = s->bitmaps; item; item = g_slist_next(item)) {
        LoadBitmapState *b = item->data;

        if (b->bitmap == s->bitmap) {
            b->migrated = true;
            if (s->before_vm_start_handled) {
                s->bitmaps = g_slist_remove(s->bitmaps, b);
                g_free(b);
            }
            break;
        }
    }
}
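/*
 * Load one data chunk. In cancelled mode the payload is still read from the
 * stream, but its contents are discarded.
 */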
static int dirty_bitmap_load_bits(QEMUFile *f, DBMLoadState *s)
{
    uint64_t first_byte = qemu_get_be64(f) << BDRV_SECTOR_BITS;
    uint64_t nr_bytes = (uint64_t)qemu_get_be32(f) << BDRV_SECTOR_BITS;
    trace_dirty_bitmap_load_bits_enter(first_byte >> BDRV_SECTOR_BITS,
                                       nr_bytes >> BDRV_SECTOR_BITS);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
        trace_dirty_bitmap_load_bits_zeroes();
        if (!s->cancelled) {
            bdrv_dirty_bitmap_deserialize_zeroes(s->bitmap, first_byte,
                                                 nr_bytes, false);
        }
    } else {
        size_t ret;
        g_autofree uint8_t *buf = NULL;
        uint64_t buf_size = qemu_get_be64(f);
        uint64_t needed_size;

        /*
         * The actual check for buf_size is done a bit later. We can't do it in
         * cancelled mode as we don't have the bitmap to check the constraints
         * (so, we allocate a buffer and read prior to the check). On the other
         * hand, we shouldn't blindly g_malloc the number from the stream.
         * Actually one chunk should not be larger than CHUNK_SIZE. Let's allow
         * a bit larger (which means that bitmap migration will fail anyway and
         * the whole migration will most probably fail soon due to broken
         * stream).
         */
        if (buf_size > 10 * CHUNK_SIZE) {
            error_report("Bitmap migration stream buffer allocation request "
                         "is too large");
            return -EIO;
        }

        buf = g_malloc(buf_size);
        ret = qemu_get_buffer(f, buf, buf_size);
        if (ret != buf_size) {
            error_report("Failed to read bitmap bits");
            return -EIO;
        }

        if (s->cancelled) {
            return 0;
        }

        needed_size = bdrv_dirty_bitmap_serialization_size(s->bitmap,
                                                           first_byte,
                                                           nr_bytes);

        if (needed_size > buf_size ||
            buf_size > QEMU_ALIGN_UP(needed_size, 4 * sizeof(long))
            /* Same alignment as used in send_bitmap_bits() */
        ) {
            error_report("Migrated bitmap granularity doesn't "
                         "match the destination bitmap '%s' granularity",
                         bdrv_dirty_bitmap_name(s->bitmap));
            cancel_incoming_locked(s);
            return 0;
        }

        bdrv_dirty_bitmap_deserialize_part(s->bitmap, buf, first_byte, nr_bytes,
                                           false);
    }

    return 0;
}
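/*
 * Parse a chunk header: read the flags and the optional node and bitmap
 * aliases, resolving them through @alias_map when a mapping is configured.
 */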
static int dirty_bitmap_load_header(QEMUFile *f, DBMLoadState *s,
                                    GHashTable *alias_map)
{
    GHashTable *bitmap_alias_map = NULL;
    Error *local_err = NULL;
    bool nothing;
    s->flags = qemu_get_bitmap_flags(f);
    trace_dirty_bitmap_load_header(s->flags);

    nothing = s->flags == (s->flags & DIRTY_BITMAP_MIG_FLAG_EOS);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
        if (!qemu_get_counted_string(f, s->node_alias)) {
            error_report("Unable to read node alias string");
            return -EINVAL;
        }

        if (!s->cancelled) {
            if (alias_map) {
                const AliasMapInnerNode *amin;

                amin = g_hash_table_lookup(alias_map, s->node_alias);
                if (!amin) {
                    error_setg(&local_err, "Error: Unknown node alias '%s'",
                               s->node_alias);
                    s->bs = NULL;
                } else {
                    bitmap_alias_map = amin->subtree;
                    s->bs = bdrv_lookup_bs(NULL, amin->string, &local_err);
                }
            } else {
                s->bs = bdrv_lookup_bs(s->node_alias, s->node_alias,
                                       &local_err);
            }

            if (!s->bs) {
                error_report_err(local_err);
                cancel_incoming_locked(s);
            }
        }
    } else if (s->bs) {
        if (alias_map) {
            const AliasMapInnerNode *amin;

            /* Must be present in the map, or s->bs would not be set */
            amin = g_hash_table_lookup(alias_map, s->node_alias);
            assert(amin != NULL);

            bitmap_alias_map = amin->subtree;
        }
    } else if (!nothing && !s->cancelled) {
        error_report("Error: block device name is not set");
        cancel_incoming_locked(s);
    }

    assert(nothing || s->cancelled || !!alias_map == !!bitmap_alias_map);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
        const char *bitmap_name;

        if (!qemu_get_counted_string(f, s->bitmap_alias)) {
            error_report("Unable to read bitmap alias string");
            return -EINVAL;
        }

        bitmap_name = s->bitmap_alias;
        if (!s->cancelled && bitmap_alias_map) {
            BitmapMigrationBitmapAlias *bmap_inner;

            bmap_inner = g_hash_table_lookup(bitmap_alias_map, s->bitmap_alias);
            if (!bmap_inner) {
                error_report("Error: Unknown bitmap alias '%s' on node "
                             "'%s' (alias '%s')", s->bitmap_alias,
                             s->bs->node_name, s->node_alias);
                cancel_incoming_locked(s);
            } else {
                bitmap_name = bmap_inner->name;
            }

            s->bmap_inner = bmap_inner;
        }

        if (!s->cancelled) {
            g_strlcpy(s->bitmap_name, bitmap_name, sizeof(s->bitmap_name));
            s->bitmap = bdrv_find_dirty_bitmap(s->bs, s->bitmap_name);

            /*
             * bitmap may be NULL here, it wouldn't be an error if it is the
             * first occurrence of the bitmap
             */
            if (!s->bitmap && !(s->flags & DIRTY_BITMAP_MIG_FLAG_START)) {
                error_report("Error: unknown dirty bitmap "
                             "'%s' for block device '%s'",
                             s->bitmap_name, s->bs->node_name);
                cancel_incoming_locked(s);
            }
        }
    } else if (!s->bitmap && !nothing && !s->cancelled) {
        error_report("Error: bitmap name is not set");
        cancel_incoming_locked(s);
    }

    return 0;
}
/*
 * dirty_bitmap_load
 *
 * Load a sequence of dirty bitmap chunks. Return an error only on fatal I/O
 * stream violations; on other errors just cancel the incoming bitmap
 * migration and return 0.
 *
 * Note that when incoming bitmap migration is cancelled, we still must read
 * all our chunks (and just ignore them), so as not to affect other migration
 * objects.
 */
static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id)
{
    GHashTable *alias_map = NULL;
    const MigrationParameters *mig_params = &migrate_get_current()->parameters;
    DBMLoadState *s = &((DBMState *)opaque)->load;
    int ret = 0;

    trace_dirty_bitmap_load_enter();

    if (version_id != 1) {
        QEMU_LOCK_GUARD(&s->lock);
        cancel_incoming_locked(s);
        return -EINVAL;
    }

    if (mig_params->has_block_bitmap_mapping) {
        alias_map = construct_alias_map(mig_params->block_bitmap_mapping,
                                        false, &error_abort);
    }

    do {
        QEMU_LOCK_GUARD(&s->lock);

        ret = dirty_bitmap_load_header(f, s, alias_map);
        if (ret < 0) {
            cancel_incoming_locked(s);
            goto fail;
        }

        if (s->flags & DIRTY_BITMAP_MIG_FLAG_START) {
            ret = dirty_bitmap_load_start(f, s);
        } else if (s->flags & DIRTY_BITMAP_MIG_FLAG_COMPLETE) {
            dirty_bitmap_load_complete(f, s);
        } else if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITS) {
            ret = dirty_bitmap_load_bits(f, s);
        }

        if (!ret) {
            ret = qemu_file_get_error(f);
        }

        if (ret) {
            cancel_incoming_locked(s);
            goto fail;
        }
    } while (!(s->flags & DIRTY_BITMAP_MIG_FLAG_EOS));

    trace_dirty_bitmap_load_success();
    ret = 0;
fail:
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }

    return ret;
}
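/*
 * .save_setup handler: collect the bitmaps to migrate and send a START chunk
 * for each of them.
 */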
static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms = NULL;

    qemu_mutex_lock_iothread();
    if (init_dirty_bitmap_migration(s) < 0) {
        qemu_mutex_unlock_iothread();
        return -1;
    }

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        send_bitmap_start(f, s, dbms);
    }
    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);
    qemu_mutex_unlock_iothread();
    return 0;
}

static bool dirty_bitmap_is_active(void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    return migrate_dirty_bitmaps() && !s->no_bitmaps;
}

static bool dirty_bitmap_is_active_iterate(void *opaque)
{
    return dirty_bitmap_is_active(opaque) && !runstate_is_running();
}

static bool dirty_bitmap_has_postcopy(void *opaque)
{
    return true;
}

static SaveVMHandlers savevm_dirty_bitmap_handlers = {
    .save_setup = dirty_bitmap_save_setup,
    .save_live_complete_postcopy = dirty_bitmap_save_complete,
    .save_live_complete_precopy = dirty_bitmap_save_complete,
    .has_postcopy = dirty_bitmap_has_postcopy,
    .state_pending_exact = dirty_bitmap_state_pending,
    .state_pending_estimate = dirty_bitmap_state_pending,
    .save_live_iterate = dirty_bitmap_save_iterate,
    .is_active_iterate = dirty_bitmap_is_active_iterate,
    .load_state = dirty_bitmap_load,
    .save_cleanup = dirty_bitmap_save_cleanup,
    .is_active = dirty_bitmap_is_active,
};
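/* Register the 'dirty-bitmap' section with the live migration framework */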
void dirty_bitmap_mig_init(void)
{
    QSIMPLEQ_INIT(&dbm_state.save.dbms_list);
    qemu_mutex_init(&dbm_state.load.lock);

    register_savevm_live("dirty-bitmap", 0, 1,
                         &savevm_dirty_bitmap_handlers,
                         &dbm_state);
}