/*
 * Block dirty bitmap postcopy migration
 *
 * Copyright IBM, Corp. 2009
 * Copyright (c) 2016-2017 Virtuozzo International GmbH. All rights reserved.
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 * This file is derived from migration/block.c, so its author and IBM copyright
 * are here, although content is quite different.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 *                                ***
 *
 * Here postcopy migration of dirty bitmaps is realized. Only QMP-addressable
 * bitmaps are migrated.
 *
 * Bitmap migration implies creating a bitmap with the same name and
 * granularity in the destination QEMU. If a bitmap with the same name (for the
 * same node) already exists on the destination, an error will be generated.
 *
 * format of migration:
 *
 * # Header (shared for different chunk types)
 * 1, 2 or 4 bytes: flags (see qemu_{get,put}_bitmap_flags)
 * [ 1 byte: node alias size   ] \  flags & DEVICE_NAME
 * [ n bytes: node alias       ] /
 * [ 1 byte: bitmap alias size ] \  flags & BITMAP_NAME
 * [ n bytes: bitmap alias     ] /
 *
 * # Start of bitmap migration (flags & START)
 * header
 * be32: granularity
 * 1 byte: bitmap flags (corresponds to BdrvDirtyBitmap)
 *   bit 0    -  bitmap is enabled
 *   bit 1    -  bitmap is persistent
 *   bit 2    -  bitmap is autoloading
 *   bits 3-7 - reserved, must be zero
 *
 * # Completion of bitmap migration (flags & COMPLETE)
 * header
 *
 * # Data chunk of bitmap migration
 * header
 * be64: start sector
 * be32: number of sectors
 * [ be64: buffer size ] \ ! (flags & ZEROES)
 * [ n bytes: buffer   ] /
 *
 * The last chunk in the stream should contain flags & EOS. A chunk may skip
 * the device and/or bitmap names, assuming them to be the same as in the
 * previous chunk.
 */
#include "qemu/osdep.h"
#include "block/block.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "sysemu/runstate.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "migration/misc.h"
#include "migration/migration.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "migration/register.h"
#include "qemu/hbitmap.h"
#include "qemu/cutils.h"
#include "qemu/id.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "trace.h"

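/*
 * A chunk of serialized bitmap data is limited to roughly CHUNK_SIZE bytes
 * (before the alignment applied in send_bitmap_bits()): one chunk covers
 * CHUNK_SIZE * 8 * granularity bytes of the disk, see the sectors_per_chunk
 * calculation in add_bitmaps_to_list().
 */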
#define CHUNK_SIZE     (1 << 10)

/*
 * Flags occupy one, two or four bytes (Big Endian). The size is determined as
 * follows:
 * in first (most significant) byte bit 8 is clear  --> one byte
 * in first byte bit 8 is set  --> two or four bytes, depending on second
 * byte:
 *   | in second byte bit 8 is clear  --> two bytes
 *   | in second byte bit 8 is set    --> four bytes
 */
#define DIRTY_BITMAP_MIG_FLAG_EOS           0x01
#define DIRTY_BITMAP_MIG_FLAG_ZEROES        0x02
#define DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME   0x04
#define DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME   0x08
#define DIRTY_BITMAP_MIG_FLAG_START         0x10
#define DIRTY_BITMAP_MIG_FLAG_COMPLETE      0x20
#define DIRTY_BITMAP_MIG_FLAG_BITS          0x40

#define DIRTY_BITMAP_MIG_EXTRA_FLAGS        0x80

#define DIRTY_BITMAP_MIG_START_FLAG_ENABLED          0x01
#define DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT       0x02
/* 0x04 was "AUTOLOAD" flags on older versions, now it is ignored */
#define DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK    0xf8
/* State of one bitmap during save process */
typedef struct SaveBitmapState {
    /* Written during setup phase. */
    BlockDriverState *bs;
    char *node_alias;
    char *bitmap_alias;
    BdrvDirtyBitmap *bitmap;
    uint64_t total_sectors;
    uint64_t sectors_per_chunk;
    QSIMPLEQ_ENTRY(SaveBitmapState) entry;
    uint8_t flags;

    /* For bulk phase. */
    bool bulk_completed;
    uint64_t cur_sector;
} SaveBitmapState;

/* State of the dirty bitmap migration (DBM) during save process */
typedef struct DBMSaveState {
    QSIMPLEQ_HEAD(, SaveBitmapState) dbms_list;

    bool bulk_completed;
    bool no_bitmaps;

    /* for send_bitmap_bits() */
    BlockDriverState *prev_bs;
    BdrvDirtyBitmap *prev_bitmap;
} DBMSaveState;

typedef struct LoadBitmapState {
    BlockDriverState *bs;
    BdrvDirtyBitmap *bitmap;
    bool migrated;
    bool enabled;
} LoadBitmapState;

/* State of the dirty bitmap migration (DBM) during load process */
typedef struct DBMLoadState {
    uint32_t flags;
    char node_alias[256];
    char bitmap_alias[256];
    char bitmap_name[BDRV_BITMAP_MAX_NAME_SIZE + 1];
    BlockDriverState *bs;
    BdrvDirtyBitmap *bitmap;

    bool before_vm_start_handled; /* set in dirty_bitmap_mig_before_vm_start */

    /*
     * cancelled
     * Incoming migration is cancelled for some reason. That means that we
     * still should read our chunks from migration stream, to not affect other
     * migration objects (like RAM), but just ignore them and do not touch any
     * bitmaps or nodes.
     */
    bool cancelled;

    GSList *bitmaps;
    QemuMutex lock; /* protect bitmaps */
} DBMLoadState;

typedef struct DBMState {
    DBMSaveState save;
    DBMLoadState load;
} DBMState;

static DBMState dbm_state;
/* For hash tables that map node/bitmap names to aliases */
typedef struct AliasMapInnerNode {
    char *string;
    GHashTable *subtree;
} AliasMapInnerNode;

static void free_alias_map_inner_node(void *amin_ptr)
{
    AliasMapInnerNode *amin = amin_ptr;

    g_free(amin->string);
    g_hash_table_unref(amin->subtree);
    g_free(amin);
}
/**
 * Construct an alias map based on the given QMP structure.
 *
 * (Note that we cannot store such maps in the MigrationParameters
 * object, because that struct is defined by the QAPI schema, which
 * makes it basically impossible to have dicts with arbitrary keys.
 * Therefore, we instead have to construct these maps when migration
 * starts.)
 *
 * @bbm is the block_bitmap_mapping from the migration parameters.
 *
 * If @name_to_alias is true, the returned hash table will map node
 * and bitmap names to their respective aliases (for outgoing
 * migration).
 *
 * If @name_to_alias is false, the returned hash table will map node
 * and bitmap aliases to their respective names (for incoming
 * migration).
 *
 * The hash table maps node names/aliases to AliasMapInnerNode
 * objects, whose .string is the respective node alias/name, and whose
 * .subtree table maps bitmap names/aliases to the respective bitmap
 * alias/name.
 */
static GHashTable *construct_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                       bool name_to_alias,
                                       Error **errp)
{
    GHashTable *alias_map;
    size_t max_node_name_len = sizeof_field(BlockDriverState, node_name) - 1;

    alias_map = g_hash_table_new_full(g_str_hash, g_str_equal,
                                      g_free, free_alias_map_inner_node);

    for (; bbm; bbm = bbm->next) {
        const BitmapMigrationNodeAlias *bmna = bbm->value;
        const BitmapMigrationBitmapAliasList *bmbal;
        AliasMapInnerNode *amin;
        GHashTable *bitmaps_map;
        const char *node_map_from, *node_map_to;

        if (!id_wellformed(bmna->alias)) {
            error_setg(errp, "The node alias '%s' is not well-formed",
                       bmna->alias);
            goto fail;
        }

        if (strlen(bmna->alias) > UINT8_MAX) {
            error_setg(errp, "The node alias '%s' is longer than %u bytes",
                       bmna->alias, UINT8_MAX);
            goto fail;
        }

        if (strlen(bmna->node_name) > max_node_name_len) {
            error_setg(errp, "The node name '%s' is longer than %zu bytes",
                       bmna->node_name, max_node_name_len);
            goto fail;
        }

        if (name_to_alias) {
            if (g_hash_table_contains(alias_map, bmna->node_name)) {
                error_setg(errp, "The node name '%s' is mapped twice",
                           bmna->node_name);
                goto fail;
            }

            node_map_from = bmna->node_name;
            node_map_to   = bmna->alias;
        } else {
            if (g_hash_table_contains(alias_map, bmna->alias)) {
                error_setg(errp, "The node alias '%s' is used twice",
                           bmna->alias);
                goto fail;
            }

            node_map_from = bmna->alias;
            node_map_to   = bmna->node_name;
        }

        bitmaps_map = g_hash_table_new_full(g_str_hash, g_str_equal,
                                            g_free, g_free);

        amin = g_new(AliasMapInnerNode, 1);
        *amin = (AliasMapInnerNode){
            .string = g_strdup(node_map_to),
            .subtree = bitmaps_map,
        };

        g_hash_table_insert(alias_map, g_strdup(node_map_from), amin);

        for (bmbal = bmna->bitmaps; bmbal; bmbal = bmbal->next) {
            const BitmapMigrationBitmapAlias *bmba = bmbal->value;
            const char *bmap_map_from, *bmap_map_to;

            if (strlen(bmba->alias) > UINT8_MAX) {
                error_setg(errp,
                           "The bitmap alias '%s' is longer than %u bytes",
                           bmba->alias, UINT8_MAX);
                goto fail;
            }

            if (strlen(bmba->name) > BDRV_BITMAP_MAX_NAME_SIZE) {
                error_setg(errp, "The bitmap name '%s' is longer than %d bytes",
                           bmba->name, BDRV_BITMAP_MAX_NAME_SIZE);
                goto fail;
            }

            if (name_to_alias) {
                bmap_map_from = bmba->name;
                bmap_map_to   = bmba->alias;

                if (g_hash_table_contains(bitmaps_map, bmba->name)) {
                    error_setg(errp, "The bitmap '%s'/'%s' is mapped twice",
                               bmna->node_name, bmba->name);
                    goto fail;
                }
            } else {
                bmap_map_from = bmba->alias;
                bmap_map_to   = bmba->name;

                if (g_hash_table_contains(bitmaps_map, bmba->alias)) {
                    error_setg(errp, "The bitmap alias '%s'/'%s' is used twice",
                               bmna->alias, bmba->alias);
                    goto fail;
                }
            }

            g_hash_table_insert(bitmaps_map,
                                g_strdup(bmap_map_from), g_strdup(bmap_map_to));
        }
    }

    return alias_map;

fail:
    g_hash_table_destroy(alias_map);
    return NULL;
}
/**
 * Run construct_alias_map() in both directions to check whether @bbm
 * is valid.
 * (This function is to be used by migration/migration.c to validate
 * the user-specified block-bitmap-mapping migration parameter.)
 *
 * Returns true if and only if the mapping is valid.
 */
bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                      Error **errp)
{
    GHashTable *alias_map;

    alias_map = construct_alias_map(bbm, true, errp);
    if (!alias_map) {
        return false;
    }
    g_hash_table_destroy(alias_map);

    alias_map = construct_alias_map(bbm, false, errp);
    if (!alias_map) {
        return false;
    }
    g_hash_table_destroy(alias_map);

    return true;
}
static uint32_t qemu_get_bitmap_flags(QEMUFile *f)
{
    uint8_t flags = qemu_get_byte(f);
    if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
        flags = flags << 8 | qemu_get_byte(f);
        if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
            flags = flags << 16 | qemu_get_be16(f);
        }
    }

    return flags;
}

static void qemu_put_bitmap_flags(QEMUFile *f, uint32_t flags)
{
    /* The code currently does not send flags as more than one byte */
    assert(!(flags & (0xffffff00 | DIRTY_BITMAP_MIG_EXTRA_FLAGS)));

    qemu_put_byte(f, flags);
}
static void send_bitmap_header(QEMUFile *f, DBMSaveState *s,
                               SaveBitmapState *dbms, uint32_t additional_flags)
{
    BlockDriverState *bs = dbms->bs;
    BdrvDirtyBitmap *bitmap = dbms->bitmap;
    uint32_t flags = additional_flags;
    trace_send_bitmap_header_enter();

    if (bs != s->prev_bs) {
        s->prev_bs = bs;
        flags |= DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME;
    }

    if (bitmap != s->prev_bitmap) {
        s->prev_bitmap = bitmap;
        flags |= DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME;
    }

    qemu_put_bitmap_flags(f, flags);

    if (flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
        qemu_put_counted_string(f, dbms->node_alias);
    }

    if (flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
        qemu_put_counted_string(f, dbms->bitmap_alias);
    }
}

static void send_bitmap_start(QEMUFile *f, DBMSaveState *s,
                              SaveBitmapState *dbms)
{
    send_bitmap_header(f, s, dbms, DIRTY_BITMAP_MIG_FLAG_START);
    qemu_put_be32(f, bdrv_dirty_bitmap_granularity(dbms->bitmap));
    qemu_put_byte(f, dbms->flags);
}

static void send_bitmap_complete(QEMUFile *f, DBMSaveState *s,
                                 SaveBitmapState *dbms)
{
    send_bitmap_header(f, s, dbms, DIRTY_BITMAP_MIG_FLAG_COMPLETE);
}

static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
                             SaveBitmapState *dbms,
                             uint64_t start_sector, uint32_t nr_sectors)
{
    /* align for buffer_is_zero() */
    uint64_t align = 4 * sizeof(long);
    uint64_t unaligned_size =
        bdrv_dirty_bitmap_serialization_size(
            dbms->bitmap, start_sector << BDRV_SECTOR_BITS,
            (uint64_t)nr_sectors << BDRV_SECTOR_BITS);
    uint64_t buf_size = QEMU_ALIGN_UP(unaligned_size, align);
    uint8_t *buf = g_malloc0(buf_size);
    uint32_t flags = DIRTY_BITMAP_MIG_FLAG_BITS;

    bdrv_dirty_bitmap_serialize_part(
        dbms->bitmap, buf, start_sector << BDRV_SECTOR_BITS,
        (uint64_t)nr_sectors << BDRV_SECTOR_BITS);

    if (buffer_is_zero(buf, buf_size)) {
        g_free(buf);
        buf = NULL;
        flags |= DIRTY_BITMAP_MIG_FLAG_ZEROES;
    }

    trace_send_bitmap_bits(flags, start_sector, nr_sectors, buf_size);

    send_bitmap_header(f, s, dbms, flags);

    qemu_put_be64(f, start_sector);
    qemu_put_be32(f, nr_sectors);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration. */
    if (flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
        qemu_fflush(f);
    } else {
        qemu_put_be64(f, buf_size);
        qemu_put_buffer(f, buf, buf_size);
    }

    g_free(buf);
}
/* Called with iothread lock taken.  */
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
{
    SaveBitmapState *dbms;

    while ((dbms = QSIMPLEQ_FIRST(&s->dbms_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&s->dbms_list, entry);
        bdrv_dirty_bitmap_set_busy(dbms->bitmap, false);
        bdrv_unref(dbms->bs);
        g_free(dbms->node_alias);
        g_free(dbms->bitmap_alias);
        g_free(dbms);
    }
}
/* Called with iothread lock taken. */
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
                               const char *bs_name, GHashTable *alias_map)
{
    BdrvDirtyBitmap *bitmap;
    SaveBitmapState *dbms;
    GHashTable *bitmap_aliases;
    const char *node_alias, *bitmap_name, *bitmap_alias;
    Error *local_err = NULL;

    /* When an alias map is given, @bs_name must be @bs's node name */
    assert(!alias_map || !strcmp(bs_name, bdrv_get_node_name(bs)));

    FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
        if (bdrv_dirty_bitmap_name(bitmap)) {
            break;
        }
    }
    if (!bitmap) {
        return 0;
    }

    bitmap_name = bdrv_dirty_bitmap_name(bitmap);

    if (!bs_name || strcmp(bs_name, "") == 0) {
        error_report("Bitmap '%s' in unnamed node can't be migrated",
                     bitmap_name);
        return -1;
    }

    if (alias_map) {
        const AliasMapInnerNode *amin = g_hash_table_lookup(alias_map, bs_name);

        if (!amin) {
            /* Skip bitmaps on nodes with no alias */
            return 0;
        }

        node_alias = amin->string;
        bitmap_aliases = amin->subtree;
    } else {
        node_alias = bs_name;
        bitmap_aliases = NULL;
    }

    if (node_alias[0] == '#') {
        error_report("Bitmap '%s' in a node with auto-generated "
                     "name '%s' can't be migrated",
                     bitmap_name, node_alias);
        return -1;
    }

    FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
        bitmap_name = bdrv_dirty_bitmap_name(bitmap);
        if (!bitmap_name) {
            continue;
        }

        if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_DEFAULT, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (bitmap_aliases) {
            bitmap_alias = g_hash_table_lookup(bitmap_aliases, bitmap_name);
            if (!bitmap_alias) {
                /* Skip bitmaps with no alias */
                continue;
            }
        } else {
            if (strlen(bitmap_name) > UINT8_MAX) {
                error_report("Cannot migrate bitmap '%s' on node '%s': "
                             "Name is longer than %u bytes",
                             bitmap_name, bs_name, UINT8_MAX);
                return -1;
            }
            bitmap_alias = bitmap_name;
        }

        bdrv_ref(bs);
        bdrv_dirty_bitmap_set_busy(bitmap, true);

        dbms = g_new0(SaveBitmapState, 1);
        dbms->bs = bs;
        dbms->node_alias = g_strdup(node_alias);
        dbms->bitmap_alias = g_strdup(bitmap_alias);
        dbms->bitmap = bitmap;
        dbms->total_sectors = bdrv_nb_sectors(bs);
        dbms->sectors_per_chunk = CHUNK_SIZE * 8 *
            bdrv_dirty_bitmap_granularity(bitmap) >> BDRV_SECTOR_BITS;
        if (bdrv_dirty_bitmap_enabled(bitmap)) {
            dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_ENABLED;
        }
        if (bdrv_dirty_bitmap_get_persistence(bitmap)) {
            dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
        }

        QSIMPLEQ_INSERT_TAIL(&s->dbms_list, dbms, entry);
    }

    return 0;
}
/* Called with iothread lock taken. */
static int init_dirty_bitmap_migration(DBMSaveState *s)
{
    BlockDriverState *bs;
    SaveBitmapState *dbms;
    GHashTable *handled_by_blk = g_hash_table_new(NULL, NULL);
    BlockBackend *blk;
    const MigrationParameters *mig_params = &migrate_get_current()->parameters;
    GHashTable *alias_map = NULL;

    if (mig_params->has_block_bitmap_mapping) {
        alias_map = construct_alias_map(mig_params->block_bitmap_mapping, true,
                                        &error_abort);
    }

    s->bulk_completed = false;
    s->prev_bs = NULL;
    s->prev_bitmap = NULL;
    s->no_bitmaps = false;

    if (!alias_map) {
        /*
         * Use blockdevice name for direct (or filtered) children of named block
         * backends.
         */
        for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
            const char *name = blk_name(blk);

            if (!name || strcmp(name, "") == 0) {
                continue;
            }

            bs = blk_bs(blk);

            /* Skip filters without bitmaps */
            while (bs && bs->drv && bs->drv->is_filter &&
                   !bdrv_has_named_bitmaps(bs))
            {
                bs = bdrv_filter_bs(bs);
            }

            if (bs && bs->drv && !bs->drv->is_filter) {
                if (add_bitmaps_to_list(s, bs, name, NULL)) {
                    goto fail;
                }
                g_hash_table_add(handled_by_blk, bs);
            }
        }
    }

    for (bs = bdrv_next_all_states(NULL); bs; bs = bdrv_next_all_states(bs)) {
        if (g_hash_table_contains(handled_by_blk, bs)) {
            continue;
        }

        if (add_bitmaps_to_list(s, bs, bdrv_get_node_name(bs), alias_map)) {
            goto fail;
        }
    }

    /* unset migration flags here, to not roll them back */
    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        bdrv_dirty_bitmap_skip_store(dbms->bitmap, true);
    }

    if (QSIMPLEQ_EMPTY(&s->dbms_list)) {
        s->no_bitmaps = true;
    }

    g_hash_table_destroy(handled_by_blk);
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }

    return 0;

fail:
    g_hash_table_destroy(handled_by_blk);
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }
    dirty_bitmap_do_save_cleanup(s);

    return -1;
}
/* Called with no lock taken.  */
static void bulk_phase_send_chunk(QEMUFile *f, DBMSaveState *s,
                                  SaveBitmapState *dbms)
{
    uint32_t nr_sectors = MIN(dbms->total_sectors - dbms->cur_sector,
                              dbms->sectors_per_chunk);

    send_bitmap_bits(f, s, dbms, dbms->cur_sector, nr_sectors);

    dbms->cur_sector += nr_sectors;
    if (dbms->cur_sector >= dbms->total_sectors) {
        dbms->bulk_completed = true;
    }
}

/* Called with no lock taken.  */
static void bulk_phase(QEMUFile *f, DBMSaveState *s, bool limit)
{
    SaveBitmapState *dbms;

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        while (!dbms->bulk_completed) {
            bulk_phase_send_chunk(f, s, dbms);
            if (limit && qemu_file_rate_limit(f)) {
                return;
            }
        }
    }

    s->bulk_completed = true;
}
/* for SaveVMHandlers */
static void dirty_bitmap_save_cleanup(void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    dirty_bitmap_do_save_cleanup(s);
}

static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    trace_dirty_bitmap_save_iterate(migration_in_postcopy());

    if (migration_in_postcopy() && !s->bulk_completed) {
        bulk_phase(f, s, true);
    }

    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);

    return s->bulk_completed;
}

/* Called with iothread lock taken.  */
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms;
    trace_dirty_bitmap_save_complete_enter();

    if (!s->bulk_completed) {
        bulk_phase(f, s, false);
    }

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        send_bitmap_complete(f, s, dbms);
    }

    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);

    trace_dirty_bitmap_save_complete_finish();

    dirty_bitmap_save_cleanup(opaque);
    return 0;
}
static void dirty_bitmap_save_pending(QEMUFile *f, void *opaque,
                                      uint64_t max_size,
                                      uint64_t *res_precopy_only,
                                      uint64_t *res_compatible,
                                      uint64_t *res_postcopy_only)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms;
    uint64_t pending = 0;

    qemu_mutex_lock_iothread();

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap);
        uint64_t sectors = dbms->bulk_completed ? 0 :
                           dbms->total_sectors - dbms->cur_sector;

        pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran);
    }

    qemu_mutex_unlock_iothread();

    trace_dirty_bitmap_save_pending(pending, max_size);

    *res_postcopy_only += pending;
}
/* First occurrence of this bitmap. It should be created if it doesn't exist */
static int dirty_bitmap_load_start(QEMUFile *f, DBMLoadState *s)
{
    Error *local_err = NULL;
    uint32_t granularity = qemu_get_be32(f);
    uint8_t flags = qemu_get_byte(f);
    LoadBitmapState *b;

    if (s->cancelled) {
        return 0;
    }

    if (s->bitmap) {
        error_report("Bitmap with the same name ('%s') already exists on "
                     "destination", bdrv_dirty_bitmap_name(s->bitmap));
        return -EINVAL;
    } else {
        s->bitmap = bdrv_create_dirty_bitmap(s->bs, granularity,
                                             s->bitmap_name, &local_err);
        if (!s->bitmap) {
            error_report_err(local_err);
            return -EINVAL;
        }
    }

    if (flags & DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK) {
        error_report("Unknown flags in migrated dirty bitmap header: %x",
                     flags);
        return -EINVAL;
    }

    if (flags & DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT) {
        bdrv_dirty_bitmap_set_persistence(s->bitmap, true);
    }

    bdrv_disable_dirty_bitmap(s->bitmap);
    if (flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED) {
        bdrv_dirty_bitmap_create_successor(s->bitmap, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
    }

    b = g_new(LoadBitmapState, 1);
    b->bs = s->bs;
    b->bitmap = s->bitmap;
    b->migrated = false;
    b->enabled = flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED;

    s->bitmaps = g_slist_prepend(s->bitmaps, b);

    return 0;
}
/*
 * before_vm_start_handle_item
 *
 * g_slist_foreach helper
 *
 * item is LoadBitmapState*
 * opaque is DBMLoadState*
 */
static void before_vm_start_handle_item(void *item, void *opaque)
{
    DBMLoadState *s = opaque;
    LoadBitmapState *b = item;

    if (b->enabled) {
        if (b->migrated) {
            bdrv_enable_dirty_bitmap(b->bitmap);
        } else {
            bdrv_dirty_bitmap_enable_successor(b->bitmap);
        }
    }

    if (b->migrated) {
        s->bitmaps = g_slist_remove(s->bitmaps, b);
        g_free(b);
    }
}

void dirty_bitmap_mig_before_vm_start(void)
{
    DBMLoadState *s = &dbm_state.load;

    qemu_mutex_lock(&s->lock);

    assert(!s->before_vm_start_handled);
    g_slist_foreach(s->bitmaps, before_vm_start_handle_item, s);
    s->before_vm_start_handled = true;

    qemu_mutex_unlock(&s->lock);
}
static void cancel_incoming_locked(DBMLoadState *s)
{
    GSList *item;

    if (s->cancelled) {
        return;
    }

    s->cancelled = true;
    s->bs = NULL;
    s->bitmap = NULL;

    /* Drop all unfinished bitmaps */
    for (item = s->bitmaps; item; item = g_slist_next(item)) {
        LoadBitmapState *b = item->data;

        /*
         * Bitmap must be unfinished, as finished bitmaps should already be
         * removed from the list.
         */
        assert(!s->before_vm_start_handled || !b->migrated);
        if (bdrv_dirty_bitmap_has_successor(b->bitmap)) {
            bdrv_reclaim_dirty_bitmap(b->bitmap, &error_abort);
        }
        bdrv_release_dirty_bitmap(b->bitmap);
    }

    g_slist_free_full(s->bitmaps, g_free);
    s->bitmaps = NULL;
}

void dirty_bitmap_mig_cancel_outgoing(void)
{
    dirty_bitmap_do_save_cleanup(&dbm_state.save);
}

void dirty_bitmap_mig_cancel_incoming(void)
{
    DBMLoadState *s = &dbm_state.load;

    qemu_mutex_lock(&s->lock);

    cancel_incoming_locked(s);

    qemu_mutex_unlock(&s->lock);
}
static void dirty_bitmap_load_complete(QEMUFile *f, DBMLoadState *s)
{
    GSList *item;
    trace_dirty_bitmap_load_complete();

    if (s->cancelled) {
        return;
    }

    bdrv_dirty_bitmap_deserialize_finish(s->bitmap);

    if (bdrv_dirty_bitmap_has_successor(s->bitmap)) {
        bdrv_reclaim_dirty_bitmap(s->bitmap, &error_abort);
    }

    for (item = s->bitmaps; item; item = g_slist_next(item)) {
        LoadBitmapState *b = item->data;

        if (b->bitmap == s->bitmap) {
            b->migrated = true;
            if (s->before_vm_start_handled) {
                s->bitmaps = g_slist_remove(s->bitmaps, b);
                g_free(b);
            }
            break;
        }
    }
}
static int dirty_bitmap_load_bits(QEMUFile *f, DBMLoadState *s)
{
    uint64_t first_byte = qemu_get_be64(f) << BDRV_SECTOR_BITS;
    uint64_t nr_bytes = (uint64_t)qemu_get_be32(f) << BDRV_SECTOR_BITS;
    trace_dirty_bitmap_load_bits_enter(first_byte >> BDRV_SECTOR_BITS,
                                       nr_bytes >> BDRV_SECTOR_BITS);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
        trace_dirty_bitmap_load_bits_zeroes();
        if (!s->cancelled) {
            bdrv_dirty_bitmap_deserialize_zeroes(s->bitmap, first_byte,
                                                 nr_bytes, false);
        }
    } else {
        size_t ret;
        g_autofree uint8_t *buf = NULL;
        uint64_t buf_size = qemu_get_be64(f);
        uint64_t needed_size;

        /*
         * The actual check for buf_size is done a bit later. We can't do it in
         * cancelled mode as we don't have the bitmap to check the constraints
         * (so, we allocate a buffer and read prior to the check). On the other
         * hand, we shouldn't blindly g_malloc the number from the stream.
         * Actually one chunk should not be larger than CHUNK_SIZE. Let's allow
         * a bit larger (which means that bitmap migration will fail anyway and
         * the whole migration will most probably fail soon due to broken
         * stream).
         */
        if (buf_size > 10 * CHUNK_SIZE) {
            error_report("Bitmap migration stream buffer allocation request "
                         "is too large");
            return -EIO;
        }

        buf = g_malloc(buf_size);
        ret = qemu_get_buffer(f, buf, buf_size);
        if (ret != buf_size) {
            error_report("Failed to read bitmap bits");
            return -EIO;
        }

        if (s->cancelled) {
            return 0;
        }

        needed_size = bdrv_dirty_bitmap_serialization_size(s->bitmap,
                                                           first_byte,
                                                           nr_bytes);

        if (needed_size > buf_size ||
            buf_size > QEMU_ALIGN_UP(needed_size, 4 * sizeof(long))
             /* Same alignment as used in send_bitmap_bits */
        ) {
            error_report("Migrated bitmap granularity doesn't "
                         "match the destination bitmap '%s' granularity",
                         bdrv_dirty_bitmap_name(s->bitmap));
            cancel_incoming_locked(s);
            return 0;
        }

        bdrv_dirty_bitmap_deserialize_part(s->bitmap, buf, first_byte, nr_bytes,
                                           false);
    }

    return 0;
}
static int dirty_bitmap_load_header(QEMUFile *f, DBMLoadState *s,
                                    GHashTable *alias_map)
{
    GHashTable *bitmap_alias_map = NULL;
    Error *local_err = NULL;
    bool nothing;

    s->flags = qemu_get_bitmap_flags(f);
    trace_dirty_bitmap_load_header(s->flags);

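    /*
     * "nothing" is set when the header carries no payload: no flag bits are
     * set other than (possibly) EOS.
     */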
    nothing = s->flags == (s->flags & DIRTY_BITMAP_MIG_FLAG_EOS);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
        if (!qemu_get_counted_string(f, s->node_alias)) {
            error_report("Unable to read node alias string");
            return -EINVAL;
        }

        if (!s->cancelled) {
            if (alias_map) {
                const AliasMapInnerNode *amin;

                amin = g_hash_table_lookup(alias_map, s->node_alias);
                if (!amin) {
                    error_setg(&local_err, "Error: Unknown node alias '%s'",
                               s->node_alias);
                    s->bs = NULL;
                } else {
                    bitmap_alias_map = amin->subtree;
                    s->bs = bdrv_lookup_bs(NULL, amin->string, &local_err);
                }
            } else {
                s->bs = bdrv_lookup_bs(s->node_alias, s->node_alias,
                                       &local_err);
            }

            if (!s->bs) {
                error_report_err(local_err);
                cancel_incoming_locked(s);
            }
        }
    } else if (s->bs) {
        if (alias_map) {
            const AliasMapInnerNode *amin;

            /* Must be present in the map, or s->bs would not be set */
            amin = g_hash_table_lookup(alias_map, s->node_alias);
            assert(amin != NULL);

            bitmap_alias_map = amin->subtree;
        }
    } else if (!nothing && !s->cancelled) {
        error_report("Error: block device name is not set");
        cancel_incoming_locked(s);
    }

    assert(nothing || s->cancelled || !!alias_map == !!bitmap_alias_map);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
        const char *bitmap_name;

        if (!qemu_get_counted_string(f, s->bitmap_alias)) {
            error_report("Unable to read bitmap alias string");
            return -EINVAL;
        }

        if (!s->cancelled) {
            if (bitmap_alias_map) {
                bitmap_name = g_hash_table_lookup(bitmap_alias_map,
                                                  s->bitmap_alias);
                if (!bitmap_name) {
                    error_report("Error: Unknown bitmap alias '%s' on node "
                                 "'%s' (alias '%s')", s->bitmap_alias,
                                 s->bs->node_name, s->node_alias);
                    cancel_incoming_locked(s);
                }
            } else {
                bitmap_name = s->bitmap_alias;
            }
        }

        if (!s->cancelled) {
            g_strlcpy(s->bitmap_name, bitmap_name, sizeof(s->bitmap_name));
            s->bitmap = bdrv_find_dirty_bitmap(s->bs, s->bitmap_name);

            /*
             * bitmap may be NULL here, it wouldn't be an error if it is the
             * first occurrence of the bitmap
             */
            if (!s->bitmap && !(s->flags & DIRTY_BITMAP_MIG_FLAG_START)) {
                error_report("Error: unknown dirty bitmap "
                             "'%s' for block device '%s'",
                             s->bitmap_name, s->bs->node_name);
                cancel_incoming_locked(s);
            }
        }
    } else if (!s->bitmap && !nothing && !s->cancelled) {
        error_report("Error: block device name is not set");
        cancel_incoming_locked(s);
    }

    return 0;
}
/*
 * dirty_bitmap_load
 *
 * Load a sequence of dirty bitmap chunks. Return an error only on fatal I/O
 * stream violations. On other errors just cancel the incoming bitmap migration
 * and return 0.
 *
 * Note that when incoming bitmap migration is canceled, we still must read all
 * our chunks (and just ignore them), to not affect other migration objects.
 */
static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id)
{
    GHashTable *alias_map = NULL;
    const MigrationParameters *mig_params = &migrate_get_current()->parameters;
    DBMLoadState *s = &((DBMState *)opaque)->load;
    int ret = 0;

    trace_dirty_bitmap_load_enter();

    if (version_id != 1) {
        QEMU_LOCK_GUARD(&s->lock);
        cancel_incoming_locked(s);
        return -EINVAL;
    }

    if (mig_params->has_block_bitmap_mapping) {
        alias_map = construct_alias_map(mig_params->block_bitmap_mapping,
                                        false, &error_abort);
    }

    do {
        QEMU_LOCK_GUARD(&s->lock);

        ret = dirty_bitmap_load_header(f, s, alias_map);
        if (ret < 0) {
            cancel_incoming_locked(s);
            goto fail;
        }

        if (s->flags & DIRTY_BITMAP_MIG_FLAG_START) {
            ret = dirty_bitmap_load_start(f, s);
        } else if (s->flags & DIRTY_BITMAP_MIG_FLAG_COMPLETE) {
            dirty_bitmap_load_complete(f, s);
        } else if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITS) {
            ret = dirty_bitmap_load_bits(f, s);
        }

        if (!ret) {
            ret = qemu_file_get_error(f);
        }

        if (ret) {
            cancel_incoming_locked(s);
            goto fail;
        }
    } while (!(s->flags & DIRTY_BITMAP_MIG_FLAG_EOS));

    trace_dirty_bitmap_load_success();
    ret = 0;
fail:
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }

    return ret;
}
static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms = NULL;

    if (init_dirty_bitmap_migration(s) < 0) {
        return -1;
    }

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        send_bitmap_start(f, s, dbms);
    }
    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);

    return 0;
}

static bool dirty_bitmap_is_active(void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    return migrate_dirty_bitmaps() && !s->no_bitmaps;
}

static bool dirty_bitmap_is_active_iterate(void *opaque)
{
    return dirty_bitmap_is_active(opaque) && !runstate_is_running();
}

static bool dirty_bitmap_has_postcopy(void *opaque)
{
    return true;
}

static SaveVMHandlers savevm_dirty_bitmap_handlers = {
    .save_setup = dirty_bitmap_save_setup,
    .save_live_complete_postcopy = dirty_bitmap_save_complete,
    .save_live_complete_precopy = dirty_bitmap_save_complete,
    .has_postcopy = dirty_bitmap_has_postcopy,
    .save_live_pending = dirty_bitmap_save_pending,
    .save_live_iterate = dirty_bitmap_save_iterate,
    .is_active_iterate = dirty_bitmap_is_active_iterate,
    .load_state = dirty_bitmap_load,
    .save_cleanup = dirty_bitmap_save_cleanup,
    .is_active = dirty_bitmap_is_active,
};

void dirty_bitmap_mig_init(void)
{
    QSIMPLEQ_INIT(&dbm_state.save.dbms_list);
    qemu_mutex_init(&dbm_state.load.lock);

    register_savevm_live("dirty-bitmap", 0, 1,
                         &savevm_dirty_bitmap_handlers,
                         &dbm_state);
}