@@ -118,6 +118,7 @@ static void check_guest_throttling(void);
 #define RAM_SAVE_FLAG_EOS      0x10
 #define RAM_SAVE_FLAG_CONTINUE 0x20
 #define RAM_SAVE_FLAG_XBZRLE   0x40
+/* 0x80 is reserved in migration.h start with 0x100 next */
 
 
 static struct defconfig_file {
@@ -475,6 +476,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
                 ram_bulk_stage = false;
             }
         } else {
+            int ret;
             uint8_t *p;
             int cont = (block == last_sent_block) ?
                 RAM_SAVE_FLAG_CONTINUE : 0;
@@ -483,7 +485,18 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
 
             /* In doubt sent page as normal */
             bytes_sent = -1;
-            if (is_zero_page(p)) {
+            ret = ram_control_save_page(f, block->offset,
+                               offset, TARGET_PAGE_SIZE, &bytes_sent);
+
+            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
+                if (ret != RAM_SAVE_CONTROL_DELAYED) {
+                    if (bytes_sent > 0) {
+                        acct_info.norm_pages++;
+                    } else if (bytes_sent == 0) {
+                        acct_info.dup_pages++;
+                    }
+                }
+            } else if (is_zero_page(p)) {
                 acct_info.dup_pages++;
                 bytes_sent = save_block_hdr(f, block, offset, cont,
                                             RAM_SAVE_FLAG_COMPRESS);
@@ -635,6 +648,10 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     }
 
     qemu_mutex_unlock_ramlist();
+
+    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
+    ram_control_after_iterate(f, RAM_CONTROL_SETUP);
+
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
     return 0;
@@ -653,6 +670,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         reset_ram_globals();
     }
 
+    ram_control_before_iterate(f, RAM_CONTROL_ROUND);
+
     t0 = qemu_get_clock_ns(rt_clock);
     i = 0;
     while ((ret = qemu_file_rate_limit(f)) == 0) {
@@ -684,6 +703,12 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 
     qemu_mutex_unlock_ramlist();
 
+    /*
+     * Must occur before EOS (or any QEMUFile operation)
+     * because of RDMA protocol.
+     */
+    ram_control_after_iterate(f, RAM_CONTROL_ROUND);
+
     if (ret < 0) {
         bytes_transferred += total_sent;
         return ret;
@@ -701,6 +726,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     qemu_mutex_lock_ramlist();
     migration_bitmap_sync();
 
+    ram_control_before_iterate(f, RAM_CONTROL_FINISH);
+
     /* try transferring iterative blocks of memory */
 
     /* flush all remaining blocks regardless of rate limiting */
@@ -714,6 +741,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         }
         bytes_transferred += bytes_sent;
     }
+
+    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
     migration_end();
 
     qemu_mutex_unlock_ramlist();
@@ -917,6 +946,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                 ret = -EINVAL;
                 goto done;
             }
+        } else if (flags & RAM_SAVE_FLAG_HOOK) {
+            ram_control_load_hook(f, flags);
         }
         error = qemu_file_get_error(f);
         if (error) {
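
Note on the hook convention used above (an illustrative sketch, not part of the patch): ram_save_block() now offers each dirty page to ram_control_save_page() before the generic zero-page/XBZRLE/normal paths. A QEMUFile whose transport installs no hook answers RAM_SAVE_CONTROL_NOT_SUPP, so the generic path runs; a hook such as the RDMA transport may also answer RAM_SAVE_CONTROL_DELAYED, meaning the page goes out asynchronously, which is why the diff skips the norm_pages/dup_pages accounting in that case. The ram_control_before_iterate()/ram_control_after_iterate() pairs bracket the SETUP, ROUND, and FINISH phases so such a transport can register memory and flush its queue; per the comment in ram_save_iterate(), the ROUND flush must hit the wire before the RAM_SAVE_FLAG_EOS marker. Below is a minimal sketch of the assumed dispatch; the constant values and the QEMUFile ops/opaque layout are assumptions for illustration, not quotes from the tree:

/* Hypothetical values; the real definitions live in migration.h. */
#define RAM_SAVE_CONTROL_NOT_SUPP -1000
#define RAM_SAVE_CONTROL_DELAYED  -2000

size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
                             ram_addr_t offset, size_t size,
                             int *bytes_sent)
{
    if (f->ops->save_page) {   /* e.g. installed by the RDMA transport */
        int ret = f->ops->save_page(f, f->opaque, block_offset,
                                    offset, size, bytes_sent);
        if (ret != RAM_SAVE_CONTROL_DELAYED && *bytes_sent > 0) {
            /* Page went out synchronously: credit it against rate limiting. */
            qemu_update_position(f, *bytes_sent);
        }
        return ret;
    }
    /* No hook installed: caller falls back to is_zero_page() etc. */
    return RAM_SAVE_CONTROL_NOT_SUPP;
}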