migration.c

/*
 * Migration support for VFIO devices
 *
 * Copyright NVIDIA, Inc. 2020
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "sysemu/runstate.h"
#include "hw/vfio/vfio-common.h"
#include "migration/misc.h"
#include "migration/savevm.h"
#include "migration/vmstate.h"
#include "migration/qemu-file.h"
#include "migration/register.h"
#include "migration/blocker.h"
#include "qapi/error.h"
#include "qapi/qapi-events-vfio.h"
#include "exec/ramlist.h"
#include "exec/ram_addr.h"
#include "pci.h"
#include "trace.h"
#include "hw/hw.h"

/*
 * Flags to be used as unique delimiters for VFIO devices in the migration
 * stream. These flags are composed as:
 * 0xffffffff => MSB 32-bit all 1s
 * 0xef10 => Magic ID, represents emulated (virtual) function IO
 * 0x0000 => 16-bits reserved for flags
 *
 * The beginning of state information is marked by _DEV_CONFIG_STATE,
 * _DEV_SETUP_STATE, or _DEV_DATA_STATE; the end of each is marked by
 * _END_OF_STATE.
 */
#define VFIO_MIG_FLAG_END_OF_STATE      (0xffffffffef100001ULL)
#define VFIO_MIG_FLAG_DEV_CONFIG_STATE  (0xffffffffef100002ULL)
#define VFIO_MIG_FLAG_DEV_SETUP_STATE   (0xffffffffef100003ULL)
#define VFIO_MIG_FLAG_DEV_DATA_STATE    (0xffffffffef100004ULL)
#define VFIO_MIG_FLAG_DEV_INIT_DATA_SENT (0xffffffffef100005ULL)
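
/*
 * Illustrative compile-time check of the layout described above (a sanity
 * sketch only; assumes QEMU_BUILD_BUG_ON, pulled in via qemu/osdep.h):
 * bits 63:32 carry the all-1s marker, bits 31:16 the 0xef10 magic, and
 * bits 15:0 the flag value.
 */
QEMU_BUILD_BUG_ON((VFIO_MIG_FLAG_DEV_DATA_STATE >> 32) != 0xffffffffULL);
QEMU_BUILD_BUG_ON(((VFIO_MIG_FLAG_DEV_DATA_STATE >> 16) & 0xffff) != 0xef10);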

/*
 * This is an arbitrary size based on migration of mlx5 devices, where typically
 * total device migration size is on the order of 100s of MB. Testing with
 * larger values, e.g. 128MB and 1GB, did not show a performance improvement.
 */
#define VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE (1 * MiB)

static int64_t bytes_transferred;

static const char *mig_state_to_str(enum vfio_device_mig_state state)
{
    switch (state) {
    case VFIO_DEVICE_STATE_ERROR:
        return "ERROR";
    case VFIO_DEVICE_STATE_STOP:
        return "STOP";
    case VFIO_DEVICE_STATE_RUNNING:
        return "RUNNING";
    case VFIO_DEVICE_STATE_STOP_COPY:
        return "STOP_COPY";
    case VFIO_DEVICE_STATE_RESUMING:
        return "RESUMING";
    case VFIO_DEVICE_STATE_RUNNING_P2P:
        return "RUNNING_P2P";
    case VFIO_DEVICE_STATE_PRE_COPY:
        return "PRE_COPY";
    case VFIO_DEVICE_STATE_PRE_COPY_P2P:
        return "PRE_COPY_P2P";
    default:
        return "UNKNOWN STATE";
    }
}

static QapiVfioMigrationState
mig_state_to_qapi_state(enum vfio_device_mig_state state)
{
    switch (state) {
    case VFIO_DEVICE_STATE_STOP:
        return QAPI_VFIO_MIGRATION_STATE_STOP;
    case VFIO_DEVICE_STATE_RUNNING:
        return QAPI_VFIO_MIGRATION_STATE_RUNNING;
    case VFIO_DEVICE_STATE_STOP_COPY:
        return QAPI_VFIO_MIGRATION_STATE_STOP_COPY;
    case VFIO_DEVICE_STATE_RESUMING:
        return QAPI_VFIO_MIGRATION_STATE_RESUMING;
    case VFIO_DEVICE_STATE_RUNNING_P2P:
        return QAPI_VFIO_MIGRATION_STATE_RUNNING_P2P;
    case VFIO_DEVICE_STATE_PRE_COPY:
        return QAPI_VFIO_MIGRATION_STATE_PRE_COPY;
    case VFIO_DEVICE_STATE_PRE_COPY_P2P:
        return QAPI_VFIO_MIGRATION_STATE_PRE_COPY_P2P;
    default:
        g_assert_not_reached();
    }
}

static void vfio_migration_send_event(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;
    DeviceState *dev = vbasedev->dev;
    g_autofree char *qom_path = NULL;
    Object *obj;

    if (!vbasedev->migration_events) {
        return;
    }

    g_assert(vbasedev->ops->vfio_get_object);
    obj = vbasedev->ops->vfio_get_object(vbasedev);
    g_assert(obj);
    qom_path = object_get_canonical_path(obj);

    qapi_event_send_vfio_migration(
        dev->id, qom_path, mig_state_to_qapi_state(migration->device_state));
}

static void vfio_migration_set_device_state(VFIODevice *vbasedev,
                                            enum vfio_device_mig_state state)
{
    VFIOMigration *migration = vbasedev->migration;

    trace_vfio_migration_set_device_state(vbasedev->name,
                                          mig_state_to_str(state));

    migration->device_state = state;
    vfio_migration_send_event(vbasedev);
}

static int vfio_migration_set_state(VFIODevice *vbasedev,
                                    enum vfio_device_mig_state new_state,
                                    enum vfio_device_mig_state recover_state,
                                    Error **errp)
{
    VFIOMigration *migration = vbasedev->migration;
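    /*
     * struct vfio_device_feature ends in a flexible array member, so build
     * the header plus the mig_state payload in one suitably sized and
     * aligned stack buffer rather than allocating it on the heap.
     */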
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
                              sizeof(struct vfio_device_feature_mig_state),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_mig_state *mig_state =
        (struct vfio_device_feature_mig_state *)feature->data;
    int ret;
    g_autofree char *error_prefix =
        g_strdup_printf("%s: Failed setting device state to %s.",
                        vbasedev->name, mig_state_to_str(new_state));

    trace_vfio_migration_set_state(vbasedev->name, mig_state_to_str(new_state),
                                   mig_state_to_str(recover_state));

    if (new_state == migration->device_state) {
        return 0;
    }

    feature->argsz = sizeof(buf);
    feature->flags =
        VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
    mig_state->device_state = new_state;
    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
        /* Try to set the device in some good state */
        ret = -errno;

        if (recover_state == VFIO_DEVICE_STATE_ERROR) {
            error_setg_errno(errp, errno,
                             "%s Recover state is ERROR. Resetting device",
                             error_prefix);

            goto reset_device;
        }

        error_setg_errno(errp, errno,
                         "%s Setting device in recover state %s",
                         error_prefix, mig_state_to_str(recover_state));

        mig_state->device_state = recover_state;
        if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
            ret = -errno;
            /*
             * If setting the device in recover state fails, report
             * the error here and propagate the first error.
             */
            error_report(
                "%s: Failed setting device in recover state, err: %s. Resetting device",
                vbasedev->name, strerror(errno));

            goto reset_device;
        }

        vfio_migration_set_device_state(vbasedev, recover_state);

        return ret;
    }

    vfio_migration_set_device_state(vbasedev, new_state);
    if (mig_state->data_fd != -1) {
        if (migration->data_fd != -1) {
            /*
             * This can happen if the device is asynchronously reset and
             * terminates a data transfer.
             */
            error_setg(errp, "%s: data_fd out of sync", vbasedev->name);
            close(mig_state->data_fd);

            return -EBADF;
        }

        migration->data_fd = mig_state->data_fd;
    }

    return 0;

reset_device:
    if (ioctl(vbasedev->fd, VFIO_DEVICE_RESET)) {
        hw_error("%s: Failed resetting device, err: %s", vbasedev->name,
                 strerror(errno));
    }

    vfio_migration_set_device_state(vbasedev, VFIO_DEVICE_STATE_RUNNING);

    return ret;
}

/*
 * Some device state transitions require resetting the device if they fail.
 * This function sets the device in new_state and resets the device if that
 * fails. Reset is done by using ERROR as the recover state.
 */
static int
vfio_migration_set_state_or_reset(VFIODevice *vbasedev,
                                  enum vfio_device_mig_state new_state,
                                  Error **errp)
{
    return vfio_migration_set_state(vbasedev, new_state,
                                    VFIO_DEVICE_STATE_ERROR, errp);
}

static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
                            uint64_t data_size)
{
    VFIOMigration *migration = vbasedev->migration;
    int ret;

    ret = qemu_file_get_to_fd(f, migration->data_fd, data_size);
    trace_vfio_load_state_device_data(vbasedev->name, data_size, ret);

    return ret;
}

static int vfio_save_device_config_state(QEMUFile *f, void *opaque,
                                         Error **errp)
{
    VFIODevice *vbasedev = opaque;
    int ret;

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE);

    if (vbasedev->ops && vbasedev->ops->vfio_save_config) {
        ret = vbasedev->ops->vfio_save_config(vbasedev, f, errp);
        if (ret) {
            return ret;
        }
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    trace_vfio_save_device_config_state(vbasedev->name);

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to save state");
    }

    return ret;
}

static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    uint64_t data;

    if (vbasedev->ops && vbasedev->ops->vfio_load_config) {
        int ret;

        ret = vbasedev->ops->vfio_load_config(vbasedev, f);
        if (ret) {
            error_report("%s: Failed to load device config space",
                         vbasedev->name);
            return ret;
        }
    }

    data = qemu_get_be64(f);
    if (data != VFIO_MIG_FLAG_END_OF_STATE) {
        error_report("%s: Failed loading device config space, "
                     "end flag incorrect 0x%"PRIx64, vbasedev->name, data);
        return -EINVAL;
    }

    trace_vfio_load_device_config_state(vbasedev->name);
    return qemu_file_get_error(f);
}

static void vfio_migration_cleanup(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    close(migration->data_fd);
    migration->data_fd = -1;
}

static int vfio_query_stop_copy_size(VFIODevice *vbasedev,
                                     uint64_t *stop_copy_size)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
                              sizeof(struct vfio_device_feature_mig_data_size),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_mig_data_size *mig_data_size =
        (struct vfio_device_feature_mig_data_size *)feature->data;

    feature->argsz = sizeof(buf);
    feature->flags =
        VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIG_DATA_SIZE;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
        return -errno;
    }

    *stop_copy_size = mig_data_size->stop_copy_length;

    return 0;
}

static int vfio_query_precopy_size(VFIOMigration *migration)
{
    struct vfio_precopy_info precopy = {
        .argsz = sizeof(precopy),
    };

    migration->precopy_init_size = 0;
    migration->precopy_dirty_size = 0;

    if (ioctl(migration->data_fd, VFIO_MIG_GET_PRECOPY_INFO, &precopy)) {
        return -errno;
    }

    migration->precopy_init_size = precopy.initial_bytes;
    migration->precopy_dirty_size = precopy.dirty_bytes;

    return 0;
}

/* Returns the size of saved data on success and -errno on error */
static ssize_t vfio_save_block(QEMUFile *f, VFIOMigration *migration)
{
    ssize_t data_size;

    data_size = read(migration->data_fd, migration->data_buffer,
                     migration->data_buffer_size);
    if (data_size < 0) {
        /*
         * Pre-copy emptied all the device state for now. For more information,
         * please refer to the Linux kernel VFIO uAPI.
         */
        if (errno == ENOMSG) {
            return 0;
        }

        return -errno;
    }
    if (data_size == 0) {
        return 0;
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
    qemu_put_be64(f, data_size);
    qemu_put_buffer(f, migration->data_buffer, data_size);
    bytes_transferred += data_size;

    trace_vfio_save_block(migration->vbasedev->name, data_size);
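    /* GNU "a ?: b": return the QEMUFile error if set, else the data size */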
    return qemu_file_get_error(f) ?: data_size;
}

static void vfio_update_estimated_pending_data(VFIOMigration *migration,
                                               uint64_t data_size)
{
    if (!data_size) {
        /*
         * Pre-copy emptied all the device state for now, update estimated sizes
         * accordingly.
         */
        migration->precopy_init_size = 0;
        migration->precopy_dirty_size = 0;

        return;
    }

    if (migration->precopy_init_size) {
        uint64_t init_size = MIN(migration->precopy_init_size, data_size);

        migration->precopy_init_size -= init_size;
        data_size -= init_size;
    }

    migration->precopy_dirty_size -= MIN(migration->precopy_dirty_size,
                                         data_size);
}

static bool vfio_precopy_supported(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    return migration->mig_flags & VFIO_MIGRATION_PRE_COPY;
}

/* ---------------------------------------------------------------------- */

static int vfio_save_prepare(void *opaque, Error **errp)
{
    VFIODevice *vbasedev = opaque;

    /*
     * Snapshot uses neither postcopy nor background snapshot, so allow
     * snapshot even if those capabilities are enabled.
     */
    if (runstate_check(RUN_STATE_SAVE_VM)) {
        return 0;
    }

    if (migrate_postcopy_ram()) {
        error_setg(
            errp, "%s: VFIO migration is not supported with postcopy migration",
            vbasedev->name);
        return -EOPNOTSUPP;
    }

    if (migrate_background_snapshot()) {
        error_setg(
            errp,
            "%s: VFIO migration is not supported with background snapshot",
            vbasedev->name);
        return -EOPNOTSUPP;
    }

    return 0;
}

static int vfio_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint64_t stop_copy_size = VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE;
    int ret;

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE);

    vfio_query_stop_copy_size(vbasedev, &stop_copy_size);
    migration->data_buffer_size = MIN(VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE,
                                      stop_copy_size);
    migration->data_buffer = g_try_malloc0(migration->data_buffer_size);
    if (!migration->data_buffer) {
        error_setg(errp, "%s: Failed to allocate migration data buffer",
                   vbasedev->name);
        return -ENOMEM;
    }

    if (vfio_precopy_supported(vbasedev)) {
        switch (migration->device_state) {
        case VFIO_DEVICE_STATE_RUNNING:
            ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_PRE_COPY,
                                           VFIO_DEVICE_STATE_RUNNING, errp);
            if (ret) {
                return ret;
            }

            vfio_query_precopy_size(migration);

            break;
        case VFIO_DEVICE_STATE_STOP:
            /* vfio_save_complete_precopy() will go to STOP_COPY */
            break;
        default:
            error_setg(errp, "%s: Invalid device state %d", vbasedev->name,
                       migration->device_state);
            return -EINVAL;
        }
    }

    trace_vfio_save_setup(vbasedev->name, migration->data_buffer_size);

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "%s: save setup failed", vbasedev->name);
    }

    return ret;
}

static void vfio_save_cleanup(void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    Error *local_err = NULL;
    int ret;

    /*
     * Changing device state from STOP_COPY to STOP can take time. Do it here,
     * after migration has completed, so it won't increase downtime.
     */
    if (migration->device_state == VFIO_DEVICE_STATE_STOP_COPY) {
        ret = vfio_migration_set_state_or_reset(vbasedev,
                                                VFIO_DEVICE_STATE_STOP,
                                                &local_err);
        if (ret) {
            error_report_err(local_err);
        }
    }

    g_free(migration->data_buffer);
    migration->data_buffer = NULL;
    migration->precopy_init_size = 0;
    migration->precopy_dirty_size = 0;
    migration->initial_data_sent = false;
    vfio_migration_cleanup(vbasedev);
    trace_vfio_save_cleanup(vbasedev->name);
}

static void vfio_state_pending_estimate(void *opaque, uint64_t *must_precopy,
                                        uint64_t *can_postcopy)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;

    if (!vfio_device_state_is_precopy(vbasedev)) {
        return;
    }

    *must_precopy +=
        migration->precopy_init_size + migration->precopy_dirty_size;

    trace_vfio_state_pending_estimate(vbasedev->name, *must_precopy,
                                      *can_postcopy,
                                      migration->precopy_init_size,
                                      migration->precopy_dirty_size);
}

/*
 * Migration size of VFIO devices can be as little as a few KBs or as big as
 * many GBs. This value should be big enough to cover the worst case.
 */
#define VFIO_MIG_STOP_COPY_SIZE (100 * GiB)

static void vfio_state_pending_exact(void *opaque, uint64_t *must_precopy,
                                     uint64_t *can_postcopy)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint64_t stop_copy_size = VFIO_MIG_STOP_COPY_SIZE;

    /*
     * If getting the pending migration size fails, VFIO_MIG_STOP_COPY_SIZE is
     * reported so the downtime limit won't be violated.
     */
    vfio_query_stop_copy_size(vbasedev, &stop_copy_size);
    *must_precopy += stop_copy_size;

    if (vfio_device_state_is_precopy(vbasedev)) {
        vfio_query_precopy_size(migration);

        *must_precopy +=
            migration->precopy_init_size + migration->precopy_dirty_size;
    }

    trace_vfio_state_pending_exact(vbasedev->name, *must_precopy, *can_postcopy,
                                   stop_copy_size, migration->precopy_init_size,
                                   migration->precopy_dirty_size);
}

static bool vfio_is_active_iterate(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    return vfio_device_state_is_precopy(vbasedev);
}

/*
 * Note about migration rate limiting: VFIO migration buffer size is currently
 * limited to 1MB, so there is no need to check if migration rate exceeded (as
 * in the worst case it will exceed by 1MB). However, if the buffer size is
 * later changed to a bigger value, migration rate should be enforced here.
 */
static int vfio_save_iterate(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    ssize_t data_size;

    data_size = vfio_save_block(f, migration);
    if (data_size < 0) {
        return data_size;
    }

    vfio_update_estimated_pending_data(migration, data_size);

    if (migrate_switchover_ack() && !migration->precopy_init_size &&
        !migration->initial_data_sent) {
        qemu_put_be64(f, VFIO_MIG_FLAG_DEV_INIT_DATA_SENT);
        migration->initial_data_sent = true;
    } else {
        qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
    }

    trace_vfio_save_iterate(vbasedev->name, migration->precopy_init_size,
                            migration->precopy_dirty_size);

    return !migration->precopy_init_size && !migration->precopy_dirty_size;
}
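
/*
 * A hypothetical sketch of the rate enforcement mentioned above, should the
 * buffer ever be enlarged (assumes QEMU's migration_rate_exceeded() helper):
 *
 *     while (!migration_rate_exceeded(f)) {
 *         data_size = vfio_save_block(f, migration);
 *         if (data_size <= 0) {
 *             break;
 *         }
 *     }
 */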

static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    ssize_t data_size;
    int ret;
    Error *local_err = NULL;

    /* We reach here with device state STOP or STOP_COPY only */
    ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP_COPY,
                                   VFIO_DEVICE_STATE_STOP, &local_err);
    if (ret) {
        error_report_err(local_err);
        return ret;
    }

    do {
        data_size = vfio_save_block(f, vbasedev->migration);
        if (data_size < 0) {
            return data_size;
        }
    } while (data_size);

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
    ret = qemu_file_get_error(f);

    trace_vfio_save_complete_precopy(vbasedev->name, ret);

    return ret;
}

static void vfio_save_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    Error *local_err = NULL;
    int ret;

    ret = vfio_save_device_config_state(f, opaque, &local_err);
    if (ret) {
        error_prepend(&local_err,
                      "vfio: Failed to save device config space of %s - ",
                      vbasedev->name);
        qemu_file_set_error_obj(f, ret, local_err);
    }
}

static int vfio_load_setup(QEMUFile *f, void *opaque, Error **errp)
{
    VFIODevice *vbasedev = opaque;

    return vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RESUMING,
                                    vbasedev->migration->device_state, errp);
}

static int vfio_load_cleanup(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    vfio_migration_cleanup(vbasedev);
    trace_vfio_load_cleanup(vbasedev->name);

    return 0;
}
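
/*
 * The incoming stream is a sequence of records delimited by the
 * VFIO_MIG_FLAG_* values written on the save side: SETUP and DATA records
 * are each terminated by END_OF_STATE, while a CONFIG record (loaded last)
 * completes the device state.
 */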
static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
{
    VFIODevice *vbasedev = opaque;
    int ret = 0;
    uint64_t data;

    data = qemu_get_be64(f);
    while (data != VFIO_MIG_FLAG_END_OF_STATE) {
        trace_vfio_load_state(vbasedev->name, data);

        switch (data) {
        case VFIO_MIG_FLAG_DEV_CONFIG_STATE:
        {
            return vfio_load_device_config_state(f, opaque);
        }
        case VFIO_MIG_FLAG_DEV_SETUP_STATE:
        {
            data = qemu_get_be64(f);
            if (data == VFIO_MIG_FLAG_END_OF_STATE) {
                return ret;
            } else {
                error_report("%s: SETUP STATE: EOS not found 0x%"PRIx64,
                             vbasedev->name, data);
                return -EINVAL;
            }
            break;
        }
        case VFIO_MIG_FLAG_DEV_DATA_STATE:
        {
            uint64_t data_size = qemu_get_be64(f);

            if (data_size) {
                ret = vfio_load_buffer(f, vbasedev, data_size);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }
        case VFIO_MIG_FLAG_DEV_INIT_DATA_SENT:
        {
            if (!vfio_precopy_supported(vbasedev) ||
                !migrate_switchover_ack()) {
                error_report("%s: Received INIT_DATA_SENT but switchover ack "
                             "is not used", vbasedev->name);
                return -EINVAL;
            }

            ret = qemu_loadvm_approve_switchover();
            if (ret) {
                error_report(
                    "%s: qemu_loadvm_approve_switchover failed, err=%d (%s)",
                    vbasedev->name, ret, strerror(-ret));
            }

            return ret;
        }
        default:
            error_report("%s: Unknown tag 0x%"PRIx64, vbasedev->name, data);
            return -EINVAL;
        }

        data = qemu_get_be64(f);
        ret = qemu_file_get_error(f);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

static bool vfio_switchover_ack_needed(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    return vfio_precopy_supported(vbasedev);
}

static const SaveVMHandlers savevm_vfio_handlers = {
    .save_prepare = vfio_save_prepare,
    .save_setup = vfio_save_setup,
    .save_cleanup = vfio_save_cleanup,
    .state_pending_estimate = vfio_state_pending_estimate,
    .state_pending_exact = vfio_state_pending_exact,
    .is_active_iterate = vfio_is_active_iterate,
    .save_live_iterate = vfio_save_iterate,
    .save_live_complete_precopy = vfio_save_complete_precopy,
    .save_state = vfio_save_state,
    .load_setup = vfio_load_setup,
    .load_cleanup = vfio_load_cleanup,
    .load_state = vfio_load_state,
    .switchover_ack_needed = vfio_switchover_ack_needed,
};

/* ---------------------------------------------------------------------- */

static void vfio_vmstate_change_prepare(void *opaque, bool running,
                                        RunState state)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    enum vfio_device_mig_state new_state;
    Error *local_err = NULL;
    int ret;

    new_state = migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ?
                    VFIO_DEVICE_STATE_PRE_COPY_P2P :
                    VFIO_DEVICE_STATE_RUNNING_P2P;

    ret = vfio_migration_set_state_or_reset(vbasedev, new_state, &local_err);
    if (ret) {
        /*
         * Migration should be aborted in this case, but vm_state_notify()
         * currently does not support reporting failures.
         */
        migration_file_set_error(ret, local_err);
    }

    trace_vfio_vmstate_change_prepare(vbasedev->name, running,
                                      RunState_str(state),
                                      mig_state_to_str(new_state));
}

static void vfio_vmstate_change(void *opaque, bool running, RunState state)
{
    VFIODevice *vbasedev = opaque;
    enum vfio_device_mig_state new_state;
    Error *local_err = NULL;
    int ret;

    if (running) {
        new_state = VFIO_DEVICE_STATE_RUNNING;
    } else {
        new_state =
            (vfio_device_state_is_precopy(vbasedev) &&
             (state == RUN_STATE_FINISH_MIGRATE || state == RUN_STATE_PAUSED)) ?
                VFIO_DEVICE_STATE_STOP_COPY :
                VFIO_DEVICE_STATE_STOP;
    }

    ret = vfio_migration_set_state_or_reset(vbasedev, new_state, &local_err);
    if (ret) {
        /*
         * Migration should be aborted in this case, but vm_state_notify()
         * currently does not support reporting failures.
         */
        migration_file_set_error(ret, local_err);
    }

    trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state),
                              mig_state_to_str(new_state));
}

static int vfio_migration_state_notifier(NotifierWithReturn *notifier,
                                         MigrationEvent *e, Error **errp)
{
    VFIOMigration *migration = container_of(notifier, VFIOMigration,
                                            migration_state);
    VFIODevice *vbasedev = migration->vbasedev;
    Error *local_err = NULL;
    int ret;

    trace_vfio_migration_state_notifier(vbasedev->name, e->type);

    if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        /*
         * MigrationNotifyFunc may not return an error code and an Error
         * object for MIG_EVENT_PRECOPY_FAILED. Hence, report the error
         * locally and ignore the errp argument.
         */
        ret = vfio_migration_set_state_or_reset(vbasedev,
                                                VFIO_DEVICE_STATE_RUNNING,
                                                &local_err);
        if (ret) {
            error_report_err(local_err);
        }
    }

    return 0;
}

static void vfio_migration_free(VFIODevice *vbasedev)
{
    g_free(vbasedev->migration);
    vbasedev->migration = NULL;
}

static int vfio_migration_query_flags(VFIODevice *vbasedev, uint64_t *mig_flags)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
                              sizeof(struct vfio_device_feature_migration),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_migration *mig =
        (struct vfio_device_feature_migration *)feature->data;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;
    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
        return -errno;
    }

    *mig_flags = mig->flags;

    return 0;
}

static bool vfio_dma_logging_supported(VFIODevice *vbasedev)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_PROBE |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
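
    /*
     * FEATURE_PROBE only asks the kernel whether DMA_LOGGING_START is
     * implemented for this device; it does not start dirty tracking.
     */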
    return !ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
}

static int vfio_migration_init(VFIODevice *vbasedev)
{
    int ret;
    Object *obj;
    VFIOMigration *migration;
    char id[256] = "";
    g_autofree char *path = NULL, *oid = NULL;
    uint64_t mig_flags = 0;
    VMChangeStateHandler *prepare_cb;

    if (!vbasedev->ops->vfio_get_object) {
        return -EINVAL;
    }

    obj = vbasedev->ops->vfio_get_object(vbasedev);
    if (!obj) {
        return -EINVAL;
    }

    ret = vfio_migration_query_flags(vbasedev, &mig_flags);
    if (ret) {
        return ret;
    }

    /* Basic migration functionality must be supported */
    if (!(mig_flags & VFIO_MIGRATION_STOP_COPY)) {
        return -EOPNOTSUPP;
    }

    vbasedev->migration = g_new0(VFIOMigration, 1);
    migration = vbasedev->migration;
    migration->vbasedev = vbasedev;
    migration->device_state = VFIO_DEVICE_STATE_RUNNING;
    migration->data_fd = -1;
    migration->mig_flags = mig_flags;

    vbasedev->dirty_pages_supported = vfio_dma_logging_supported(vbasedev);

    oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj)));
    if (oid) {
        path = g_strdup_printf("%s/vfio", oid);
    } else {
        path = g_strdup("vfio");
    }
    strpadcpy(id, sizeof(id), path, '\0');

    register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers,
                         vbasedev);

    prepare_cb = migration->mig_flags & VFIO_MIGRATION_P2P ?
                     vfio_vmstate_change_prepare :
                     NULL;
    migration->vm_state = qdev_add_vm_change_state_handler_full(
        vbasedev->dev, vfio_vmstate_change, prepare_cb, vbasedev);
    migration_add_notifier(&migration->migration_state,
                           vfio_migration_state_notifier);

    return 0;
}

static void vfio_migration_deinit(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    migration_remove_notifier(&migration->migration_state);
    qemu_del_vm_change_state_handler(migration->vm_state);
    unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
    vfio_migration_free(vbasedev);
    vfio_unblock_multiple_devices_migration();
}

static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
{
    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
        error_propagate(errp, err);
        return -EINVAL;
    }

    vbasedev->migration_blocker = error_copy(err);
    error_free(err);

    return migrate_add_blocker_normal(&vbasedev->migration_blocker, errp);
}

/* ---------------------------------------------------------------------- */

int64_t vfio_mig_bytes_transferred(void)
{
    return bytes_transferred;
}

void vfio_reset_bytes_transferred(void)
{
    bytes_transferred = 0;
}

/*
 * Return true when either migration initialized or blocker registered.
 * Currently we only return false when adding a blocker fails, which will
 * de-register the vfio device.
 */
bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
{
    Error *err = NULL;
    int ret;

    if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
        error_setg(&err, "%s: Migration is disabled for VFIO device",
                   vbasedev->name);
        return !vfio_block_migration(vbasedev, err, errp);
    }

    ret = vfio_migration_init(vbasedev);
    if (ret) {
        if (ret == -ENOTTY) {
            error_setg(&err, "%s: VFIO migration is not supported in kernel",
                       vbasedev->name);
        } else {
            error_setg(&err,
                       "%s: Migration couldn't be initialized for VFIO device, "
                       "err: %d (%s)",
                       vbasedev->name, ret, strerror(-ret));
        }

        return !vfio_block_migration(vbasedev, err, errp);
    }

    if ((!vbasedev->dirty_pages_supported ||
         vbasedev->device_dirty_page_tracking == ON_OFF_AUTO_OFF) &&
        !vbasedev->iommu_dirty_tracking) {
        if (vbasedev->enable_migration == ON_OFF_AUTO_AUTO) {
            error_setg(&err,
                       "%s: VFIO device doesn't support device and "
                       "IOMMU dirty tracking", vbasedev->name);
            goto add_blocker;
        }

        warn_report("%s: VFIO device doesn't support device and "
                    "IOMMU dirty tracking", vbasedev->name);
    }

    ret = vfio_block_multiple_devices_migration(vbasedev, errp);
    if (ret) {
        goto out_deinit;
    }

    if (vfio_viommu_preset(vbasedev)) {
        error_setg(&err, "%s: Migration is currently not supported "
                   "with vIOMMU enabled", vbasedev->name);
        goto add_blocker;
    }

    trace_vfio_migration_realize(vbasedev->name);
    return true;

add_blocker:
    ret = vfio_block_migration(vbasedev, err, errp);
out_deinit:
    if (ret) {
        vfio_migration_deinit(vbasedev);
    }
    return !ret;
}

void vfio_migration_exit(VFIODevice *vbasedev)
{
    if (vbasedev->migration) {
        vfio_migration_deinit(vbasedev);
    }

    migrate_del_blocker(&vbasedev->migration_blocker);
}