/*
 * Migration support for VFIO devices
 *
 * Copyright NVIDIA, Inc. 2020
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "system/runstate.h"
#include "hw/vfio/vfio-common.h"
#include "migration/misc.h"
#include "migration/savevm.h"
#include "migration/vmstate.h"
#include "migration/qemu-file.h"
#include "migration/register.h"
#include "migration/blocker.h"
#include "migration-multifd.h"
#include "qapi/error.h"
#include "qapi/qapi-events-vfio.h"
#include "exec/ramlist.h"
#include "pci.h"
#include "trace.h"
#include "hw/hw.h"

/*
 * This is an arbitrary size based on migration of mlx5 devices, where typically
 * total device migration size is on the order of 100s of MB. Testing with
 * larger values, e.g. 128MB and 1GB, did not show a performance improvement.
 */
#define VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE (1 * MiB)
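
/*
 * Total device state bytes sent so far; read and updated only with the
 * qatomic helpers below (see vfio_mig_add_bytes_transferred()) so that
 * concurrent updaters, e.g. multifd save threads, remain safe.
 */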
static unsigned long bytes_transferred;

static const char *mig_state_to_str(enum vfio_device_mig_state state)
{
    switch (state) {
    case VFIO_DEVICE_STATE_ERROR:
        return "ERROR";
    case VFIO_DEVICE_STATE_STOP:
        return "STOP";
    case VFIO_DEVICE_STATE_RUNNING:
        return "RUNNING";
    case VFIO_DEVICE_STATE_STOP_COPY:
        return "STOP_COPY";
    case VFIO_DEVICE_STATE_RESUMING:
        return "RESUMING";
    case VFIO_DEVICE_STATE_RUNNING_P2P:
        return "RUNNING_P2P";
    case VFIO_DEVICE_STATE_PRE_COPY:
        return "PRE_COPY";
    case VFIO_DEVICE_STATE_PRE_COPY_P2P:
        return "PRE_COPY_P2P";
    default:
        return "UNKNOWN STATE";
    }
}

static QapiVfioMigrationState
mig_state_to_qapi_state(enum vfio_device_mig_state state)
{
    switch (state) {
    case VFIO_DEVICE_STATE_STOP:
        return QAPI_VFIO_MIGRATION_STATE_STOP;
    case VFIO_DEVICE_STATE_RUNNING:
        return QAPI_VFIO_MIGRATION_STATE_RUNNING;
    case VFIO_DEVICE_STATE_STOP_COPY:
        return QAPI_VFIO_MIGRATION_STATE_STOP_COPY;
    case VFIO_DEVICE_STATE_RESUMING:
        return QAPI_VFIO_MIGRATION_STATE_RESUMING;
    case VFIO_DEVICE_STATE_RUNNING_P2P:
        return QAPI_VFIO_MIGRATION_STATE_RUNNING_P2P;
    case VFIO_DEVICE_STATE_PRE_COPY:
        return QAPI_VFIO_MIGRATION_STATE_PRE_COPY;
    case VFIO_DEVICE_STATE_PRE_COPY_P2P:
        return QAPI_VFIO_MIGRATION_STATE_PRE_COPY_P2P;
    default:
        g_assert_not_reached();
    }
}

static void vfio_migration_send_event(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;
    DeviceState *dev = vbasedev->dev;
    g_autofree char *qom_path = NULL;
    Object *obj;

    if (!vbasedev->migration_events) {
        return;
    }

    g_assert(vbasedev->ops->vfio_get_object);
    obj = vbasedev->ops->vfio_get_object(vbasedev);
    g_assert(obj);
    qom_path = object_get_canonical_path(obj);

    qapi_event_send_vfio_migration(
        dev->id, qom_path, mig_state_to_qapi_state(migration->device_state));
}

static void vfio_migration_set_device_state(VFIODevice *vbasedev,
                                            enum vfio_device_mig_state state)
{
    VFIOMigration *migration = vbasedev->migration;

    trace_vfio_migration_set_device_state(vbasedev->name,
                                          mig_state_to_str(state));

    migration->device_state = state;
    vfio_migration_send_event(vbasedev);
}

int vfio_migration_set_state(VFIODevice *vbasedev,
                             enum vfio_device_mig_state new_state,
                             enum vfio_device_mig_state recover_state,
                             Error **errp)
{
    VFIOMigration *migration = vbasedev->migration;
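    /*
     * VFIO_DEVICE_FEATURE takes a variable-length payload: a uint64_t
     * array sized with DIV_ROUND_UP gives suitably aligned storage for
     * the feature header followed by the mig_state argument.
     */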
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
                              sizeof(struct vfio_device_feature_mig_state),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_mig_state *mig_state =
        (struct vfio_device_feature_mig_state *)feature->data;
    int ret;
    g_autofree char *error_prefix =
        g_strdup_printf("%s: Failed setting device state to %s.",
                        vbasedev->name, mig_state_to_str(new_state));

    trace_vfio_migration_set_state(vbasedev->name, mig_state_to_str(new_state),
                                   mig_state_to_str(recover_state));

    if (new_state == migration->device_state) {
        return 0;
    }

    feature->argsz = sizeof(buf);
    feature->flags =
        VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
    mig_state->device_state = new_state;
    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
        /* Try to set the device in some good state */
        ret = -errno;

        if (recover_state == VFIO_DEVICE_STATE_ERROR) {
            error_setg_errno(errp, errno,
                             "%s Recover state is ERROR. Resetting device",
                             error_prefix);

            goto reset_device;
        }

        error_setg_errno(errp, errno,
                         "%s Setting device in recover state %s",
                         error_prefix, mig_state_to_str(recover_state));

        mig_state->device_state = recover_state;
        if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
            ret = -errno;
            /*
             * If setting the device in recover state fails, report
             * the error here and propagate the first error.
             */
            error_report(
                "%s: Failed setting device in recover state, err: %s. Resetting device",
                vbasedev->name, strerror(errno));

            goto reset_device;
        }

        vfio_migration_set_device_state(vbasedev, recover_state);

        return ret;
    }

    vfio_migration_set_device_state(vbasedev, new_state);
    if (mig_state->data_fd != -1) {
        if (migration->data_fd != -1) {
            /*
             * This can happen if the device is asynchronously reset and
             * terminates a data transfer.
             */
            error_setg(errp, "%s: data_fd out of sync", vbasedev->name);
            close(mig_state->data_fd);

            return -EBADF;
        }

        migration->data_fd = mig_state->data_fd;
    }

    return 0;

reset_device:
    if (ioctl(vbasedev->fd, VFIO_DEVICE_RESET)) {
        hw_error("%s: Failed resetting device, err: %s", vbasedev->name,
                 strerror(errno));
    }

    vfio_migration_set_device_state(vbasedev, VFIO_DEVICE_STATE_RUNNING);

    return ret;
}

/*
 * Some device state transitions require resetting the device if they fail.
 * This function sets the device in new_state and resets the device if that
 * fails. Reset is done by using ERROR as the recover state.
 */
static int
vfio_migration_set_state_or_reset(VFIODevice *vbasedev,
                                  enum vfio_device_mig_state new_state,
                                  Error **errp)
{
    return vfio_migration_set_state(vbasedev, new_state,
                                    VFIO_DEVICE_STATE_ERROR, errp);
}

static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
                            uint64_t data_size)
{
    VFIOMigration *migration = vbasedev->migration;
    int ret;

    ret = qemu_file_get_to_fd(f, migration->data_fd, data_size);
    trace_vfio_load_state_device_data(vbasedev->name, data_size, ret);

    return ret;
}

int vfio_save_device_config_state(QEMUFile *f, void *opaque, Error **errp)
{
    VFIODevice *vbasedev = opaque;
    int ret;

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE);

    if (vbasedev->ops && vbasedev->ops->vfio_save_config) {
        ret = vbasedev->ops->vfio_save_config(vbasedev, f, errp);
        if (ret) {
            return ret;
        }
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    trace_vfio_save_device_config_state(vbasedev->name);

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to save state");
    }
    return ret;
}

int vfio_load_device_config_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    uint64_t data;

    trace_vfio_load_device_config_state_start(vbasedev->name);

    if (vbasedev->ops && vbasedev->ops->vfio_load_config) {
        int ret;

        ret = vbasedev->ops->vfio_load_config(vbasedev, f);
        if (ret) {
            error_report("%s: Failed to load device config space",
                         vbasedev->name);
            return ret;
        }
    }

    data = qemu_get_be64(f);
    if (data != VFIO_MIG_FLAG_END_OF_STATE) {
        error_report("%s: Failed loading device config space, "
                     "end flag incorrect 0x%"PRIx64, vbasedev->name, data);
        return -EINVAL;
    }

    trace_vfio_load_device_config_state_end(vbasedev->name);
    return qemu_file_get_error(f);
}

static void vfio_migration_cleanup(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    close(migration->data_fd);
    migration->data_fd = -1;
}

static int vfio_query_stop_copy_size(VFIODevice *vbasedev,
                                     uint64_t *stop_copy_size)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
                              sizeof(struct vfio_device_feature_mig_data_size),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_mig_data_size *mig_data_size =
        (struct vfio_device_feature_mig_data_size *)feature->data;

    feature->argsz = sizeof(buf);
    feature->flags =
        VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIG_DATA_SIZE;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
        return -errno;
    }

    *stop_copy_size = mig_data_size->stop_copy_length;

    return 0;
}

static int vfio_query_precopy_size(VFIOMigration *migration)
{
    struct vfio_precopy_info precopy = {
        .argsz = sizeof(precopy),
    };

    migration->precopy_init_size = 0;
    migration->precopy_dirty_size = 0;

    if (ioctl(migration->data_fd, VFIO_MIG_GET_PRECOPY_INFO, &precopy)) {
        return -errno;
    }

    migration->precopy_init_size = precopy.initial_bytes;
    migration->precopy_dirty_size = precopy.dirty_bytes;

    return 0;
}

/* Returns the size of saved data on success and -errno on error */
static ssize_t vfio_save_block(QEMUFile *f, VFIOMigration *migration)
{
    ssize_t data_size;

    data_size = read(migration->data_fd, migration->data_buffer,
                     migration->data_buffer_size);
    if (data_size < 0) {
        /*
         * Pre-copy emptied all the device state for now. For more information,
         * please refer to the Linux kernel VFIO uAPI.
         */
        if (errno == ENOMSG) {
            if (!migration->event_precopy_empty_hit) {
                trace_vfio_save_block_precopy_empty_hit(migration->vbasedev->name);
                migration->event_precopy_empty_hit = true;
            }
            return 0;
        }

        return -errno;
    }
    if (data_size == 0) {
        return 0;
    }

    /* Non-empty read: re-arm the trace event */
    migration->event_precopy_empty_hit = false;
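
    /*
     * On-the-wire framing: a DEV_DATA_STATE tag, the 64-bit chunk size,
     * then the raw device state bytes; vfio_load_state() consumes the
     * same layout on the destination.
     */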
    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
    qemu_put_be64(f, data_size);
    qemu_put_buffer(f, migration->data_buffer, data_size);
    vfio_mig_add_bytes_transferred(data_size);

    trace_vfio_save_block(migration->vbasedev->name, data_size);

    return qemu_file_get_error(f) ?: data_size;
}

static void vfio_update_estimated_pending_data(VFIOMigration *migration,
                                               uint64_t data_size)
{
    if (!data_size) {
        /*
         * Pre-copy emptied all the device state for now; update estimated
         * sizes accordingly.
         */
        migration->precopy_init_size = 0;
        migration->precopy_dirty_size = 0;

        return;
    }
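
    /*
     * Consume precopy_init_size first and only charge the remainder
     * against precopy_dirty_size, mirroring the initial_bytes /
     * dirty_bytes split reported by vfio_query_precopy_size().
     */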
    if (migration->precopy_init_size) {
        uint64_t init_size = MIN(migration->precopy_init_size, data_size);

        migration->precopy_init_size -= init_size;
        data_size -= init_size;
    }

    migration->precopy_dirty_size -= MIN(migration->precopy_dirty_size,
                                         data_size);
}

static bool vfio_precopy_supported(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    return migration->mig_flags & VFIO_MIGRATION_PRE_COPY;
}

/* ---------------------------------------------------------------------- */

static int vfio_save_prepare(void *opaque, Error **errp)
{
    VFIODevice *vbasedev = opaque;

    /*
     * Snapshot doesn't use postcopy nor background snapshot, so allow snapshot
     * even if they are on.
     */
    if (runstate_check(RUN_STATE_SAVE_VM)) {
        return 0;
    }

    if (migrate_postcopy_ram()) {
        error_setg(
            errp, "%s: VFIO migration is not supported with postcopy migration",
            vbasedev->name);
        return -EOPNOTSUPP;
    }

    if (migrate_background_snapshot()) {
        error_setg(
            errp,
            "%s: VFIO migration is not supported with background snapshot",
            vbasedev->name);
        return -EOPNOTSUPP;
    }

    return 0;
}

static int vfio_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint64_t stop_copy_size = VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE;
    int ret;

    if (!vfio_multifd_setup(vbasedev, false, errp)) {
        return -EINVAL;
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE);

    vfio_query_stop_copy_size(vbasedev, &stop_copy_size);
    migration->data_buffer_size = MIN(VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE,
                                      stop_copy_size);
    migration->data_buffer = g_try_malloc0(migration->data_buffer_size);
    if (!migration->data_buffer) {
        error_setg(errp, "%s: Failed to allocate migration data buffer",
                   vbasedev->name);
        return -ENOMEM;
    }

    migration->event_save_iterate_started = false;
    migration->event_precopy_empty_hit = false;

    if (vfio_precopy_supported(vbasedev)) {
        switch (migration->device_state) {
        case VFIO_DEVICE_STATE_RUNNING:
            ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_PRE_COPY,
                                           VFIO_DEVICE_STATE_RUNNING, errp);
            if (ret) {
                return ret;
            }

            vfio_query_precopy_size(migration);

            break;
        case VFIO_DEVICE_STATE_STOP:
            /* vfio_save_complete_precopy() will go to STOP_COPY */
            break;
        default:
            error_setg(errp, "%s: Invalid device state %d", vbasedev->name,
                       migration->device_state);
            return -EINVAL;
        }
    }

    trace_vfio_save_setup(vbasedev->name, migration->data_buffer_size);

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "%s: save setup failed", vbasedev->name);
    }

    return ret;
}

static void vfio_save_cleanup(void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    Error *local_err = NULL;
    int ret;

    /* Currently a NOP, done for symmetry with load_cleanup() */
    vfio_multifd_cleanup(vbasedev);

    /*
     * Changing device state from STOP_COPY to STOP can take time. Do it here,
     * after migration has completed, so it won't increase downtime.
     */
    if (migration->device_state == VFIO_DEVICE_STATE_STOP_COPY) {
        ret = vfio_migration_set_state_or_reset(vbasedev,
                                                VFIO_DEVICE_STATE_STOP,
                                                &local_err);
        if (ret) {
            error_report_err(local_err);
        }
    }

    g_free(migration->data_buffer);
    migration->data_buffer = NULL;
    migration->precopy_init_size = 0;
    migration->precopy_dirty_size = 0;
    migration->initial_data_sent = false;
    vfio_migration_cleanup(vbasedev);
    trace_vfio_save_cleanup(vbasedev->name);
}

static void vfio_state_pending_estimate(void *opaque, uint64_t *must_precopy,
                                        uint64_t *can_postcopy)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;

    if (!vfio_device_state_is_precopy(vbasedev)) {
        return;
    }

    *must_precopy +=
        migration->precopy_init_size + migration->precopy_dirty_size;

    trace_vfio_state_pending_estimate(vbasedev->name, *must_precopy,
                                      *can_postcopy,
                                      migration->precopy_init_size,
                                      migration->precopy_dirty_size);
}

/*
 * Migration size of VFIO devices can be as little as a few KBs or as big as
 * many GBs. This value should be big enough to cover the worst case.
 */
#define VFIO_MIG_STOP_COPY_SIZE (100 * GiB)

static void vfio_state_pending_exact(void *opaque, uint64_t *must_precopy,
                                     uint64_t *can_postcopy)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint64_t stop_copy_size = VFIO_MIG_STOP_COPY_SIZE;

    /*
     * If getting pending migration size fails, VFIO_MIG_STOP_COPY_SIZE is
     * reported so downtime limit won't be violated.
     */
    vfio_query_stop_copy_size(vbasedev, &stop_copy_size);
    *must_precopy += stop_copy_size;

    if (vfio_device_state_is_precopy(vbasedev)) {
        vfio_query_precopy_size(migration);
    }

    trace_vfio_state_pending_exact(vbasedev->name, *must_precopy, *can_postcopy,
                                   stop_copy_size, migration->precopy_init_size,
                                   migration->precopy_dirty_size);
}

static bool vfio_is_active_iterate(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    return vfio_device_state_is_precopy(vbasedev);
}

/*
 * Note about migration rate limiting: VFIO migration buffer size is currently
 * limited to 1MB, so there is no need to check if migration rate exceeded (as
 * in the worst case it will exceed by 1MB). However, if the buffer size is
 * later changed to a bigger value, migration rate should be enforced here.
 */
static int vfio_save_iterate(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    ssize_t data_size;

    if (!migration->event_save_iterate_started) {
        trace_vfio_save_iterate_start(vbasedev->name);
        migration->event_save_iterate_started = true;
    }

    data_size = vfio_save_block(f, migration);
    if (data_size < 0) {
        return data_size;
    }

    vfio_update_estimated_pending_data(migration, data_size);

    if (migrate_switchover_ack() && !migration->precopy_init_size &&
        !migration->initial_data_sent) {
        qemu_put_be64(f, VFIO_MIG_FLAG_DEV_INIT_DATA_SENT);
        migration->initial_data_sent = true;
    } else {
        qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
    }

    trace_vfio_save_iterate(vbasedev->name, migration->precopy_init_size,
                            migration->precopy_dirty_size);
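
    /*
     * A non-zero return value indicates that this device has sent all of
     * its available precopy data for now.
     */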
    return !migration->precopy_init_size && !migration->precopy_dirty_size;
}

static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    ssize_t data_size;
    int ret;
    Error *local_err = NULL;

    if (vfio_multifd_transfer_enabled(vbasedev)) {
        vfio_multifd_emit_dummy_eos(vbasedev, f);
        return 0;
    }

    trace_vfio_save_complete_precopy_start(vbasedev->name);

    /* We reach here with device state STOP or STOP_COPY only */
    ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP_COPY,
                                   VFIO_DEVICE_STATE_STOP, &local_err);
    if (ret) {
        error_report_err(local_err);
        return ret;
    }
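
    /*
     * Drain the device in STOP_COPY: keep reading data_fd until the
     * kernel signals end of stream with a zero-sized read.
     */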
    do {
        data_size = vfio_save_block(f, vbasedev->migration);
        if (data_size < 0) {
            return data_size;
        }
    } while (data_size);

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
    ret = qemu_file_get_error(f);

    trace_vfio_save_complete_precopy(vbasedev->name, ret);

    return ret;
}

static void vfio_save_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    Error *local_err = NULL;
    int ret;

    if (vfio_multifd_transfer_enabled(vbasedev)) {
        vfio_multifd_emit_dummy_eos(vbasedev, f);
        return;
    }

    ret = vfio_save_device_config_state(f, opaque, &local_err);
    if (ret) {
        error_prepend(&local_err,
                      "vfio: Failed to save device config space of %s - ",
                      vbasedev->name);
        qemu_file_set_error_obj(f, ret, local_err);
    }
}

static int vfio_load_setup(QEMUFile *f, void *opaque, Error **errp)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    int ret;

    if (!vfio_multifd_setup(vbasedev, true, errp)) {
        return -EINVAL;
    }

    ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RESUMING,
                                   migration->device_state, errp);
    if (ret) {
        return ret;
    }

    return 0;
}

static int vfio_load_cleanup(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    vfio_multifd_cleanup(vbasedev);

    vfio_migration_cleanup(vbasedev);
    trace_vfio_load_cleanup(vbasedev->name);

    return 0;
}

static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
{
    VFIODevice *vbasedev = opaque;
    int ret = 0;
    uint64_t data;
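
    /*
     * The incoming stream is a sequence of be64 tags, each optionally
     * followed by a payload, terminated by END_OF_STATE; this mirrors
     * the framing written by the save handlers above.
     */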
    data = qemu_get_be64(f);
    while (data != VFIO_MIG_FLAG_END_OF_STATE) {
        trace_vfio_load_state(vbasedev->name, data);

        switch (data) {
        case VFIO_MIG_FLAG_DEV_CONFIG_STATE:
        {
            if (vfio_multifd_transfer_enabled(vbasedev)) {
                error_report("%s: got DEV_CONFIG_STATE in main migration "
                             "channel but doing multifd transfer",
                             vbasedev->name);
                return -EINVAL;
            }

            return vfio_load_device_config_state(f, opaque);
        }
        case VFIO_MIG_FLAG_DEV_SETUP_STATE:
        {
            data = qemu_get_be64(f);
            if (data == VFIO_MIG_FLAG_END_OF_STATE) {
                return ret;
            } else {
                error_report("%s: SETUP STATE: EOS not found 0x%"PRIx64,
                             vbasedev->name, data);
                return -EINVAL;
            }
            break;
        }
        case VFIO_MIG_FLAG_DEV_DATA_STATE:
        {
            uint64_t data_size = qemu_get_be64(f);

            if (data_size) {
                ret = vfio_load_buffer(f, vbasedev, data_size);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }
        case VFIO_MIG_FLAG_DEV_INIT_DATA_SENT:
        {
            if (!vfio_precopy_supported(vbasedev) ||
                !migrate_switchover_ack()) {
                error_report("%s: Received INIT_DATA_SENT but switchover ack "
                             "is not used", vbasedev->name);
                return -EINVAL;
            }

            ret = qemu_loadvm_approve_switchover();
            if (ret) {
                error_report(
                    "%s: qemu_loadvm_approve_switchover failed, err=%d (%s)",
                    vbasedev->name, ret, strerror(-ret));
            }

            return ret;
        }
        default:
            error_report("%s: Unknown tag 0x%"PRIx64, vbasedev->name, data);
            return -EINVAL;
        }

        data = qemu_get_be64(f);
        ret = qemu_file_get_error(f);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

static bool vfio_switchover_ack_needed(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    return vfio_precopy_supported(vbasedev);
}

static int vfio_switchover_start(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    if (vfio_multifd_transfer_enabled(vbasedev)) {
        return vfio_multifd_switchover_start(vbasedev);
    }

    return 0;
}

static const SaveVMHandlers savevm_vfio_handlers = {
    .save_prepare = vfio_save_prepare,
    .save_setup = vfio_save_setup,
    .save_cleanup = vfio_save_cleanup,
    .state_pending_estimate = vfio_state_pending_estimate,
    .state_pending_exact = vfio_state_pending_exact,
    .is_active_iterate = vfio_is_active_iterate,
    .save_live_iterate = vfio_save_iterate,
    .save_live_complete_precopy = vfio_save_complete_precopy,
    .save_state = vfio_save_state,
    .load_setup = vfio_load_setup,
    .load_cleanup = vfio_load_cleanup,
    .load_state = vfio_load_state,
    .switchover_ack_needed = vfio_switchover_ack_needed,
    /*
     * Multifd support
     */
    .load_state_buffer = vfio_multifd_load_state_buffer,
    .switchover_start = vfio_switchover_start,
    .save_live_complete_precopy_thread = vfio_multifd_save_complete_precopy_thread,
};

/* ---------------------------------------------------------------------- */

static void vfio_vmstate_change_prepare(void *opaque, bool running,
                                        RunState state)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    enum vfio_device_mig_state new_state;
    Error *local_err = NULL;
    int ret;

    new_state = migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ?
                    VFIO_DEVICE_STATE_PRE_COPY_P2P :
                    VFIO_DEVICE_STATE_RUNNING_P2P;

    ret = vfio_migration_set_state_or_reset(vbasedev, new_state, &local_err);
    if (ret) {
        /*
         * Migration should be aborted in this case, but vm_state_notify()
         * currently does not support reporting failures.
         */
        migration_file_set_error(ret, local_err);
    }

    trace_vfio_vmstate_change_prepare(vbasedev->name, running,
                                      RunState_str(state),
                                      mig_state_to_str(new_state));
}

static void vfio_vmstate_change(void *opaque, bool running, RunState state)
{
    VFIODevice *vbasedev = opaque;
    enum vfio_device_mig_state new_state;
    Error *local_err = NULL;
    int ret;
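
    /*
     * When the VM stops while the device is still in a precopy state and
     * the stop is final (migration switchover or an explicit pause), go
     * straight to STOP_COPY so the remaining device state can be saved;
     * otherwise a plain STOP is enough.
     */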
    if (running) {
        new_state = VFIO_DEVICE_STATE_RUNNING;
    } else {
        new_state =
            (vfio_device_state_is_precopy(vbasedev) &&
             (state == RUN_STATE_FINISH_MIGRATE || state == RUN_STATE_PAUSED)) ?
                VFIO_DEVICE_STATE_STOP_COPY :
                VFIO_DEVICE_STATE_STOP;
    }

    ret = vfio_migration_set_state_or_reset(vbasedev, new_state, &local_err);
    if (ret) {
        /*
         * Migration should be aborted in this case, but vm_state_notify()
         * currently does not support reporting failures.
         */
        migration_file_set_error(ret, local_err);
    }

    trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state),
                              mig_state_to_str(new_state));
}

static int vfio_migration_state_notifier(NotifierWithReturn *notifier,
                                         MigrationEvent *e, Error **errp)
{
    VFIOMigration *migration = container_of(notifier, VFIOMigration,
                                            migration_state);
    VFIODevice *vbasedev = migration->vbasedev;
    Error *local_err = NULL;
    int ret;

    trace_vfio_migration_state_notifier(vbasedev->name, e->type);

    if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        /*
         * MigrationNotifyFunc may not return an error code and an Error
         * object for MIG_EVENT_PRECOPY_FAILED. Hence, report the error
         * locally and ignore the errp argument.
         */
        ret = vfio_migration_set_state_or_reset(vbasedev,
                                                VFIO_DEVICE_STATE_RUNNING,
                                                &local_err);
        if (ret) {
            error_report_err(local_err);
        }
    }
    return 0;
}

static void vfio_migration_free(VFIODevice *vbasedev)
{
    g_free(vbasedev->migration);
    vbasedev->migration = NULL;
}

static int vfio_migration_query_flags(VFIODevice *vbasedev, uint64_t *mig_flags)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
                              sizeof(struct vfio_device_feature_migration),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_migration *mig =
        (struct vfio_device_feature_migration *)feature->data;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;
    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
        return -errno;
    }

    *mig_flags = mig->flags;

    return 0;
}

static bool vfio_dma_logging_supported(VFIODevice *vbasedev)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_PROBE |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_START;

    return !ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
}

static int vfio_migration_init(VFIODevice *vbasedev)
{
    int ret;
    Object *obj;
    VFIOMigration *migration;
    char id[256] = "";
    g_autofree char *path = NULL, *oid = NULL;
    uint64_t mig_flags = 0;
    VMChangeStateHandler *prepare_cb;

    if (!vbasedev->ops->vfio_get_object) {
        return -EINVAL;
    }

    obj = vbasedev->ops->vfio_get_object(vbasedev);
    if (!obj) {
        return -EINVAL;
    }

    ret = vfio_migration_query_flags(vbasedev, &mig_flags);
    if (ret) {
        return ret;
    }

    /* Basic migration functionality must be supported */
    if (!(mig_flags & VFIO_MIGRATION_STOP_COPY)) {
        return -EOPNOTSUPP;
    }

    vbasedev->migration = g_new0(VFIOMigration, 1);
    migration = vbasedev->migration;
    migration->vbasedev = vbasedev;
    migration->device_state = VFIO_DEVICE_STATE_RUNNING;
    migration->data_fd = -1;
    migration->mig_flags = mig_flags;
    vbasedev->dirty_pages_supported = vfio_dma_logging_supported(vbasedev);
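
    /*
     * Register the SaveVM section as "<vmstate id>/vfio" when the device
     * has a vmstate id, so each VFIO device gets its own section name.
     */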
    oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj)));
    if (oid) {
        path = g_strdup_printf("%s/vfio", oid);
    } else {
        path = g_strdup("vfio");
    }
    strpadcpy(id, sizeof(id), path, '\0');

    register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers,
                         vbasedev);

    prepare_cb = migration->mig_flags & VFIO_MIGRATION_P2P ?
                     vfio_vmstate_change_prepare :
                     NULL;
    migration->vm_state = qdev_add_vm_change_state_handler_full(
        vbasedev->dev, vfio_vmstate_change, prepare_cb, vbasedev);
    migration_add_notifier(&migration->migration_state,
                           vfio_migration_state_notifier);

    return 0;
}

static void vfio_migration_deinit(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    migration_remove_notifier(&migration->migration_state);
    qemu_del_vm_change_state_handler(migration->vm_state);
    unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
    vfio_migration_free(vbasedev);
    vfio_unblock_multiple_devices_migration();
}

static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
{
    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
        error_propagate(errp, err);
        return -EINVAL;
    }

    vbasedev->migration_blocker = error_copy(err);
    error_free(err);

    return migrate_add_blocker_normal(&vbasedev->migration_blocker, errp);
}

/* ---------------------------------------------------------------------- */
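
/*
 * bytes_transferred is an unsigned long; clamp the reported value to
 * INT64_MAX since this query returns a signed 64-bit counter.
 */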
int64_t vfio_mig_bytes_transferred(void)
{
    return MIN(qatomic_read(&bytes_transferred), INT64_MAX);
}

void vfio_reset_bytes_transferred(void)
{
    qatomic_set(&bytes_transferred, 0);
}

void vfio_mig_add_bytes_transferred(unsigned long val)
{
    qatomic_add(&bytes_transferred, val);
}

/*
 * Return true when either migration is initialized or a blocker is
 * registered. Currently we only return false when adding a blocker fails,
 * which will de-register the VFIO device.
 */
bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
{
    Error *err = NULL;
    int ret;

    if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
        error_setg(&err, "%s: Migration is disabled for VFIO device",
                   vbasedev->name);
        return !vfio_block_migration(vbasedev, err, errp);
    }

    ret = vfio_migration_init(vbasedev);
    if (ret) {
        if (ret == -ENOTTY) {
            error_setg(&err, "%s: VFIO migration is not supported in kernel",
                       vbasedev->name);
        } else {
            error_setg(&err,
                       "%s: Migration couldn't be initialized for VFIO device, "
                       "err: %d (%s)",
                       vbasedev->name, ret, strerror(-ret));
        }

        return !vfio_block_migration(vbasedev, err, errp);
    }

    if ((!vbasedev->dirty_pages_supported ||
         vbasedev->device_dirty_page_tracking == ON_OFF_AUTO_OFF) &&
        !vbasedev->iommu_dirty_tracking) {
        if (vbasedev->enable_migration == ON_OFF_AUTO_AUTO) {
            error_setg(&err,
                       "%s: VFIO device doesn't support device and "
                       "IOMMU dirty tracking", vbasedev->name);
            goto add_blocker;
        }

        warn_report("%s: VFIO device doesn't support device and "
                    "IOMMU dirty tracking", vbasedev->name);
    }

    ret = vfio_block_multiple_devices_migration(vbasedev, errp);
    if (ret) {
        goto out_deinit;
    }

    if (vfio_viommu_preset(vbasedev)) {
        error_setg(&err, "%s: Migration is currently not supported "
                   "with vIOMMU enabled", vbasedev->name);
        goto add_blocker;
    }

    trace_vfio_migration_realize(vbasedev->name);
    return true;

add_blocker:
    ret = vfio_block_migration(vbasedev, err, errp);
out_deinit:
    if (ret) {
        vfio_migration_deinit(vbasedev);
    }
    return !ret;
}

void vfio_migration_exit(VFIODevice *vbasedev)
{
    if (vbasedev->migration) {
        vfio_migration_deinit(vbasedev);
    }

    migrate_del_blocker(&vbasedev->migration_blocker);
}