nvmm-accel-ops.c (2.8 KB)
  1. /*
  2. * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
  3. *
  4. * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU.
  5. *
  6. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  7. * See the COPYING file in the top-level directory.
  8. */
  9. #include "qemu/osdep.h"
  10. #include "system/kvm_int.h"
  11. #include "qemu/main-loop.h"
  12. #include "system/accel-ops.h"
  13. #include "system/cpus.h"
  14. #include "qemu/guest-random.h"
  15. #include "system/nvmm.h"
  16. #include "nvmm-accel-ops.h"
/*
 * Thread function for one NVMM vCPU.  @arg is the CPUState this thread
 * drives.  Initializes the vCPU, then loops executing guest code and
 * servicing I/O events until the vCPU is unplugged, then tears it down.
 */
static void *qemu_nvmm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    assert(nvmm_enabled());

    rcu_register_thread();

    /* The BQL is taken here and held for the rest of the thread's life. */
    bql_lock();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = nvmm_init_vcpu(cpu);
    if (r < 0) {
        /* The caller treats a negative return as a negative errno. */
        fprintf(stderr, "nvmm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    /* signal CPU creation */
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            /*
             * NOTE(review): nvmm_vcpu_exec() presumably drops/reacquires
             * the BQL around guest entry (defined in nvmm-all.c) — confirm.
             */
            r = nvmm_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        /* Sleep on the halt condition while the vCPU has nothing to do. */
        while (cpu_thread_is_idle(cpu)) {
            qemu_cond_wait_bql(cpu->halt_cond);
        }
        qemu_wait_io_event_common(cpu);
        /* Keep looping until unplug is requested and the CPU cannot run. */
    } while (!cpu->unplug || cpu_can_run(cpu));

    nvmm_destroy_vcpu(cpu);
    cpu_thread_signal_destroyed(cpu);
    bql_unlock();
    rcu_unregister_thread();
    return NULL;
}
  53. static void nvmm_start_vcpu_thread(CPUState *cpu)
  54. {
  55. char thread_name[VCPU_THREAD_NAME_SIZE];
  56. snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/NVMM",
  57. cpu->cpu_index);
  58. qemu_thread_create(cpu->thread, thread_name, qemu_nvmm_cpu_thread_fn,
  59. cpu, QEMU_THREAD_JOINABLE);
  60. }
  61. /*
  62. * Abort the call to run the virtual processor by another thread, and to
  63. * return the control to that thread.
  64. */
  65. static void nvmm_kick_vcpu_thread(CPUState *cpu)
  66. {
  67. cpu->exit_request = 1;
  68. cpus_kick_thread(cpu);
  69. }
  70. static void nvmm_accel_ops_class_init(ObjectClass *oc, void *data)
  71. {
  72. AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
  73. ops->create_vcpu_thread = nvmm_start_vcpu_thread;
  74. ops->kick_vcpu_thread = nvmm_kick_vcpu_thread;
  75. ops->synchronize_post_reset = nvmm_cpu_synchronize_post_reset;
  76. ops->synchronize_post_init = nvmm_cpu_synchronize_post_init;
  77. ops->synchronize_state = nvmm_cpu_synchronize_state;
  78. ops->synchronize_pre_loadvm = nvmm_cpu_synchronize_pre_loadvm;
  79. }
/*
 * QOM type for the NVMM accel-ops class.  Marked abstract — presumably
 * only the class (not an instance) is ever looked up; confirm against
 * the TYPE_ACCEL_OPS machinery.
 */
static const TypeInfo nvmm_accel_ops_type = {
    .name = ACCEL_OPS_NAME("nvmm"),
    .parent = TYPE_ACCEL_OPS,
    .class_init = nvmm_accel_ops_class_init,
    .abstract = true,
};
/* Register the NVMM accel-ops type with the QOM type system at startup. */
static void nvmm_accel_ops_register_types(void)
{
    type_register_static(&nvmm_accel_ops_type);
}
type_init(nvmm_accel_ops_register_types);