/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "system/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"
#include "qom/object.h"
#include "target/i386/kvm/hyperv-proto.h"
#include "target/i386/cpu.h"
#include "exec/cpu-all.h"
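
/*
 * Per-vCPU SynIC (synthetic interrupt controller) state: the message and
 * event-flag overlay pages the guest maps into its physical address space,
 * and the list of SINT routes currently targeting this vCPU.
 */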
struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool sctl_enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;

    QemuMutex sint_routes_mutex;
    QLIST_HEAD(, HvSintRoute) sint_routes;
};

#define TYPE_SYNIC "hyperv-synic"
OBJECT_DECLARE_SIMPLE_TYPE(SynICState, SYNIC)

static bool synic_enabled;

bool hyperv_is_synic_enabled(void)
{
    return synic_enabled;
}

static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}
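
/*
 * Record the new SynIC control state, and (re)map the message and event-flag
 * overlay pages into guest physical address space when their addresses
 * change.
 */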
static void synic_update(SynICState *synic, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->sctl_enabled = sctl_enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

void hyperv_synic_update(CPUState *cs, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, sctl_enable, msg_page_addr, event_page_addr);
}

static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);
    qemu_mutex_init(&synic->sint_routes_mutex);
    QLIST_INIT(&synic->sint_routes);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);

    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
    assert(QLIST_EMPTY(&synic->sint_routes));
}

static void synic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    device_class_set_legacy_reset(dc, synic_reset);
    dc->user_creatable = false;
}

void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj);
    object_unref(obj);
    qdev_realize(DEVICE(obj), NULL, &error_abort);
    synic_enabled = true;
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_cold_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers).  To guarantee
 * serialization with both KVM vcpu and the guest cpu, the messages are first
 * staged in an intermediate area and then posted to the SynIC message page in
 * the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy the staged msg into the
         * msg slot and notify the guest, records the status, marks the
         * posting done (BUSY -> POSTED), and schedules the sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;
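
/*
 * A route for delivering into one SINT of one vCPU: the set/ack notifiers
 * wired to KVM, plus the optional staged-message machinery when a message
 * callback is registered.  Refcounted, and linked on the owning SynIC's
 * sint_routes list.
 */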
struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;

    HvSintStagedMessage *staged_msg;

    unsigned refcount;
    QLIST_ENTRY(HvSintRoute) link;
};
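
/*
 * Map a VP index to its CPUState.  This relies on the VP index matching
 * QEMU's cpu index, which the assertion below double-checks.
 */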
static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);
    assert(hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}

/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy we
     * set the msg_pending flag in it, so it is the guest's EOM that will
     * trigger the notification from KVM via sint_ack_notifier.
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to guest in the
 * vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                        HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}

static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * the guest consumed the previous message so complete the current one with
     * -EAGAIN and let the msg originator retry
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set given event flag for a given sint on a given vcpu, and signal the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    /* valid flag numbers are 0 .. HV_EVENT_FLAGS_COUNT - 1 */
    if (eventno >= HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->sctl_enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

    if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}
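
/*
 * Allocate a virq and install an irqchip routing entry of type
 * KVM_IRQ_ROUTING_HV_SINT for the given (vcpu, sint) pair, so that signaling
 * the eventfd attached to this GSI injects the SINT.
 */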
static int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}
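
/*
 * Create a refcounted route into the given SINT of the given VP.  If a
 * message callback is supplied, also set up the staged-message machinery and
 * the ack notifier.  The GSI/irqfd plumbing is only set up when the target
 * SynIC has its control (SCTL) enabled.
 */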
HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route = NULL;
    EventNotifier *ack_notifier = NULL;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;
    bool ack_event_initialized = false;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    if (!sint_route) {
        return NULL;
    }

    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        if (!sint_route->staged_msg) {
            goto cleanup_err_sint;
        }
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto cleanup_err_sint;
        }
        event_notifier_set_handler(ack_notifier, sint_ack_handler);
        ack_event_initialized = true;
    }

    /* See if we are done or if we need to set up a GSI for this SintRoute */
    if (!synic->sctl_enabled) {
        goto cleanup;
    }

    /* We need to set up a GSI for this SintRoute */
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto cleanup_err_sint;
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto cleanup_err_sint_notifier;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           ack_notifier, gsi);
    if (r) {
        goto cleanup_err_irqfd;
    }
    sint_route->gsi = gsi;
cleanup:
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);
    return sint_route;

cleanup_err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);

cleanup_err_sint_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);

cleanup_err_sint:
    if (ack_notifier) {
        if (ack_event_initialized) {
            event_notifier_set_handler(ack_notifier, NULL);
            event_notifier_cleanup(ack_notifier);
        }

        g_free(sint_route->staged_msg);
    }

    g_free(sint_route);
    return NULL;
}

void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    SynICState *synic;

    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    synic = sint_route->synic;
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_REMOVE(sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);

    if (sint_route->gsi) {
        kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                              &sint_route->sint_set_notifier,
                                              sint_route->gsi);
        kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
        event_notifier_cleanup(&sint_route->sint_set_notifier);
    }

    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    g_free(sint_route);
}

int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    if (!sint_route->gsi) {
        return 0;
    }

    return event_notifier_set(&sint_route->sint_set_notifier);
}
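
/*
 * Registries mapping connection IDs to message and event-flag handlers.
 * Writers take handlers_mutex; readers (the hypercall paths below) walk the
 * lists under RCU.
 */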
typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}

int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}
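
/*
 * Handle the HvPostMessage hypercall: map the guest's input page, validate
 * it, and dispatch the message to the handler registered for its connection
 * ID.
 */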
uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    WITH_RCU_READ_LOCK_GUARD() {
        QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
            if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
                ret = mh->handler(msg, mh->data);
                break;
            }
        }
    }

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}

static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

static bool process_event_flags_userspace;

int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}
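
/*
 * Handle the HvSignalEvent hypercall: for the slow (non-fast) variant the
 * parameter is first read from guest memory, then the notifier registered
 * for the connection ID is signaled.
 */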
uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

    /*
     * Per spec, bits 32-47 contain the extra "flag number".  However, we
     * have no use for it, and in all known usecases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    RCU_READ_LOCK_GUARD();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            return 0;
        }
    }
    return HV_STATUS_INVALID_CONNECTION_ID;
}
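
/*
 * Synthetic debugger (syndbg) support: a single global handler, registered
 * once via hyperv_set_syndbg_handler(), services the debug-transport
 * hypercalls and helpers below.
 */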
static HvSynDbgHandler hv_syndbg_handler;
static void *hv_syndbg_context;

void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context)
{
    assert(!hv_syndbg_handler);
    hv_syndbg_handler = handler;
    hv_syndbg_context = context;
}

uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa)
{
    uint16_t ret;
    HvSynDbgMsg msg;
    struct hyperv_reset_debug_session_output *reset_dbg_session = NULL;
    hwaddr len;

    if (!hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    len = sizeof(*reset_dbg_session);
    reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1);
    if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_CONNECTION_INFO;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret) {
        goto cleanup;
    }

    reset_dbg_session->host_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->host_port = msg.u.connection_info.host_port;
    /* The following fields are only used as validation for KDVM */
    memset(&reset_dbg_session->host_mac, 0,
           sizeof(reset_dbg_session->host_mac));
    reset_dbg_session->target_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->target_port = msg.u.connection_info.host_port;
    memset(&reset_dbg_session->target_mac, 0,
           sizeof(reset_dbg_session->target_mac));
cleanup:
    if (reset_dbg_session) {
        cpu_physical_memory_unmap(reset_dbg_session,
                                  sizeof(*reset_dbg_session), 1, len);
    }

    return ret;
}

uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
                                        bool fast)
{
    uint16_t ret;
    struct hyperv_retrieve_debug_data_input *debug_data_in = NULL;
    struct hyperv_retrieve_debug_data_output *debug_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*debug_data_in);
    debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!debug_data_in || in_len < sizeof(*debug_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    out_len = sizeof(*debug_data_out);
    debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!debug_data_out || out_len < sizeof(*debug_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out);
    msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out);
    msg.u.recv.options = debug_data_in->options;
    msg.u.recv.timeout = debug_data_in->timeout;
    msg.u.recv.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret == HV_STATUS_NO_DATA) {
        debug_data_out->retrieved_count = 0;
        debug_data_out->remaining_count = debug_data_in->count;
        goto cleanup;
    } else if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    debug_data_out->retrieved_count = msg.u.recv.retrieved_count;
    debug_data_out->remaining_count =
        debug_data_in->count - msg.u.recv.retrieved_count;
cleanup:
    if (debug_data_out) {
        cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1,
                                  out_len);
    }

    if (debug_data_in) {
        cpu_physical_memory_unmap(debug_data_in, sizeof(*debug_data_in), 0,
                                  in_len);
    }

    return ret;
}

uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast)
{
    uint16_t ret;
    struct hyperv_post_debug_data_input *post_data_in = NULL;
    struct hyperv_post_debug_data_output *post_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*post_data_in);
    post_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!post_data_in || in_len < sizeof(*post_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    if (post_data_in->count > TARGET_PAGE_SIZE - sizeof(*post_data_in)) {
        ret = HV_STATUS_INVALID_PARAMETER;
        goto cleanup;
    }

    out_len = sizeof(*post_data_out);
    post_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!post_data_out || out_len < sizeof(*post_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa + sizeof(*post_data_in);
    msg.u.send.count = post_data_in->count;
    msg.u.send.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    post_data_out->pending_count = msg.u.send.pending_count;
    ret = post_data_out->pending_count ? HV_STATUS_INSUFFICIENT_BUFFERS :
                                         HV_STATUS_SUCCESS;
cleanup:
    if (post_data_out) {
        cpu_physical_memory_unmap(post_data_out,
                                  sizeof(*post_data_out), 1, out_len);
    }

    if (post_data_in) {
        cpu_physical_memory_unmap(post_data_in,
                                  sizeof(*post_data_in), 0, in_len);
    }

    return ret;
}

uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa;
    msg.u.send.count = count;
    msg.u.send.is_raw = false;
    if (hv_syndbg_handler(hv_syndbg_context, &msg)) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    return HV_SYNDBG_STATUS_SEND_SUCCESS;
}

uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count)
{
    uint16_t ret;
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = ingpa;
    msg.u.recv.count = count;
    msg.u.recv.options = 0;
    msg.u.recv.timeout = 0;
    msg.u.recv.is_raw = false;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        return 0;
    }

    return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS,
                                     msg.u.recv.retrieved_count);
}

void hyperv_syndbg_set_pending_page(uint64_t ingpa)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return;
    }

    msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE;
    msg.u.pending_page.buf_gpa = ingpa;
    hv_syndbg_handler(hv_syndbg_context, &msg);
}

uint64_t hyperv_syndbg_query_options(void)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return 0;
    }

    msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS;
    if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) {
        return 0;
    }

    return msg.u.query_options.options;
}

static bool vmbus_recommended_features_enabled;

bool hyperv_are_vmbus_recommended_features_enabled(void)
{
    return vmbus_recommended_features_enabled;
}

void hyperv_set_vmbus_recommended_features_enabled(void)
{
    vmbus_recommended_features_enabled = true;
}