/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"
#include "qom/object.h"

struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool sctl_enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;

    QemuMutex sint_routes_mutex;
    QLIST_HEAD(, HvSintRoute) sint_routes;
};

#define TYPE_SYNIC "hyperv-synic"
OBJECT_DECLARE_SIMPLE_TYPE(SynICState, SYNIC)

static bool synic_enabled;

bool hyperv_is_synic_enabled(void)
{
    return synic_enabled;
}

static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}
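
/*
 * Keep the guest-visible SynIC overlay pages in sync with what the vCPU
 * programmed: when the message or event page address changes, the old RAM
 * subregion is unmapped from system memory and the new one is mapped at the
 * guest-provided address.
 */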
static void synic_update(SynICState *synic, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->sctl_enabled = sctl_enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

void hyperv_synic_update(CPUState *cs, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, sctl_enable, msg_page_addr, event_page_addr);
}

static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);
    qemu_mutex_init(&synic->sint_routes_mutex);
    QLIST_INIT(&synic->sint_routes);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);

    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
    assert(QLIST_EMPTY(&synic->sint_routes));
}

static void synic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    dc->reset = synic_reset;
    dc->user_creatable = false;
}
void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj);
    object_unref(obj);
    qdev_realize(DEVICE(obj), NULL, &error_abort);
    synic_enabled = true;
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_cold_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers). To guarantee
 * serialization with both KVM vcpu and the guest cpu, the messages are first
 * staged in an intermediate area and then posted to the SynIC message page in
 * the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy staged msg to msg slot,
         * notify the guest, records the status, marks the posting done (BUSY
         * -> POSTED), and schedules sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;
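
/*
 * The staged-message lifecycle above, condensed:
 *
 *   FREE  --(hyperv_post_msg, any thread)---> BUSY
 *   BUSY  --(cpu_post_msg, vcpu thread)-----> POSTED
 *   POSTED --(sint_msg_bh, main-loop BH)----> FREE
 */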
struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;

    HvSintStagedMessage *staged_msg;

    unsigned refcount;
    QLIST_ENTRY(HvSintRoute) link;
};

static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);
    assert(hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}
/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy we
     * set msg_pending flag in it so it will be the guest who will do EOM and
     * trigger the notification from KVM via sint_ack_notifier
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to guest in the
 * vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                        HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}
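
/*
 * Usage sketch (my_route, my_msg and my_cb are hypothetical names, not part
 * of this file): a producer posts a message and, if the target slot was
 * still occupied, gets its callback invoked with -EAGAIN once the guest
 * EOMs, at which point it may simply retry:
 *
 *     static void my_cb(void *data, int status)
 *     {
 *         if (status == -EAGAIN) {
 *             hyperv_post_msg(my_route, &my_msg);
 *         }
 *     }
 *
 *     my_route = hyperv_sint_route_new(vp_index, sint, my_cb, NULL);
 *     hyperv_post_msg(my_route, &my_msg);
 */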
static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * the guest consumed the previous message so complete the current one
     * with -EAGAIN and let the msg originator retry
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set given event flag for a given sint on a given vcpu, and signal the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    if (eventno > HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->sctl_enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

    if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}
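
/*
 * Sketch of typical use ("route" and the flag number are hypothetical): a
 * device signalling event flag 5 on an existing SINT route would do
 *
 *     int err = hyperv_set_event_flag(route, 5);
 *
 * which atomically sets the bit in the per-SINT flags slot and raises the
 * SINT only when the bit was not already pending, as implemented above.
 */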
HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route = NULL;
    EventNotifier *ack_notifier = NULL;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;
    bool ack_event_initialized = false;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    if (!sint_route) {
        return NULL;
    }

    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        if (!sint_route->staged_msg) {
            goto cleanup_err_sint;
        }
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto cleanup_err_sint;
        }
        event_notifier_set_handler(ack_notifier, sint_ack_handler);
        ack_event_initialized = true;
    }

    /* See if we are done or we need to set up a GSI for this SintRoute */
    if (!synic->sctl_enabled) {
        goto cleanup;
    }

    /* We need to set up a GSI for this SintRoute */
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto cleanup_err_sint;
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto cleanup_err_sint_notifier;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           ack_notifier, gsi);
    if (r) {
        goto cleanup_err_irqfd;
    }
    sint_route->gsi = gsi;
cleanup:
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);
    return sint_route;

cleanup_err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);

cleanup_err_sint_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);

cleanup_err_sint:
    if (ack_notifier) {
        if (ack_event_initialized) {
            event_notifier_set_handler(ack_notifier, NULL);
            event_notifier_cleanup(ack_notifier);
        }
        g_free(sint_route->staged_msg);
    }
    g_free(sint_route);
    return NULL;
}
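
/*
 * Route lifecycle sketch ("vp" and "sint" stand in for caller values):
 * create a route, use it, then drop the creation reference; any additional
 * users take their own references with hyperv_sint_route_ref():
 *
 *     HvSintRoute *r = hyperv_sint_route_new(vp, sint, NULL, NULL);
 *     if (r) {
 *         hyperv_sint_route_set_sint(r);
 *         hyperv_sint_route_unref(r);
 *     }
 */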
void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    SynICState *synic;

    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    synic = sint_route->synic;
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_REMOVE(sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);

    if (sint_route->gsi) {
        kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                              &sint_route->sint_set_notifier,
                                              sint_route->gsi);
        kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
        event_notifier_cleanup(&sint_route->sint_set_notifier);
    }

    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    g_free(sint_route);
}

int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    if (!sint_route->gsi) {
        return 0;
    }

    return event_notifier_set(&sint_route->sint_set_notifier);
}

typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}

int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}
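
/*
 * Registration sketch (my_handler and my_dev are hypothetical): a non-NULL
 * handler claims a connection id, NULL releases it again:
 *
 *     hyperv_set_msg_handler(conn_id, my_handler, my_dev);   // register
 *     ...
 *     hyperv_set_msg_handler(conn_id, NULL, NULL);           // unregister
 *
 * Registered handlers are looked up under RCU by the POST_MESSAGE hypercall
 * below.
 */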
uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    WITH_RCU_READ_LOCK_GUARD() {
        QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
            if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
                ret = mh->handler(msg, mh->data);
                break;
            }
        }
    }

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}

static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

static bool process_event_flags_userspace;

int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}
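
/*
 * Sketch (notifier setup shown, error handling elided): with
 * KVM_CAP_HYPERV_EVENTFD available, the conn_id -> eventfd binding lives in
 * the kernel and SIGNAL_EVENT hypercalls for registered ids are handled
 * there; otherwise the userspace handler list is consulted in
 * hyperv_hcall_signal_event() below:
 *
 *     EventNotifier n;
 *     event_notifier_init(&n, false);
 *     hyperv_set_event_flag_handler(conn_id, &n);    // register
 *     ...
 *     hyperv_set_event_flag_handler(conn_id, NULL);  // unregister
 */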
uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

    /*
     * Per spec, bits 32-47 contain the extra "flag number". However, we
     * have no use for it, and in all known use cases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    RCU_READ_LOCK_GUARD();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            return 0;
        }
    }
    return HV_STATUS_INVALID_CONNECTION_ID;
}

static HvSynDbgHandler hv_syndbg_handler;
static void *hv_syndbg_context;

void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context)
{
    assert(!hv_syndbg_handler);
    hv_syndbg_handler = handler;
    hv_syndbg_context = context;
}
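
/*
 * The SynDbg hypercalls and helpers below all funnel HvSynDbgMsg requests
 * into the single registered handler. A debugger backend (such as QEMU's
 * syndbg device) registers once at setup; names here are hypothetical:
 *
 *     hyperv_set_syndbg_handler(my_syndbg_handler, my_context);
 */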
uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa)
{
    uint16_t ret;
    HvSynDbgMsg msg;
    struct hyperv_reset_debug_session_output *reset_dbg_session = NULL;
    hwaddr len;

    if (!hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    len = sizeof(*reset_dbg_session);
    reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1);
    if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_CONNECTION_INFO;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret) {
        goto cleanup;
    }

    reset_dbg_session->host_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->host_port = msg.u.connection_info.host_port;
    /* The following fields are only used as validation for KDVM */
    memset(&reset_dbg_session->host_mac, 0,
           sizeof(reset_dbg_session->host_mac));
    reset_dbg_session->target_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->target_port = msg.u.connection_info.host_port;
    memset(&reset_dbg_session->target_mac, 0,
           sizeof(reset_dbg_session->target_mac));
cleanup:
    if (reset_dbg_session) {
        cpu_physical_memory_unmap(reset_dbg_session,
                                  sizeof(*reset_dbg_session), 1, len);
    }

    return ret;
}

uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
                                        bool fast)
{
    uint16_t ret;
    struct hyperv_retrieve_debug_data_input *debug_data_in = NULL;
    struct hyperv_retrieve_debug_data_output *debug_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*debug_data_in);
    debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!debug_data_in || in_len < sizeof(*debug_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    out_len = sizeof(*debug_data_out);
    debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!debug_data_out || out_len < sizeof(*debug_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out);
    msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out);
    msg.u.recv.options = debug_data_in->options;
    msg.u.recv.timeout = debug_data_in->timeout;
    msg.u.recv.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret == HV_STATUS_NO_DATA) {
        debug_data_out->retrieved_count = 0;
        debug_data_out->remaining_count = debug_data_in->count;
        goto cleanup;
    } else if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    debug_data_out->retrieved_count = msg.u.recv.retrieved_count;
    debug_data_out->remaining_count =
        debug_data_in->count - msg.u.recv.retrieved_count;
cleanup:
    if (debug_data_out) {
        cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1,
                                  out_len);
    }

    if (debug_data_in) {
        cpu_physical_memory_unmap(debug_data_in, sizeof(*debug_data_in), 0,
                                  in_len);
    }

    return ret;
}

uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast)
{
    uint16_t ret;
    struct hyperv_post_debug_data_input *post_data_in = NULL;
    struct hyperv_post_debug_data_output *post_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*post_data_in);
    post_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!post_data_in || in_len < sizeof(*post_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    if (post_data_in->count > TARGET_PAGE_SIZE - sizeof(*post_data_in)) {
        ret = HV_STATUS_INVALID_PARAMETER;
        goto cleanup;
    }

    out_len = sizeof(*post_data_out);
    post_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!post_data_out || out_len < sizeof(*post_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa + sizeof(*post_data_in);
    msg.u.send.count = post_data_in->count;
    msg.u.send.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    post_data_out->pending_count = msg.u.send.pending_count;
    ret = post_data_out->pending_count ? HV_STATUS_INSUFFICIENT_BUFFERS :
                                         HV_STATUS_SUCCESS;
cleanup:
    if (post_data_out) {
        cpu_physical_memory_unmap(post_data_out,
                                  sizeof(*post_data_out), 1, out_len);
    }

    if (post_data_in) {
        cpu_physical_memory_unmap(post_data_in,
                                  sizeof(*post_data_in), 0, in_len);
    }

    return ret;
}

uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa;
    msg.u.send.count = count;
    msg.u.send.is_raw = false;
    if (hv_syndbg_handler(hv_syndbg_context, &msg)) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    return HV_SYNDBG_STATUS_SEND_SUCCESS;
}

uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count)
{
    uint16_t ret;
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = ingpa;
    msg.u.recv.count = count;
    msg.u.recv.options = 0;
    msg.u.recv.timeout = 0;
    msg.u.recv.is_raw = false;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        return 0;
    }

    return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS,
                                     msg.u.recv.retrieved_count);
}

void hyperv_syndbg_set_pending_page(uint64_t ingpa)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return;
    }

    msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE;
    msg.u.pending_page.buf_gpa = ingpa;
    hv_syndbg_handler(hv_syndbg_context, &msg);
}

uint64_t hyperv_syndbg_query_options(void)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return 0;
    }

    msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS;
    if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) {
        return 0;
    }

    return msg.u.query_options.options;
}