/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"

typedef struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;
} SynICState;

#define TYPE_SYNIC "hyperv-synic"
#define SYNIC(obj) OBJECT_CHECK(SynICState, (obj), TYPE_SYNIC)

static bool synic_enabled;

bool hyperv_is_synic_enabled(void)
{
    return synic_enabled;
}

static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}

static void synic_update(SynICState *synic, bool enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->enabled = enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

void hyperv_synic_update(CPUState *cs, bool enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, enable, msg_page_addr, event_page_addr);
}
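
/*
 * Illustrative only (the caller lives in the target-specific KVM code, not
 * in this file): on a guest write to the SynIC control/page MSRs, the x86
 * code is expected to recompute the page addresses and call
 * hyperv_synic_update(), roughly:
 *
 *     hyperv_synic_update(cs, sctl & HV_SYNIC_ENABLE,
 *                         simp & HV_SIMP_ENABLE ? simp & TARGET_PAGE_MASK : 0,
 *                         siefp & HV_SIEFP_ENABLE ? siefp & TARGET_PAGE_MASK : 0);
 *
 * where sctl/simp/siefp stand for the guest-written MSR values; the names
 * and bit macros here are a hypothetical sketch, not definitions from this
 * file.
 */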

static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);

    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
}

static void synic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    dc->reset = synic_reset;
    dc->user_creatable = false;
}

void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj);
    object_unref(obj);
    qdev_realize(DEVICE(obj), NULL, &error_abort);
    synic_enabled = true;
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_legacy_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers).  To guarantee
 * serialization with both the KVM vcpu and the guest cpu, the messages are
 * first staged in an intermediate area and then posted to the SynIC message
 * page in the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy the staged msg to the msg
         * slot and notify the guest, records the status, marks the posting
         * done (BUSY -> POSTED), and schedules the sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;

struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;
    HvSintStagedMessage *staged_msg;
    unsigned refcount;
};

static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);

    assert(hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (atomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    atomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}

/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->enabled || !synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    atomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy
     * we set the msg_pending flag in it, so it will be the guest who does
     * EOM and triggers the notification from KVM via sint_ack_notifier.
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to the guest in
 * the vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (atomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                       HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}
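
/*
 * Illustrative usage (assumed, not part of this file): a message producer
 * posts via hyperv_post_msg() and retries from its completion callback once
 * the guest has consumed the previous message:
 *
 *     static void my_msg_cb(void *data, int status)    // hypothetical
 *     {
 *         if (status == -EAGAIN) {
 *             // slot was busy; the guest has now done EOM, try again
 *             hyperv_post_msg(my_route, &my_msg);
 *         }
 *     }
 *
 *     my_route = hyperv_sint_route_new(vp_index, sint, my_msg_cb, NULL);
 *     hyperv_post_msg(my_route, &my_msg);
 *
 * A -EAGAIN returned directly from hyperv_post_msg() instead means the
 * staging area itself is still busy with an earlier message.
 */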

static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * the guest consumed the previous message so complete the current one
     * with -EAGAIN and let the msg originator retry
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set the given event flag for the given sint on the given vcpu, and signal
 * the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    if (eventno >= HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

    if ((atomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}
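
/*
 * Worked example (assuming a 64-bit host): for eventno == 65,
 * BIT_WORD(65) == 1 and BIT_MASK(65) == 1UL << (65 % 64) == 2, so bit 1 of
 * flags[1] is set.  The SINT is only signaled when atomic_fetch_or() shows
 * a 0 -> 1 transition; re-setting an already-set flag is a no-op.
 */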

HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route;
    EventNotifier *ack_notifier;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto err;
    }

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto err_sint_set_notifier;
        }

        event_notifier_set_handler(ack_notifier, sint_ack_handler);
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto err_gsi;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           ack_notifier, gsi);
    if (r) {
        goto err_irqfd;
    }
    sint_route->gsi = gsi;
    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    return sint_route;

err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);
err_gsi:
    if (ack_notifier) {
        event_notifier_set_handler(ack_notifier, NULL);
        event_notifier_cleanup(ack_notifier);
        g_free(sint_route->staged_msg);
    }
err_sint_set_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);
err:
    g_free(sint_route);

    return NULL;
}
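
/*
 * Illustrative usage (assumed): a caller that only needs to raise the SINT
 * passes cb == NULL, which skips the staged-message and ack machinery:
 *
 *     HvSintRoute *route = hyperv_sint_route_new(vp_index, sint, NULL, NULL);
 *     if (route) {
 *         hyperv_sint_route_set_sint(route);   // just inject the interrupt
 *     }
 *
 * Message producers pass a callback instead, so they are told when the
 * message slot is free again (see hyperv_post_msg() above).
 */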

void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                          &sint_route->sint_set_notifier,
                                          sint_route->gsi);
    kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    event_notifier_cleanup(&sint_route->sint_set_notifier);
    g_free(sint_route);
}

int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    return event_notifier_set(&sint_route->sint_set_notifier);
}

typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}

int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}
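
/*
 * Illustrative usage (assumed): a device registers its message handler for
 * a connection id once, and unregisters by passing handler == NULL:
 *
 *     hyperv_set_msg_handler(conn_id, my_handler, my_state);   // register
 *     ...
 *     hyperv_set_msg_handler(conn_id, NULL, NULL);             // unregister
 *
 * -EEXIST on a duplicate registration and -ENOENT on unregistering an
 * unknown conn_id let the caller detect mismatched pairs.
 */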

uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    WITH_RCU_READ_LOCK_GUARD() {
        QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
            if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
                ret = mh->handler(msg, mh->data);
                break;
            }
        }
    }

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}

static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

static bool process_event_flags_userspace;

int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}
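
/*
 * Design note: with KVM_CAP_HYPERV_EVENTFD the HvSignalEvent hypercall is
 * matched to the eventfd entirely inside the kernel, with no exit to
 * userspace; hyperv_hcall_signal_event() below only runs on kernels lacking
 * that capability.  Either way the caller's pattern is the same (sketch,
 * assuming an EventNotifier the caller owns):
 *
 *     hyperv_set_event_flag_handler(conn_id, &my_notifier);   // assign
 *     hyperv_set_event_flag_handler(conn_id, NULL);           // deassign
 */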

uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

    /*
     * Per spec, bits 32-47 contain the extra "flag number".  However, we
     * have no use for it, and in all known use cases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    RCU_READ_LOCK_GUARD();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            return 0;
        }
    }
    return HV_STATUS_INVALID_CONNECTION_ID;
}
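
/*
 * Worked example (assumed values): a fast call with param ==
 * 0x0000000000000005 has a zero flag number (bits 32-47) and no reserved
 * bits set, so the notifier registered under conn_id 5 is signaled.  With
 * param == 0x0000000100000005 the flag-number check fires and the call
 * fails with HV_STATUS_INVALID_PORT_ID.
 */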