/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"
typedef struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;
} SynICState;

#define TYPE_SYNIC "hyperv-synic"
#define SYNIC(obj) OBJECT_CHECK(SynICState, (obj), TYPE_SYNIC)

static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}
static void synic_update(SynICState *synic, bool enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->enabled = enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

void hyperv_synic_update(CPUState *cs, bool enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, enable, msg_page_addr, event_page_addr);
}
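
/*
 * A hedged usage sketch: the expected callers are the accelerator's
 * per-vcpu MSR handlers, which map or unmap the overlay pages when the
 * guest reconfigures its SynIC (the MSR names and *_addr variables below
 * are illustrative, not defined in this file):
 *
 *     hyperv_synic_update(cs, scontrol & HV_SYNIC_ENABLE,
 *                         (simp & HV_SIMP_ENABLE) ? simp_page_addr : 0,
 *                         (siefp & HV_SIEFP_ENABLE) ? siefp_page_addr : 0);
 */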
static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);

    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
}
static void synic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    dc->reset = synic_reset;
    dc->user_creatable = false;
}

void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj, &error_abort);
    object_unref(obj);
    object_property_set_bool(obj, true, "realized", &error_abort);
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers). To guarantee
 * serialization with both the KVM vcpu and the guest cpu, the messages are
 * first staged in an intermediate area and then posted to the SynIC message
 * page in the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy the staged msg into the
         * msg slot and notify the guest, records the status, marks the
         * posting done (BUSY -> POSTED), and schedules the sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;
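
/*
 * The state machine above, summarized (one staged message in flight per
 * route at a time):
 *
 *     hyperv_post_msg()   FREE   -> BUSY     (producer, e.g. main loop)
 *     cpu_post_msg()      BUSY   -> POSTED   (vcpu thread)
 *     sint_msg_bh()       POSTED -> FREE     (main-loop BH, runs cb)
 */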
struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;

    HvSintStagedMessage *staged_msg;

    unsigned refcount;
};

static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);
    assert(hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (atomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    atomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}

/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->enabled || !synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    atomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy
     * we set the msg_pending flag in it, so it will be the guest who does
     * EOM and triggers the notification from KVM via sint_ack_notifier
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to the guest in
 * the vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (atomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                       HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}
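
/*
 * A hedged usage sketch (my_msg_cb, MyDev and dev are hypothetical): a
 * producer posts a message and, when the target slot was busy, retries from
 * its completion callback once the guest has acked the pending message:
 *
 *     static void my_msg_cb(void *data, int status)
 *     {
 *         MyDev *dev = data;
 *         if (status == -EAGAIN) {
 *             hyperv_post_msg(dev->sint_route, &dev->msg);
 *         }
 *     }
 *
 *     ...
 *     if (hyperv_post_msg(dev->sint_route, &dev->msg) == -EAGAIN) {
 *         ... staging area busy: a previous message is still in flight ...
 *     }
 */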
static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * the guest consumed the previous message so complete the current one
     * with -EAGAIN and let the msg originator retry
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set given event flag for a given sint on a given vcpu, and signal the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    if (eventno > HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

    if ((atomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}
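
/*
 * Note the coalescing: atomic_fetch_or() reports a transition only if the
 * flag was previously clear, so repeated signals of the same event collapse
 * into a single SINT until the guest clears the flag. A caller would simply
 * do (sketch, route creation not shown):
 *
 *     ret = hyperv_set_event_flag(sint_route, eventno);
 */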
HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route;
    EventNotifier *ack_notifier;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto err;
    }

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto err_sint_set_notifier;
        }

        event_notifier_set_handler(ack_notifier, sint_ack_handler);
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto err_gsi;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           ack_notifier, gsi);
    if (r) {
        goto err_irqfd;
    }
    sint_route->gsi = gsi;
    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    return sint_route;

err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);
err_gsi:
    if (ack_notifier) {
        event_notifier_set_handler(ack_notifier, NULL);
        event_notifier_cleanup(ack_notifier);
        g_free(sint_route->staged_msg);
    }
err_sint_set_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);
err:
    g_free(sint_route);

    return NULL;
}
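
/*
 * A hedged sketch of route setup and teardown (vp_index 0, SINT 2 and the
 * callback arguments are illustrative; pass cb = NULL for an event-only
 * route with no staged-message machinery):
 *
 *     HvSintRoute *r = hyperv_sint_route_new(0, 2, my_msg_cb, dev);
 *     if (!r) {
 *         ... no such vcpu, no SynIC, or KVM route setup failed ...
 *     }
 *     ...
 *     hyperv_sint_route_unref(r);
 */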
void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                          &sint_route->sint_set_notifier,
                                          sint_route->gsi);
    kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    event_notifier_cleanup(&sint_route->sint_set_notifier);
    g_free(sint_route);
}

int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    return event_notifier_set(&sint_route->sint_set_notifier);
}
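
/*
 * Setting the notifier kicks the irqfd bound to the GSI in
 * hyperv_sint_route_new(), so the actual SINT injection into the guest is
 * performed by KVM without a round trip through userspace.
 */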

typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}
int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    qemu_mutex_lock(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            goto unlock;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }
unlock:
    qemu_mutex_unlock(&handlers_mutex);
    return ret;
}
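
/*
 * Sketch (my_handler and my_data are hypothetical): a connection ID is
 * claimed by passing a handler and released by passing NULL for the same
 * conn_id:
 *
 *     hyperv_set_msg_handler(conn_id, my_handler, my_data);  -> 0 or -EEXIST
 *     hyperv_set_msg_handler(conn_id, NULL, NULL);           -> 0 or -ENOENT
 */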
uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    rcu_read_lock();
    QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
        if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
            ret = mh->handler(msg, mh->data);
            break;
        }
    }
    rcu_read_unlock();

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}
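
/*
 * Note: the registered handler runs inside an RCU read-side critical
 * section in the vcpu thread issuing the hypercall, so it should be short
 * and must not block; longer work is better deferred (e.g. via a BH). This
 * is a design expectation rather than anything enforced here.
 */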
static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    qemu_mutex_lock(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            goto unlock;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }
unlock:
    qemu_mutex_unlock(&handlers_mutex);
    return ret;
}

static bool process_event_flags_userspace;

int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}
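
/*
 * Sketch: with KVM_CAP_HYPERV_EVENTFD available, the notifier is wired
 * directly into the kernel and passing NULL deassigns it (via
 * KVM_HYPERV_EVENTFD_DEASSIGN); the userspace fallback mirrors the same
 * assign/deassign semantics. dev->notifier is hypothetical:
 *
 *     hyperv_set_event_flag_handler(conn_id, &dev->notifier);
 *     ...
 *     hyperv_set_event_flag_handler(conn_id, NULL);
 */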
uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    uint16_t ret;
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

    /*
     * Per spec, bits 32-47 contain the extra "flag number". However, we
     * have no use for it, and in all known use cases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    rcu_read_lock();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            ret = 0;
            break;
        }
    }
    rcu_read_unlock();

    return ret;
}