/* ccw.c */
  1. /*
  2. * vfio based subchannel assignment support
  3. *
  4. * Copyright 2017 IBM Corp.
  5. * Copyright 2019 Red Hat, Inc.
  6. *
  7. * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
  8. * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
  9. * Pierre Morel <pmorel@linux.vnet.ibm.com>
  10. * Cornelia Huck <cohuck@redhat.com>
  11. *
  12. * This work is licensed under the terms of the GNU GPL, version 2 or (at
  13. * your option) any later version. See the COPYING file in the top-level
  14. * directory.
  15. */
  16. #include "qemu/osdep.h"
  17. #include <linux/vfio.h>
  18. #include <linux/vfio_ccw.h>
  19. #include <sys/ioctl.h>
  20. #include "qapi/error.h"
  21. #include "hw/sysbus.h"
  22. #include "hw/vfio/vfio.h"
  23. #include "hw/vfio/vfio-common.h"
  24. #include "hw/s390x/s390-ccw.h"
  25. #include "hw/s390x/vfio-ccw.h"
  26. #include "hw/qdev-properties.h"
  27. #include "hw/s390x/ccw-device.h"
  28. #include "exec/address-spaces.h"
  29. #include "qemu/error-report.h"
  30. #include "qemu/main-loop.h"
  31. #include "qemu/module.h"
/* Per-device state for a vfio-ccw (mediated subchannel) device. */
struct VFIOCCWDevice {
    S390CCWDevice cdev;        /* base emulated s390 CCW device */
    VFIODevice vdev;           /* common vfio device state (fd, name, ops) */
    /* Mandatory I/O region: accessed with pread/pwrite on vdev.fd. */
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    /* Optional async command region (CSCH/HSCH); NULL when absent. */
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    EventNotifier io_notifier; /* signaled by the kernel on I/O completion */
    bool force_orb_pfch;       /* "force-orb-pfch" property value */
    bool warned_orb_pfch;      /* one-shot latch for the PFCH warning */
};
/*
 * Warn about a PFCH (prefetch) issue for this device at most once,
 * prefixing the message with the subchannel's cssid.ssid.devno triple.
 */
static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
                                  const char *msg)
{
    warn_report_once_cond(&vcdev->warned_orb_pfch,
                          "vfio-ccw (devno %x.%x.%04x): %s",
                          sch->cssid, sch->ssid, sch->devno, msg);
}
/* vfio-ccw devices never request a hot reset from vfio common code. */
static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}
/*
 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
 * vfio_ccw device now.
 */
struct VFIODeviceOps vfio_ccw_ops = {
    .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
};
  63. static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
  64. {
  65. S390CCWDevice *cdev = sch->driver_data;
  66. VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
  67. struct ccw_io_region *region = vcdev->io_region;
  68. int ret;
  69. if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH)) {
  70. if (!(vcdev->force_orb_pfch)) {
  71. warn_once_pfch(vcdev, sch, "requires PFCH flag set");
  72. sch_gen_unit_exception(sch);
  73. css_inject_io_interrupt(sch);
  74. return IOINST_CC_EXPECTED;
  75. } else {
  76. sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
  77. warn_once_pfch(vcdev, sch, "PFCH flag forced");
  78. }
  79. }
  80. QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
  81. QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
  82. QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));
  83. memset(region, 0, sizeof(*region));
  84. memcpy(region->orb_area, &sch->orb, sizeof(ORB));
  85. memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));
  86. again:
  87. ret = pwrite(vcdev->vdev.fd, region,
  88. vcdev->io_region_size, vcdev->io_region_offset);
  89. if (ret != vcdev->io_region_size) {
  90. if (errno == EAGAIN) {
  91. goto again;
  92. }
  93. error_report("vfio-ccw: wirte I/O region failed with errno=%d", errno);
  94. ret = -errno;
  95. } else {
  96. ret = region->ret_code;
  97. }
  98. switch (ret) {
  99. case 0:
  100. return IOINST_CC_EXPECTED;
  101. case -EBUSY:
  102. return IOINST_CC_BUSY;
  103. case -ENODEV:
  104. case -EACCES:
  105. return IOINST_CC_NOT_OPERATIONAL;
  106. case -EFAULT:
  107. default:
  108. sch_gen_unit_exception(sch);
  109. css_inject_io_interrupt(sch);
  110. return IOINST_CC_EXPECTED;
  111. }
  112. }
  113. static int vfio_ccw_handle_clear(SubchDev *sch)
  114. {
  115. S390CCWDevice *cdev = sch->driver_data;
  116. VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
  117. struct ccw_cmd_region *region = vcdev->async_cmd_region;
  118. int ret;
  119. if (!vcdev->async_cmd_region) {
  120. /* Async command region not available, fall back to emulation */
  121. return -ENOSYS;
  122. }
  123. memset(region, 0, sizeof(*region));
  124. region->command = VFIO_CCW_ASYNC_CMD_CSCH;
  125. again:
  126. ret = pwrite(vcdev->vdev.fd, region,
  127. vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
  128. if (ret != vcdev->async_cmd_region_size) {
  129. if (errno == EAGAIN) {
  130. goto again;
  131. }
  132. error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
  133. ret = -errno;
  134. } else {
  135. ret = region->ret_code;
  136. }
  137. switch (ret) {
  138. case 0:
  139. case -ENODEV:
  140. case -EACCES:
  141. return 0;
  142. case -EFAULT:
  143. default:
  144. sch_gen_unit_exception(sch);
  145. css_inject_io_interrupt(sch);
  146. return 0;
  147. }
  148. }
  149. static int vfio_ccw_handle_halt(SubchDev *sch)
  150. {
  151. S390CCWDevice *cdev = sch->driver_data;
  152. VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
  153. struct ccw_cmd_region *region = vcdev->async_cmd_region;
  154. int ret;
  155. if (!vcdev->async_cmd_region) {
  156. /* Async command region not available, fall back to emulation */
  157. return -ENOSYS;
  158. }
  159. memset(region, 0, sizeof(*region));
  160. region->command = VFIO_CCW_ASYNC_CMD_HSCH;
  161. again:
  162. ret = pwrite(vcdev->vdev.fd, region,
  163. vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
  164. if (ret != vcdev->async_cmd_region_size) {
  165. if (errno == EAGAIN) {
  166. goto again;
  167. }
  168. error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
  169. ret = -errno;
  170. } else {
  171. ret = region->ret_code;
  172. }
  173. switch (ret) {
  174. case 0:
  175. case -EBUSY:
  176. case -ENODEV:
  177. case -EACCES:
  178. return 0;
  179. case -EFAULT:
  180. default:
  181. sch_gen_unit_exception(sch);
  182. css_inject_io_interrupt(sch);
  183. return 0;
  184. }
  185. }
  186. static void vfio_ccw_reset(DeviceState *dev)
  187. {
  188. CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
  189. S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
  190. VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
  191. ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
  192. }
/*
 * I/O completion handler, invoked when the kernel vfio-ccw driver signals
 * the io_notifier eventfd.  Reads the IRB back from the I/O region, folds
 * it into the subchannel's current status (or synthesizes an error status
 * if the read fails), and injects an I/O interrupt into the guest.
 */
static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    S390CCWDevice *cdev = S390_CCW_DEVICE(vcdev);
    CcwDevice *ccw_dev = CCW_DEVICE(cdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    int size;

    /* Spurious wakeup: nothing pending on the eventfd. */
    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    css_inject_io_interrupt(sch);
}
  256. static void vfio_ccw_register_io_notifier(VFIOCCWDevice *vcdev, Error **errp)
  257. {
  258. VFIODevice *vdev = &vcdev->vdev;
  259. struct vfio_irq_info *irq_info;
  260. size_t argsz;
  261. int fd;
  262. if (vdev->num_irqs < VFIO_CCW_IO_IRQ_INDEX + 1) {
  263. error_setg(errp, "vfio: unexpected number of io irqs %u",
  264. vdev->num_irqs);
  265. return;
  266. }
  267. argsz = sizeof(*irq_info);
  268. irq_info = g_malloc0(argsz);
  269. irq_info->index = VFIO_CCW_IO_IRQ_INDEX;
  270. irq_info->argsz = argsz;
  271. if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
  272. irq_info) < 0 || irq_info->count < 1) {
  273. error_setg_errno(errp, errno, "vfio: Error getting irq info");
  274. goto out_free_info;
  275. }
  276. if (event_notifier_init(&vcdev->io_notifier, 0)) {
  277. error_setg_errno(errp, errno,
  278. "vfio: Unable to init event notifier for IO");
  279. goto out_free_info;
  280. }
  281. fd = event_notifier_get_fd(&vcdev->io_notifier);
  282. qemu_set_fd_handler(fd, vfio_ccw_io_notifier_handler, NULL, vcdev);
  283. if (vfio_set_irq_signaling(vdev, VFIO_CCW_IO_IRQ_INDEX, 0,
  284. VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
  285. qemu_set_fd_handler(fd, NULL, NULL, vcdev);
  286. event_notifier_cleanup(&vcdev->io_notifier);
  287. }
  288. out_free_info:
  289. g_free(irq_info);
  290. }
  291. static void vfio_ccw_unregister_io_notifier(VFIOCCWDevice *vcdev)
  292. {
  293. Error *err = NULL;
  294. if (vfio_set_irq_signaling(&vcdev->vdev, VFIO_CCW_IO_IRQ_INDEX, 0,
  295. VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
  296. error_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
  297. }
  298. qemu_set_fd_handler(event_notifier_get_fd(&vcdev->io_notifier),
  299. NULL, NULL, vcdev);
  300. event_notifier_cleanup(&vcdev->io_notifier);
  301. }
  302. static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
  303. {
  304. VFIODevice *vdev = &vcdev->vdev;
  305. struct vfio_region_info *info;
  306. int ret;
  307. /* Sanity check device */
  308. if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
  309. error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
  310. return;
  311. }
  312. /*
  313. * We always expect at least the I/O region to be present. We also
  314. * may have a variable number of regions governed by capabilities.
  315. */
  316. if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
  317. error_setg(errp, "vfio: too few regions (%u), expected at least %u",
  318. vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
  319. return;
  320. }
  321. ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
  322. if (ret) {
  323. error_setg_errno(errp, -ret, "vfio: Error getting config info");
  324. return;
  325. }
  326. vcdev->io_region_size = info->size;
  327. if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
  328. error_setg(errp, "vfio: Unexpected size of the I/O region");
  329. g_free(info);
  330. return;
  331. }
  332. vcdev->io_region_offset = info->offset;
  333. vcdev->io_region = g_malloc0(info->size);
  334. /* check for the optional async command region */
  335. ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
  336. VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
  337. if (!ret) {
  338. vcdev->async_cmd_region_size = info->size;
  339. if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
  340. error_setg(errp, "vfio: Unexpected size of the async cmd region");
  341. g_free(vcdev->io_region);
  342. g_free(info);
  343. return;
  344. }
  345. vcdev->async_cmd_region_offset = info->offset;
  346. vcdev->async_cmd_region = g_malloc0(info->size);
  347. }
  348. g_free(info);
  349. }
  350. static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
  351. {
  352. g_free(vcdev->async_cmd_region);
  353. g_free(vcdev->io_region);
  354. }
  355. static void vfio_ccw_put_device(VFIOCCWDevice *vcdev)
  356. {
  357. g_free(vcdev->vdev.name);
  358. vfio_put_base_device(&vcdev->vdev);
  359. }
  360. static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev,
  361. Error **errp)
  362. {
  363. char *name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
  364. vcdev->cdev.hostid.ssid,
  365. vcdev->cdev.hostid.devid);
  366. VFIODevice *vbasedev;
  367. QLIST_FOREACH(vbasedev, &group->device_list, next) {
  368. if (strcmp(vbasedev->name, name) == 0) {
  369. error_setg(errp, "vfio: subchannel %s has already been attached",
  370. name);
  371. goto out_err;
  372. }
  373. }
  374. /*
  375. * All vfio-ccw devices are believed to operate in a way compatible with
  376. * memory ballooning, ie. pages pinned in the host are in the current
  377. * working set of the guest driver and therefore never overlap with pages
  378. * available to the guest balloon driver. This needs to be set before
  379. * vfio_get_device() for vfio common to handle the balloon inhibitor.
  380. */
  381. vcdev->vdev.balloon_allowed = true;
  382. if (vfio_get_device(group, vcdev->cdev.mdevid, &vcdev->vdev, errp)) {
  383. goto out_err;
  384. }
  385. vcdev->vdev.ops = &vfio_ccw_ops;
  386. vcdev->vdev.type = VFIO_DEVICE_TYPE_CCW;
  387. vcdev->vdev.name = name;
  388. vcdev->vdev.dev = &vcdev->cdev.parent_obj.parent_obj;
  389. return;
  390. out_err:
  391. g_free(name);
  392. }
  393. static VFIOGroup *vfio_ccw_get_group(S390CCWDevice *cdev, Error **errp)
  394. {
  395. char *tmp, group_path[PATH_MAX];
  396. ssize_t len;
  397. int groupid;
  398. tmp = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/%s/iommu_group",
  399. cdev->hostid.cssid, cdev->hostid.ssid,
  400. cdev->hostid.devid, cdev->mdevid);
  401. len = readlink(tmp, group_path, sizeof(group_path));
  402. g_free(tmp);
  403. if (len <= 0 || len >= sizeof(group_path)) {
  404. error_setg(errp, "vfio: no iommu_group found");
  405. return NULL;
  406. }
  407. group_path[len] = 0;
  408. if (sscanf(basename(group_path), "%d", &groupid) != 1) {
  409. error_setg(errp, "vfio: failed to read %s", group_path);
  410. return NULL;
  411. }
  412. return vfio_get_group(groupid, &address_space_memory, errp);
  413. }
/*
 * Realize callback for the vfio-ccw device.  Acquires resources in order
 * (subchannel realize, VFIO group, device, regions, I/O notifier) and
 * unwinds them in reverse via the goto chain on failure.
 */
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    VFIOGroup *group;
    CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
    S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
    VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    Error *err = NULL;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        cdc->realize(cdev, vcdev->vdev.sysfsdev, &err);
        if (err) {
            goto out_err_propagate;
        }
    }

    group = vfio_ccw_get_group(cdev, &err);
    if (!group) {
        goto out_group_err;
    }

    vfio_ccw_get_device(group, vcdev, &err);
    if (err) {
        goto out_device_err;
    }

    vfio_ccw_get_region(vcdev, &err);
    if (err) {
        goto out_region_err;
    }

    vfio_ccw_register_io_notifier(vcdev, &err);
    if (err) {
        goto out_notifier_err;
    }

    return;

    /* Unwind in strict reverse order of acquisition. */
out_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_ccw_put_device(vcdev);
out_device_err:
    vfio_put_group(group);
out_group_err:
    if (cdc->unrealize) {
        cdc->unrealize(cdev, NULL);
    }
out_err_propagate:
    error_propagate(errp, err);
}
  459. static void vfio_ccw_unrealize(DeviceState *dev, Error **errp)
  460. {
  461. CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
  462. S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
  463. VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
  464. S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
  465. VFIOGroup *group = vcdev->vdev.group;
  466. vfio_ccw_unregister_io_notifier(vcdev);
  467. vfio_ccw_put_region(vcdev);
  468. vfio_ccw_put_device(vcdev);
  469. vfio_put_group(group);
  470. if (cdc->unrealize) {
  471. cdc->unrealize(cdev, errp);
  472. }
  473. }
static Property vfio_ccw_properties[] = {
    /* sysfs path of the mediated device to attach */
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    /* force the ORB PFCH bit instead of rejecting requests without it */
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
    DEFINE_PROP_END_OF_LIST(),
};
/* Passed-through subchannels cannot be migrated. */
static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};
/* QOM class init: wire device callbacks and subchannel handlers. */
static void vfio_ccw_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);

    /* Generic device plumbing. */
    dc->props = vfio_ccw_properties;
    dc->vmsd = &vfio_ccw_vmstate;
    dc->desc = "VFIO-based subchannel assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->realize = vfio_ccw_realize;
    dc->unrealize = vfio_ccw_unrealize;
    dc->reset = vfio_ccw_reset;

    /* Subchannel instruction handlers backed by the kernel driver. */
    cdc->handle_request = vfio_ccw_handle_request;
    cdc->handle_halt = vfio_ccw_handle_halt;
    cdc->handle_clear = vfio_ccw_handle_clear;
}
/* QOM type: vfio-ccw, derived from the generic s390 CCW device. */
static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .class_init = vfio_ccw_class_init,
};
/* Register the vfio-ccw QOM type with QEMU's type system at startup. */
static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}

type_init(register_vfio_ccw_type)