2
0

virtio-pci.c 66 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073
  1. /*
  2. * Virtio PCI Bindings
  3. *
  4. * Copyright IBM, Corp. 2007
  5. * Copyright (c) 2009 CodeSourcery
  6. *
  7. * Authors:
  8. * Anthony Liguori <aliguori@us.ibm.com>
  9. * Paul Brook <paul@codesourcery.com>
  10. *
  11. * This work is licensed under the terms of the GNU GPL, version 2. See
  12. * the COPYING file in the top-level directory.
  13. *
  14. * Contributions after 2012-01-13 are licensed under the terms of the
  15. * GNU GPL, version 2 or (at your option) any later version.
  16. */
  17. #include "qemu/osdep.h"
  18. #include "exec/memop.h"
  19. #include "standard-headers/linux/virtio_pci.h"
  20. #include "hw/virtio/virtio.h"
  21. #include "migration/qemu-file-types.h"
  22. #include "hw/pci/pci.h"
  23. #include "hw/pci/pci_bus.h"
  24. #include "hw/qdev-properties.h"
  25. #include "qapi/error.h"
  26. #include "qemu/error-report.h"
  27. #include "qemu/module.h"
  28. #include "hw/pci/msi.h"
  29. #include "hw/pci/msix.h"
  30. #include "hw/loader.h"
  31. #include "sysemu/kvm.h"
  32. #include "virtio-pci.h"
  33. #include "qemu/range.h"
  34. #include "hw/virtio/virtio-bus.h"
  35. #include "qapi/visitor.h"
/* Size of the legacy register bank up to (but not including) the
 * device-specific config area; depends on whether MSI-X is present. */
#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

/* VIRTIO_PCI_CONFIG comes from the standard Linux header included above;
 * undef it here so the QEMU-local macros below are used instead. */
#undef VIRTIO_PCI_CONFIG
/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
  57. static void virtio_pci_notify(DeviceState *d, uint16_t vector)
  58. {
  59. VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
  60. if (msix_enabled(&proxy->pci_dev))
  61. msix_notify(&proxy->pci_dev, vector);
  62. else {
  63. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  64. pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
  65. }
  66. }
  67. static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
  68. {
  69. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  70. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  71. pci_device_save(&proxy->pci_dev, f);
  72. msix_save(&proxy->pci_dev, f);
  73. if (msix_present(&proxy->pci_dev))
  74. qemu_put_be16(f, vdev->config_vector);
  75. }
/* Per-virtqueue proxy state migrated in modern (virtio 1.0) mode. */
static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        /* Ring addresses are stored as two 32-bit halves each. */
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection predicate: only migrate modern state when the proxy is
 * operating in modern (virtio 1.0) mode. */
static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

/* Modern-mode proxy registers: feature selectors, negotiated feature
 * words, and the per-queue state above. */
static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

/* Top-level "extra state" description: no direct fields, all data lives
 * in the (conditional) modern-state subsection. */
static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};
  123. static bool virtio_pci_has_extra_state(DeviceState *d)
  124. {
  125. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  126. return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
  127. }
  128. static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
  129. {
  130. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  131. vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
  132. }
  133. static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
  134. {
  135. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  136. return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
  137. }
  138. static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
  139. {
  140. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  141. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  142. if (msix_present(&proxy->pci_dev))
  143. qemu_put_be16(f, virtio_queue_vector(vdev, n));
  144. }
  145. static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
  146. {
  147. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  148. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  149. int ret;
  150. ret = pci_device_load(&proxy->pci_dev, f);
  151. if (ret) {
  152. return ret;
  153. }
  154. msix_unuse_all_vectors(&proxy->pci_dev);
  155. msix_load(&proxy->pci_dev, f);
  156. if (msix_present(&proxy->pci_dev)) {
  157. qemu_get_be16s(f, &vdev->config_vector);
  158. } else {
  159. vdev->config_vector = VIRTIO_NO_VECTOR;
  160. }
  161. if (vdev->config_vector != VIRTIO_NO_VECTOR) {
  162. return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
  163. }
  164. return 0;
  165. }
  166. static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
  167. {
  168. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  169. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  170. uint16_t vector;
  171. if (msix_present(&proxy->pci_dev)) {
  172. qemu_get_be16s(f, &vector);
  173. } else {
  174. vector = VIRTIO_NO_VECTOR;
  175. }
  176. virtio_queue_set_vector(vdev, n, vector);
  177. if (vector != VIRTIO_NO_VECTOR) {
  178. return msix_vector_use(&proxy->pci_dev, vector);
  179. }
  180. return 0;
  181. }
  182. static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
  183. {
  184. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  185. return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
  186. }
  187. #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
  188. static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
  189. {
  190. return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
  191. QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
  192. }
  193. static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
  194. int n, bool assign)
  195. {
  196. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  197. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  198. VirtQueue *vq = virtio_get_queue(vdev, n);
  199. bool legacy = virtio_pci_legacy(proxy);
  200. bool modern = virtio_pci_modern(proxy);
  201. bool fast_mmio = kvm_ioeventfd_any_length_enabled();
  202. bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
  203. MemoryRegion *modern_mr = &proxy->notify.mr;
  204. MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
  205. MemoryRegion *legacy_mr = &proxy->bar;
  206. hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
  207. virtio_get_queue_index(vq);
  208. hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
  209. if (assign) {
  210. if (modern) {
  211. if (fast_mmio) {
  212. memory_region_add_eventfd(modern_mr, modern_addr, 0,
  213. false, n, notifier);
  214. } else {
  215. memory_region_add_eventfd(modern_mr, modern_addr, 2,
  216. false, n, notifier);
  217. }
  218. if (modern_pio) {
  219. memory_region_add_eventfd(modern_notify_mr, 0, 2,
  220. true, n, notifier);
  221. }
  222. }
  223. if (legacy) {
  224. memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
  225. true, n, notifier);
  226. }
  227. } else {
  228. if (modern) {
  229. if (fast_mmio) {
  230. memory_region_del_eventfd(modern_mr, modern_addr, 0,
  231. false, n, notifier);
  232. } else {
  233. memory_region_del_eventfd(modern_mr, modern_addr, 2,
  234. false, n, notifier);
  235. }
  236. if (modern_pio) {
  237. memory_region_del_eventfd(modern_notify_mr, 0, 2,
  238. true, n, notifier);
  239. }
  240. }
  241. if (legacy) {
  242. memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
  243. true, n, notifier);
  244. }
  245. }
  246. return 0;
  247. }
/* Thin wrappers: ioeventfd lifecycle is delegated to the common virtio bus. */
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
/* Handle a guest write to the legacy (virtio 0.9) register bank.
 * @addr is the offset within the legacy region, @val the value written. */
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            /* Writing PFN 0 is the legacy way to reset the device. */
            virtio_pci_reset(DEVICE(proxy));
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        /* Out-of-range selectors are silently ignored. */
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        /* Ordering matters: stop ioeventfd before the device loses
         * DRIVER_OK, start it only after the device has the new status. */
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }
        virtio_set_status(vdev, val & 0xFF);
        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }
        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }
        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        /* Release the old vector before trying to claim the new one. */
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}
  328. static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
  329. {
  330. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  331. uint32_t ret = 0xFFFFFFFF;
  332. switch (addr) {
  333. case VIRTIO_PCI_HOST_FEATURES:
  334. ret = vdev->host_features;
  335. break;
  336. case VIRTIO_PCI_GUEST_FEATURES:
  337. ret = vdev->guest_features;
  338. break;
  339. case VIRTIO_PCI_QUEUE_PFN:
  340. ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
  341. >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
  342. break;
  343. case VIRTIO_PCI_QUEUE_NUM:
  344. ret = virtio_queue_get_num(vdev, vdev->queue_sel);
  345. break;
  346. case VIRTIO_PCI_QUEUE_SEL:
  347. ret = vdev->queue_sel;
  348. break;
  349. case VIRTIO_PCI_STATUS:
  350. ret = vdev->status;
  351. break;
  352. case VIRTIO_PCI_ISR:
  353. /* reading from the ISR also clears it. */
  354. ret = atomic_xchg(&vdev->isr, 0);
  355. pci_irq_deassert(&proxy->pci_dev);
  356. break;
  357. case VIRTIO_MSI_CONFIG_VECTOR:
  358. ret = vdev->config_vector;
  359. break;
  360. case VIRTIO_MSI_QUEUE_VECTOR:
  361. ret = virtio_queue_vector(vdev, vdev->queue_sel);
  362. break;
  363. default:
  364. break;
  365. }
  366. return ret;
  367. }
  368. static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
  369. unsigned size)
  370. {
  371. VirtIOPCIProxy *proxy = opaque;
  372. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  373. uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
  374. uint64_t val = 0;
  375. if (addr < config) {
  376. return virtio_ioport_read(proxy, addr);
  377. }
  378. addr -= config;
  379. switch (size) {
  380. case 1:
  381. val = virtio_config_readb(vdev, addr);
  382. break;
  383. case 2:
  384. val = virtio_config_readw(vdev, addr);
  385. if (virtio_is_big_endian(vdev)) {
  386. val = bswap16(val);
  387. }
  388. break;
  389. case 4:
  390. val = virtio_config_readl(vdev, addr);
  391. if (virtio_is_big_endian(vdev)) {
  392. val = bswap32(val);
  393. }
  394. break;
  395. }
  396. return val;
  397. }
  398. static void virtio_pci_config_write(void *opaque, hwaddr addr,
  399. uint64_t val, unsigned size)
  400. {
  401. VirtIOPCIProxy *proxy = opaque;
  402. uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
  403. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  404. if (addr < config) {
  405. virtio_ioport_write(proxy, addr, val);
  406. return;
  407. }
  408. addr -= config;
  409. /*
  410. * Virtio-PCI is odd. Ioports are LE but config space is target native
  411. * endian.
  412. */
  413. switch (size) {
  414. case 1:
  415. virtio_config_writeb(vdev, addr, val);
  416. break;
  417. case 2:
  418. if (virtio_is_big_endian(vdev)) {
  419. val = bswap16(val);
  420. }
  421. virtio_config_writew(vdev, addr, val);
  422. break;
  423. case 4:
  424. if (virtio_is_big_endian(vdev)) {
  425. val = bswap32(val);
  426. }
  427. virtio_config_writel(vdev, addr, val);
  428. break;
  429. }
  430. }
/* Ops for the legacy virtio-pci region: 1/2/4-byte accesses, declared
 * little endian (legacy virtio ioports are LE on the wire). */
static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
  440. static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
  441. hwaddr *off, int len)
  442. {
  443. int i;
  444. VirtIOPCIRegion *reg;
  445. for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
  446. reg = &proxy->regs[i];
  447. if (*off >= reg->offset &&
  448. *off + len <= reg->offset + reg->size) {
  449. *off -= reg->offset;
  450. return &reg->mr;
  451. }
  452. }
  453. return NULL;
  454. }
  455. /* Below are generic functions to do memcpy from/to an address space,
  456. * without byteswaps, with input validation.
  457. *
  458. * As regular address_space_* APIs all do some kind of byteswap at least for
  459. * some host/target combinations, we are forced to explicitly convert to a
  460. * known-endianness integer value.
  461. * It doesn't really matter which endian format to go through, so the code
  462. * below selects the endian that causes the least amount of work on the given
  463. * host.
  464. *
  465. * Note: host pointer must be aligned.
  466. */
  467. static
  468. void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
  469. const uint8_t *buf, int len)
  470. {
  471. uint64_t val;
  472. MemoryRegion *mr;
  473. /* address_space_* APIs assume an aligned address.
  474. * As address is under guest control, handle illegal values.
  475. */
  476. addr &= ~(len - 1);
  477. mr = virtio_address_space_lookup(proxy, &addr, len);
  478. if (!mr) {
  479. return;
  480. }
  481. /* Make sure caller aligned buf properly */
  482. assert(!(((uintptr_t)buf) & (len - 1)));
  483. switch (len) {
  484. case 1:
  485. val = pci_get_byte(buf);
  486. break;
  487. case 2:
  488. val = pci_get_word(buf);
  489. break;
  490. case 4:
  491. val = pci_get_long(buf);
  492. break;
  493. default:
  494. /* As length is under guest control, handle illegal values. */
  495. return;
  496. }
  497. memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
  498. MEMTXATTRS_UNSPECIFIED);
  499. }
  500. static void
  501. virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
  502. uint8_t *buf, int len)
  503. {
  504. uint64_t val;
  505. MemoryRegion *mr;
  506. /* address_space_* APIs assume an aligned address.
  507. * As address is under guest control, handle illegal values.
  508. */
  509. addr &= ~(len - 1);
  510. mr = virtio_address_space_lookup(proxy, &addr, len);
  511. if (!mr) {
  512. return;
  513. }
  514. /* Make sure caller aligned buf properly */
  515. assert(!(((uintptr_t)buf) & (len - 1)));
  516. memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
  517. MEMTXATTRS_UNSPECIFIED);
  518. switch (len) {
  519. case 1:
  520. pci_set_byte(buf, val);
  521. break;
  522. case 2:
  523. pci_set_word(buf, val);
  524. break;
  525. case 4:
  526. pci_set_long(buf, val);
  527. break;
  528. default:
  529. /* As length is under guest control, handle illegal values. */
  530. break;
  531. }
  532. }
  533. static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
  534. uint32_t val, int len)
  535. {
  536. VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
  537. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  538. struct virtio_pci_cfg_cap *cfg;
  539. pci_default_write_config(pci_dev, address, val, len);
  540. if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
  541. pcie_cap_flr_write_config(pci_dev, address, val, len);
  542. }
  543. if (range_covers_byte(address, len, PCI_COMMAND) &&
  544. !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
  545. virtio_pci_stop_ioeventfd(proxy);
  546. virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
  547. }
  548. if (proxy->config_cap &&
  549. ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
  550. pci_cfg_data),
  551. sizeof cfg->pci_cfg_data)) {
  552. uint32_t off;
  553. uint32_t len;
  554. cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
  555. off = le32_to_cpu(cfg->cap.offset);
  556. len = le32_to_cpu(cfg->cap.length);
  557. if (len == 1 || len == 2 || len == 4) {
  558. assert(len <= sizeof cfg->pci_cfg_data);
  559. virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
  560. }
  561. }
  562. }
  563. static uint32_t virtio_read_config(PCIDevice *pci_dev,
  564. uint32_t address, int len)
  565. {
  566. VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
  567. struct virtio_pci_cfg_cap *cfg;
  568. if (proxy->config_cap &&
  569. ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
  570. pci_cfg_data),
  571. sizeof cfg->pci_cfg_data)) {
  572. uint32_t off;
  573. uint32_t len;
  574. cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
  575. off = le32_to_cpu(cfg->cap.offset);
  576. len = le32_to_cpu(cfg->cap.length);
  577. if (len == 1 || len == 2 || len == 4) {
  578. assert(len <= sizeof cfg->pci_cfg_data);
  579. virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
  580. }
  581. }
  582. return pci_default_read_config(pci_dev, address, len);
  583. }
  584. static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
  585. unsigned int queue_no,
  586. unsigned int vector)
  587. {
  588. VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
  589. int ret;
  590. if (irqfd->users == 0) {
  591. ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
  592. if (ret < 0) {
  593. return ret;
  594. }
  595. irqfd->virq = ret;
  596. }
  597. irqfd->users++;
  598. return 0;
  599. }
  600. static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
  601. unsigned int vector)
  602. {
  603. VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
  604. if (--irqfd->users == 0) {
  605. kvm_irqchip_release_virq(kvm_state, irqfd->virq);
  606. }
  607. }
  608. static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
  609. unsigned int queue_no,
  610. unsigned int vector)
  611. {
  612. VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
  613. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  614. VirtQueue *vq = virtio_get_queue(vdev, queue_no);
  615. EventNotifier *n = virtio_queue_get_guest_notifier(vq);
  616. return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
  617. }
  618. static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
  619. unsigned int queue_no,
  620. unsigned int vector)
  621. {
  622. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  623. VirtQueue *vq = virtio_get_queue(vdev, queue_no);
  624. EventNotifier *n = virtio_queue_get_guest_notifier(vq);
  625. VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
  626. int ret;
  627. ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
  628. assert(ret == 0);
  629. }
/* Set up an MSI route (and, when the frontend supports masking, an irqfd)
 * for every active queue's vector; on failure roll back everything set up
 * so far and return the error. */
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            /* Stop at the first queue with no entries. */
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            /* Vector unset or out of range: nothing to route. */
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    /* Roll back queues before the failing one; each was fully set up. */
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}
/* Tear down MSI routes/irqfds for the first @nvqs queues; mirror image of
 * kvm_virtio_pci_vector_use(). */
static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            /* Stop at the first queue with no entries. */
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}
/* Called when the guest unmasks @vector for @queue_no: refresh the KVM MSI
 * route if the message changed, then unmask the existing irqfd (masking-
 * capable frontends) or set one up now. Returns 0 or a negative error. */
static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        /* Only update the route when the MSI message actually changed. */
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}
/* Mask delivery of one queue's MSI-X vector. */
static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}
/*
 * MSI-X unmask notifier: unmask every queue mapped to @vector,
 * propagating the (possibly updated) MSI message.  On failure the
 * queues unmasked so far are re-masked and the error is returned.
 */
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return 0;

undo:
    /* NOTE(review): the loop below visits unmasked + 1 queues, i.e. it
     * also masks the queue whose unmask just failed; presumably masking
     * an already-masked queue is harmless here -- confirm. */
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}
  784. static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
  785. {
  786. VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
  787. VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
  788. VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
  789. int index;
  790. while (vq) {
  791. index = virtio_get_queue_index(vq);
  792. if (!virtio_queue_get_num(vdev, index)) {
  793. break;
  794. }
  795. if (index < proxy->nvqs_with_notifiers) {
  796. virtio_pci_vq_vector_mask(proxy, index, vector);
  797. }
  798. vq = virtio_vector_next_queue(vq);
  799. }
  800. }
/*
 * MSI-X poll notifier: for every queue whose vector lies within
 * [vector_start, vector_end) and is currently masked, re-check for
 * pending work and latch the MSI-X pending bit so no interrupt is lost
 * while the vector stays masked.
 */
static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        /* Only masked vectors in the polled range are interesting. */
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        /* Prefer the frontend's pending check; fall back to draining the
         * guest notifier. */
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}
/*
 * Initialise or tear down the guest notifier (event notifier plus fd
 * handler) for queue @n.  When MSI-X is off, the frontend mask callback
 * (if any) tracks the assignment state instead.
 * Returns 0 on success, negative error from event_notifier_init().
 */
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    /* Without MSI-X, mirror the (de)assignment into the frontend mask. */
    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
  857. static bool virtio_pci_query_guest_notifiers(DeviceState *d)
  858. {
  859. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  860. return msix_enabled(&proxy->pci_dev);
  861. }
/*
 * Assign or deassign guest notifiers (and, with KVM irqfd support, the
 * MSI-X vector routing machinery) for the first @nvqs queues.
 *
 * Ordering is significant:
 *  - on deassign, vector notifiers must be removed while the guest
 *    notifiers are still assigned;
 *  - on assign, vector notifiers are installed only after every guest
 *    notifier exists.
 * Returns 0 on success; on failure all partial work is undone and a
 * negative error code is returned.
 */
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}
  927. static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
  928. MemoryRegion *mr, bool assign)
  929. {
  930. VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
  931. int offset;
  932. if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
  933. virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
  934. return -1;
  935. }
  936. if (assign) {
  937. offset = virtio_pci_queue_mem_mult(proxy) * n;
  938. memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
  939. } else {
  940. memory_region_del_subregion(&proxy->notify.mr, mr);
  941. }
  942. return 0;
  943. }
/*
 * VM state change hook: (re)start ioeventfd when the VM runs and stop
 * it when the VM pauses.  Also works around migration from old QEMU
 * that did not set PCI bus-master enable on virtio status writes.
 */
static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}
  964. /*
  965. * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
  966. */
  967. static int virtio_pci_query_nvectors(DeviceState *d)
  968. {
  969. VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
  970. return proxy->nvectors;
  971. }
  972. static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
  973. {
  974. VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
  975. PCIDevice *dev = &proxy->pci_dev;
  976. return pci_get_address_space(dev);
  977. }
  978. static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
  979. struct virtio_pci_cap *cap)
  980. {
  981. PCIDevice *dev = &proxy->pci_dev;
  982. int offset;
  983. offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
  984. cap->cap_len, &error_abort);
  985. assert(cap->cap_len >= sizeof *cap);
  986. memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
  987. cap->cap_len - PCI_CAP_FLAGS);
  988. return offset;
  989. }
/*
 * Read handler for the modern "common config" region
 * (VIRTIO_PCI_COMMON_* layout).  Unknown offsets read as 0.
 */
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        /* 32-bit device-feature window selected by dfselect (0 or 1);
         * legacy-only features are filtered from the modern view. */
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        /* Report the highest in-use queue index, plus one. */
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    /* Ring addresses are reported from the shadow copies kept in the
     * proxy, split into low/high 32-bit halves. */
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}
/*
 * Write handler for the modern "common config" region.  Unknown offsets
 * are silently ignored.
 */
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        /* Latch a 32-bit window of guest features, then push the full
         * 64-bit value down to the frontend. */
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        /* Stop ioeventfd before dropping DRIVER_OK, start it after
         * setting it, and reset the whole device on status 0. */
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        /* Commit the shadowed size and ring addresses to the frontend
         * when the guest enables the selected queue. */
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}
/* Reads of the notify region have no effect; always return 0. */
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}
/*
 * MMIO queue notification: the queue index is encoded in the offset
 * within the notify region, one slot of queue_mem_mult bytes per queue.
 */
static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}
  1177. static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
  1178. uint64_t val, unsigned size)
  1179. {
  1180. VirtIODevice *vdev = opaque;
  1181. unsigned queue = val;
  1182. if (queue < VIRTIO_QUEUE_MAX) {
  1183. virtio_queue_notify(vdev, queue);
  1184. }
  1185. }
/*
 * ISR status read: returns the current ISR bits and atomically clears
 * them (read-to-clear), deasserting the INTx line as a side effect.
 */
static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = atomic_xchg(&vdev->isr, 0);

    pci_irq_deassert(&proxy->pci_dev);
    return val;
}
/* Writes to the ISR region are ignored (the register is read-to-clear). */
static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}
  1199. static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
  1200. unsigned size)
  1201. {
  1202. VirtIODevice *vdev = opaque;
  1203. uint64_t val = 0;
  1204. switch (size) {
  1205. case 1:
  1206. val = virtio_config_modern_readb(vdev, addr);
  1207. break;
  1208. case 2:
  1209. val = virtio_config_modern_readw(vdev, addr);
  1210. break;
  1211. case 4:
  1212. val = virtio_config_modern_readl(vdev, addr);
  1213. break;
  1214. }
  1215. return val;
  1216. }
  1217. static void virtio_pci_device_write(void *opaque, hwaddr addr,
  1218. uint64_t val, unsigned size)
  1219. {
  1220. VirtIODevice *vdev = opaque;
  1221. switch (size) {
  1222. case 1:
  1223. virtio_config_modern_writeb(vdev, addr, val);
  1224. break;
  1225. case 2:
  1226. virtio_config_modern_writew(vdev, addr, val);
  1227. break;
  1228. case 4:
  1229. virtio_config_modern_writel(vdev, addr, val);
  1230. break;
  1231. }
  1232. }
/*
 * Create the MemoryRegions backing the five modern (virtio-1.0)
 * register windows: common config, ISR, device config, MMIO notify and
 * PIO notify.  All regions are little-endian with 1-4 byte accesses.
 * Region sizes/offsets were fixed earlier in virtio_pci_realize().
 */
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    /* Common and ISR handlers take the proxy; device/notify handlers
     * take the virtio device itself as their opaque pointer. */
    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify_pio.size);
}
/*
 * Map one modern register window into a BAR and advertise it through a
 * virtio PCI capability.  Multi-byte capability fields (offset, length)
 * are stored little-endian per the virtio spec.
 */
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}
  1319. static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
  1320. VirtIOPCIRegion *region,
  1321. struct virtio_pci_cap *cap)
  1322. {
  1323. virtio_pci_modern_region_map(proxy, region, cap,
  1324. &proxy->modern_bar, proxy->modern_mem_bar_idx);
  1325. }
  1326. static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
  1327. VirtIOPCIRegion *region,
  1328. struct virtio_pci_cap *cap)
  1329. {
  1330. virtio_pci_modern_region_map(proxy, region, cap,
  1331. &proxy->io_bar, proxy->modern_io_bar_idx);
  1332. }
  1333. static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
  1334. VirtIOPCIRegion *region)
  1335. {
  1336. memory_region_del_subregion(&proxy->modern_bar,
  1337. &region->mr);
  1338. }
  1339. static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
  1340. VirtIOPCIRegion *region)
  1341. {
  1342. memory_region_del_subregion(&proxy->io_bar,
  1343. &region->mr);
  1344. }
/*
 * Called by virtio-bus before the backend is plugged: advertise the
 * transport-level host features (VERSION_1 only when modern mode is
 * enabled; BAD_FEATURE is always offered).
 */
static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    /* Recompute after a possible disable_modern() above. */
    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return ;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        /* PIO notify: same struct type as notify, so the sizes match;
         * multiplier 0 means a single shared doorbell. */
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy);

        /* Map the modern windows and advertise them as capabilities. */
        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        /* Make the PCI_CFG capability's window fields guest-writable. */
        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        /* Legacy BAR: header registers plus device config, rounded up
         * to a power of two as required for PCI BARs. */
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}
  1473. static void virtio_pci_device_unplugged(DeviceState *d)
  1474. {
  1475. VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
  1476. bool modern = virtio_pci_modern(proxy);
  1477. bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
  1478. virtio_pci_stop_ioeventfd(proxy);
  1479. if (modern) {
  1480. virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
  1481. virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
  1482. virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
  1483. virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
  1484. if (modern_pio) {
  1485. virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
  1486. }
  1487. }
  1488. }
/*
 * Realize the virtio PCI proxy: lay out BARs and modern register
 * windows, configure PCIe capabilities when on an express bus, then
 * create the virtio bus and call the subclass realize hook.
 */
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     * region 0   --  virtio legacy io bar
     * region 1   --  msi-x bar
     * region 2   --  virtio modern io bar
     * region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    /* Default: legacy support off on express ports, on otherwise. */
    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}
/*
 * PCI unrealize hook: release the MSI-X resources held in the device's
 * exclusive MSI-X BAR.  All other transport state is torn down through
 * the generic qdev/bus unrealize path, not here.
 */
static void virtio_pci_exit(PCIDevice *pci_dev)
{
    msix_uninit_exclusive_bar(pci_dev);
}
  1591. static void virtio_pci_reset(DeviceState *qdev)
  1592. {
  1593. VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
  1594. VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
  1595. PCIDevice *dev = PCI_DEVICE(qdev);
  1596. int i;
  1597. virtio_pci_stop_ioeventfd(proxy);
  1598. virtio_bus_reset(bus);
  1599. msix_unuse_all_vectors(&proxy->pci_dev);
  1600. for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
  1601. proxy->vqs[i].enabled = 0;
  1602. proxy->vqs[i].num = 0;
  1603. proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
  1604. proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
  1605. proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
  1606. }
  1607. if (pci_is_express(dev)) {
  1608. pcie_cap_deverr_reset(dev);
  1609. pcie_cap_lnkctl_reset(dev);
  1610. pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
  1611. }
  1612. }
/*
 * qdev properties shared by every virtio-pci device.  The "x-" prefixed
 * entries are internal knobs, primarily toggled by machine-type compat
 * settings rather than directly by users.
 */
static Property virtio_pci_properties[] = {
    /* Migrate extra config state to work around a bus-master bug in old guests. */
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    /* Use a PIO (instead of MMIO) notify BAR for modern devices. */
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    /* Force a conventional PCI device even when modern mode would allow PCIe. */
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    /* Give each virtqueue its own page in the notification region. */
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    /* Expose a PCIe Address Translation Services capability. */
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    /* The following control which optional PCIe capabilities are initialized
     * at realize time (see virtio_pci_realize); default on, disabled only for
     * backward-compatible machine types. */
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
  1638. static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
  1639. {
  1640. VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
  1641. VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
  1642. PCIDevice *pci_dev = &proxy->pci_dev;
  1643. if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
  1644. virtio_pci_modern(proxy)) {
  1645. pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
  1646. }
  1647. vpciklass->parent_dc_realize(qdev, errp);
  1648. }
  1649. static void virtio_pci_class_init(ObjectClass *klass, void *data)
  1650. {
  1651. DeviceClass *dc = DEVICE_CLASS(klass);
  1652. PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
  1653. VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
  1654. dc->props = virtio_pci_properties;
  1655. k->realize = virtio_pci_realize;
  1656. k->exit = virtio_pci_exit;
  1657. k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
  1658. k->revision = VIRTIO_PCI_ABI_VERSION;
  1659. k->class_id = PCI_CLASS_OTHERS;
  1660. device_class_set_parent_realize(dc, virtio_pci_dc_realize,
  1661. &vpciklass->parent_dc_realize);
  1662. dc->reset = virtio_pci_reset;
  1663. }
/*
 * Abstract base type for all virtio-pci proxies; concrete device types
 * are derived from it via virtio_pci_types_register().
 */
static const TypeInfo virtio_pci_info = {
    .name = TYPE_VIRTIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_pci_class_init,
    .class_size = sizeof(VirtioPCIClass),
    .abstract = true,   /* never instantiated directly */
};
/*
 * Extra properties for the user-visible "generic" device variant, which
 * lets the user pick legacy/modern mode explicitly.  The transitional and
 * non-transitional variants hard-code these in their instance_init instead.
 */
static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};
  1678. static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
  1679. {
  1680. const VirtioPCIDeviceTypeInfo *t = data;
  1681. if (t->class_init) {
  1682. t->class_init(klass, NULL);
  1683. }
  1684. }
/*
 * Class init for the generic variant: exposes the disable-legacy /
 * disable-modern properties so the user chooses the mode at runtime.
 */
static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->props = virtio_pci_generic_properties;
}
/*
 * Instance init for "-transitional" device types: both the legacy and the
 * modern virtio interface are enabled, matching the virtio 1.0 notion of
 * a transitional device.
 */
static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);
    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}
/*
 * Instance init for "-non-transitional" device types: modern-only, the
 * legacy (virtio 0.9) interface is disabled outright.
 */
static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);
    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}
/*
 * Register the family of QOM types for one virtio-pci device.
 *
 * Depending on which names are set in @t, up to four types are created:
 *   - an abstract base type (t->base_name, or a synthesized
 *     "<generic_name>-base-type" when no base name was supplied),
 *   - a user-creatable generic type (t->generic_name) carrying the
 *     disable-legacy / disable-modern properties,
 *   - a modern-only variant (t->non_transitional_name),
 *   - a legacy+modern variant (t->transitional_name).
 *
 * The caller's class_init/class_size/instance_* hooks are attached to the
 * base type; the concrete variants only differ in instance_init and in the
 * PCI interface lists they advertise.
 */
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name = t->base_name,
        .parent = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size = t->class_size,
        .abstract = true,
        .interfaces = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        /* The generic variant can sit on either a PCIe or a conventional
         * PCI bus; which mode it operates in is decided at realize time. */
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        /* The generic type then derives from the synthesized base and
         * takes over the caller's class_init via the trampoline. */
        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        /* Without a base type there is nothing for the transitional
         * variants to derive from. */
        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name = t->non_transitional_name,
            .parent = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name = t->transitional_name,
            .parent = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }

    /* Safe to free once registration is done: the synthesized name has been
     * consumed by type_register() above (NULL when no name was synthesized,
     * and g_free(NULL) is a no-op). */
    g_free(base_name);
}
  1774. /* virtio-pci-bus */
  1775. static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
  1776. VirtIOPCIProxy *dev)
  1777. {
  1778. DeviceState *qdev = DEVICE(dev);
  1779. char virtio_bus_name[] = "virtio-bus";
  1780. qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
  1781. virtio_bus_name);
  1782. }
  1783. static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
  1784. {
  1785. BusClass *bus_class = BUS_CLASS(klass);
  1786. VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
  1787. bus_class->max_dev = 1;
  1788. k->notify = virtio_pci_notify;
  1789. k->save_config = virtio_pci_save_config;
  1790. k->load_config = virtio_pci_load_config;
  1791. k->save_queue = virtio_pci_save_queue;
  1792. k->load_queue = virtio_pci_load_queue;
  1793. k->save_extra_state = virtio_pci_save_extra_state;
  1794. k->load_extra_state = virtio_pci_load_extra_state;
  1795. k->has_extra_state = virtio_pci_has_extra_state;
  1796. k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
  1797. k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
  1798. k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
  1799. k->vmstate_change = virtio_pci_vmstate_change;
  1800. k->pre_plugged = virtio_pci_pre_plugged;
  1801. k->device_plugged = virtio_pci_device_plugged;
  1802. k->device_unplugged = virtio_pci_device_unplugged;
  1803. k->query_nvectors = virtio_pci_query_nvectors;
  1804. k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
  1805. k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
  1806. k->get_dma_as = virtio_pci_get_dma_as;
  1807. }
/* QOM type for the internal bus that connects a virtio backend device
 * to its virtio-pci proxy. */
static const TypeInfo virtio_pci_bus_info = {
    .name = TYPE_VIRTIO_PCI_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init = virtio_pci_bus_class_init,
};
/*
 * Module init: register the two static base types.  Concrete virtio-pci
 * device types are registered later, by each device calling
 * virtio_pci_types_register().
 */
static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)