2
0

pci.c 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820
  1. /*
  2. * QEMU PCI bus manager
  3. *
  4. * Copyright (c) 2004 Fabrice Bellard
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "qemu/datadir.h"
  26. #include "qemu/units.h"
  27. #include "hw/irq.h"
  28. #include "hw/pci/pci.h"
  29. #include "hw/pci/pci_bridge.h"
  30. #include "hw/pci/pci_bus.h"
  31. #include "hw/pci/pci_host.h"
  32. #include "hw/qdev-properties.h"
  33. #include "hw/qdev-properties-system.h"
  34. #include "migration/qemu-file-types.h"
  35. #include "migration/vmstate.h"
  36. #include "net/net.h"
  37. #include "sysemu/numa.h"
  38. #include "sysemu/sysemu.h"
  39. #include "hw/loader.h"
  40. #include "qemu/error-report.h"
  41. #include "qemu/range.h"
  42. #include "trace.h"
  43. #include "hw/pci/msi.h"
  44. #include "hw/pci/msix.h"
  45. #include "hw/hotplug.h"
  46. #include "hw/boards.h"
  47. #include "qapi/error.h"
  48. #include "qemu/cutils.h"
  49. #include "pci-internal.h"
  50. #include "hw/xen/xen.h"
  51. #include "hw/i386/kvm/xen_evtchn.h"
  52. //#define DEBUG_PCI
  53. #ifdef DEBUG_PCI
  54. # define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
  55. #else
  56. # define PCI_DPRINTF(format, ...) do { } while (0)
  57. #endif
/* Whether PCI support is available at all; defaults to true. */
bool pci_available = true;

/* Forward declarations for the PCIBus BusClass callbacks set up below. */
static char *pcibus_get_dev_path(DeviceState *dev);
static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset(BusState *qbus);
/* qdev properties common to every PCIDevice. */
static Property pci_props[] = {
    /* Slot/function address on the bus; -1 means auto-assign. */
    DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
    /* Option ROM image file and (optional) forced ROM BAR size. */
    DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
    DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, -1),
    DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
    DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                    QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
    DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
                    QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
                    QEMU_PCIE_EXTCAP_INIT_BITNR, true),
    /* Identifier pairing this device with a failover partner. */
    DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
                       failover_pair_id),
    /* Stable index exposed via ACPI; 0 means unset (see pci_acpi_index_list). */
    DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
    DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
    DEFINE_PROP_END_OF_LIST()
};
/* Migration state for a PCIBus: the per-line INTx assertion counters. */
static const VMStateDescription vmstate_pcibus = {
    .name = "PCIBUS",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* nirq must match between source and destination. */
        VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
        VMSTATE_VARRAY_INT32(irq_count, PCIBus,
                             nirq, 0, vmstate_info_int32,
                             int32_t),
        VMSTATE_END_OF_LIST()
    }
};
  92. static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
  93. {
  94. return a - b;
  95. }
/*
 * Return the lazily-created singleton GSequence of acpi-index values
 * already in use (per the variable name; ordered by g_cmp_uint32).
 */
static GSequence *pci_acpi_index_list(void)
{
    static GSequence *used_acpi_index_list;

    if (!used_acpi_index_list) {
        used_acpi_index_list = g_sequence_new(NULL);
    }
    return used_acpi_index_list;
}
/*
 * Create the "bus master" alias of the device's IOMMU/DMA address
 * space root inside bus_master_container_region.  The alias starts
 * disabled; it is enabled once PCI_COMMAND_MASTER is observed set
 * (see get_pci_config_device()).
 */
static void pci_init_bus_master(PCIDevice *pci_dev)
{
    AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);

    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
                                &pci_dev->bus_master_enable_region);
}
  114. static void pcibus_machine_done(Notifier *notifier, void *data)
  115. {
  116. PCIBus *bus = container_of(notifier, PCIBus, machine_done);
  117. int i;
  118. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  119. if (bus->devices[i]) {
  120. pci_init_bus_master(bus->devices[i]);
  121. }
  122. }
  123. }
/*
 * BusClass::realize for plain PCI buses: hook machine-init-done (to
 * finish bus-master setup) and register migration state for the bus.
 */
static void pci_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);

    bus->machine_done.notify = pcibus_machine_done;
    qemu_add_machine_init_done_notifier(&bus->machine_done);
    vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_pcibus, bus);
}
  131. static void pcie_bus_realize(BusState *qbus, Error **errp)
  132. {
  133. PCIBus *bus = PCI_BUS(qbus);
  134. Error *local_err = NULL;
  135. pci_bus_realize(qbus, &local_err);
  136. if (local_err) {
  137. error_propagate(errp, local_err);
  138. return;
  139. }
  140. /*
  141. * A PCI-E bus can support extended config space if it's the root
  142. * bus, or if the bus/bridge above it does as well
  143. */
  144. if (pci_bus_is_root(bus)) {
  145. bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
  146. } else {
  147. PCIBus *parent_bus = pci_get_bus(bus->parent_dev);
  148. if (pci_bus_allows_extended_config_space(parent_bus)) {
  149. bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
  150. }
  151. }
  152. }
/* Undo pci_bus_realize(): drop the notifier and the migration state. */
static void pci_bus_unrealize(BusState *qbus)
{
    PCIBus *bus = PCI_BUS(qbus);

    qemu_remove_machine_init_done_notifier(&bus->machine_done);
    vmstate_unregister(NULL, &vmstate_pcibus, bus);
}
  159. static int pcibus_num(PCIBus *bus)
  160. {
  161. if (pci_bus_is_root(bus)) {
  162. return 0; /* pci host bridge */
  163. }
  164. return bus->parent_dev->config[PCI_SECONDARY_BUS];
  165. }
/* PCIBusClass::numa_node default: no NUMA affinity. */
static uint16_t pcibus_numa_node(PCIBus *bus)
{
    return NUMA_NODE_UNASSIGNED;
}
/* Install the BusClass and PCIBusClass callbacks for TYPE_PCI_BUS. */
static void pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    PCIBusClass *pbc = PCI_BUS_CLASS(klass);

    k->print_dev = pcibus_dev_print;
    k->get_dev_path = pcibus_get_dev_path;
    k->get_fw_dev_path = pcibus_get_fw_dev_path;
    k->realize = pci_bus_realize;
    k->unrealize = pci_bus_unrealize;
    k->reset = pcibus_reset;
    pbc->bus_num = pcibus_num;
    pbc->numa_node = pcibus_numa_node;
}
/* QOM type registration for the plain PCI bus. */
static const TypeInfo pci_bus_info = {
    .name = TYPE_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(PCIBus),
    .class_size = sizeof(PCIBusClass),
    .class_init = pci_bus_class_init,
};

/* Marker interfaces devices implement to declare their bus flavour. */
static const TypeInfo cxl_interface_info = {
    .name = INTERFACE_CXL_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo pcie_interface_info = {
    .name = INTERFACE_PCIE_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo conventional_pci_interface_info = {
    .name = INTERFACE_CONVENTIONAL_PCI_DEVICE,
    .parent = TYPE_INTERFACE,
};
/* PCIe buses only override realize (extended config space probing). */
static void pcie_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->realize = pcie_bus_realize;
}
/* TYPE_PCIE_BUS derives from TYPE_PCI_BUS; TYPE_CXL_BUS from PCIe. */
static const TypeInfo pcie_bus_info = {
    .name = TYPE_PCIE_BUS,
    .parent = TYPE_PCI_BUS,
    .class_init = pcie_bus_class_init,
};

static const TypeInfo cxl_bus_info = {
    .name = TYPE_CXL_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pcie_bus_class_init,
};
/* Helpers defined later in this file. */
static void pci_update_mappings(PCIDevice *d);
static void pci_irq_handler(void *opaque, int irq_num, int level);
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
static void pci_del_option_rom(PCIDevice *pdev);

/* Default subsystem vendor/device IDs programmed into config space. */
static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;

/* All registered PCI host bridges (see pci_host_bus_register()). */
PCIHostStateList pci_host_bridges;
  224. int pci_bar(PCIDevice *d, int reg)
  225. {
  226. uint8_t type;
  227. /* PCIe virtual functions do not have their own BARs */
  228. assert(!pci_is_vf(d));
  229. if (reg != PCI_ROM_SLOT)
  230. return PCI_BASE_ADDRESS_0 + reg * 4;
  231. type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
  232. return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
  233. }
/* Current level (0 or 1) of INTx pin irq_num, from the d->irq_state bitmap. */
static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}

/* Record level (0 or 1) for INTx pin irq_num in the d->irq_state bitmap. */
static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
{
    d->irq_state &= ~(0x1 << irq_num);
    d->irq_state |= level << irq_num;
}
/*
 * Adjust the shared-line assertion count for irq_num by 'change'
 * (+1/-1) and drive the platform IRQ: asserted while any device on
 * the line holds it high (count != 0).
 */
static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}
/*
 * Propagate an INTx level change from a device up through any
 * bridges: each bus's map_irq remaps the pin number, and the walk
 * stops at the first bus that has a set_irq hook (the one actually
 * wired to the interrupt controller).
 */
static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
{
    PCIBus *bus;
    for (;;) {
        int dev_irq = irq_num;
        bus = pci_get_bus(pci_dev);
        /* A bus without map_irq cannot route INTx at all. */
        assert(bus->map_irq);
        irq_num = bus->map_irq(pci_dev, irq_num);
        trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
                            pci_bus_is_root(bus) ? "root-complex"
                            : DEVICE(bus->parent_dev)->canonical_path);
        if (bus->set_irq)
            break;
        /* Keep climbing: the bridge becomes the device on the parent bus. */
        pci_dev = bus->parent_dev;
    }
    pci_bus_change_irq_level(bus, irq_num, change);
}
  267. int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
  268. {
  269. assert(irq_num >= 0);
  270. assert(irq_num < bus->nirq);
  271. return !!bus->irq_count[irq_num];
  272. }
  273. /* Update interrupt status bit in config space on interrupt
  274. * state change. */
  275. static void pci_update_irq_status(PCIDevice *dev)
  276. {
  277. if (dev->irq_state) {
  278. dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
  279. } else {
  280. dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
  281. }
  282. }
  283. void pci_device_deassert_intx(PCIDevice *dev)
  284. {
  285. int i;
  286. for (i = 0; i < PCI_NUM_PINS; ++i) {
  287. pci_irq_handler(dev, i, 0);
  288. }
  289. }
/*
 * Deliver an MSI/MSI-X message on behalf of 'dev': a 32-bit write of
 * msg.data to msg.address in the device's bus-master address space,
 * tagged with the device's requester ID.
 */
static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
    MemTxAttrs attrs = {};

    /*
     * Xen uses the high bits of the address to contain some of the bits
     * of the PIRQ#. Therefore we can't just send the write cycle and
     * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x0x1000fee46000 for PIRQ#4166.
     * So we intercept the delivery here instead of in kvm_send_msi().
     */
    if (xen_mode == XEN_EMULATE &&
        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
        return;
    }
    attrs.requester_id = pci_requester_id(dev);
    address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
                         attrs, NULL);
}
  308. static void pci_reset_regions(PCIDevice *dev)
  309. {
  310. int r;
  311. if (pci_is_vf(dev)) {
  312. return;
  313. }
  314. for (r = 0; r < PCI_NUM_REGIONS; ++r) {
  315. PCIIORegion *region = &dev->io_regions[r];
  316. if (!region->size) {
  317. continue;
  318. }
  319. if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
  320. region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
  321. pci_set_quad(dev->config + pci_bar(dev, r), region->type);
  322. } else {
  323. pci_set_long(dev->config + pci_bar(dev, r), region->type);
  324. }
  325. }
  326. }
  327. static void pci_do_device_reset(PCIDevice *dev)
  328. {
  329. pci_device_deassert_intx(dev);
  330. assert(dev->irq_state == 0);
  331. /* Clear all writable bits */
  332. pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
  333. pci_get_word(dev->wmask + PCI_COMMAND) |
  334. pci_get_word(dev->w1cmask + PCI_COMMAND));
  335. pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
  336. pci_get_word(dev->wmask + PCI_STATUS) |
  337. pci_get_word(dev->w1cmask + PCI_STATUS));
  338. /* Some devices make bits of PCI_INTERRUPT_LINE read only */
  339. pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
  340. pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
  341. pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
  342. dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
  343. pci_reset_regions(dev);
  344. pci_update_mappings(dev);
  345. msi_reset(dev);
  346. msix_reset(dev);
  347. }
/*
 * This function is called on #RST and FLR.
 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
 *
 * Cold-resets the device model first, then the PCI-generic state.
 */
void pci_device_reset(PCIDevice *dev)
{
    device_cold_reset(&dev->qdev);
    pci_do_device_reset(dev);
}
/*
 * Trigger pci bus reset under a given bus.
 * Called via bus_cold_reset on RST# assert, after the devices
 * have been reset device_cold_reset-ed already.
 */
static void pcibus_reset(BusState *qbus)
{
    PCIBus *bus = DO_UPCAST(PCIBus, qbus, qbus);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_do_device_reset(bus->devices[i]);
        }
    }

    /* Every device reset de-asserts its INTx pins, so all counters
     * must have drained back to zero. */
    for (i = 0; i < bus->nirq; i++) {
        assert(bus->irq_count[i] == 0);
    }
}
/* Add 'host' to the global pci_host_bridges list. */
static void pci_host_bus_register(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}

/* Remove 'host' from the global pci_host_bridges list. */
static void pci_host_bus_unregister(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_REMOVE(host_bridge, next);
}
  385. PCIBus *pci_device_root_bus(const PCIDevice *d)
  386. {
  387. PCIBus *bus = pci_get_bus(d);
  388. while (!pci_bus_is_root(bus)) {
  389. d = bus->parent_dev;
  390. assert(d != NULL);
  391. bus = pci_get_bus(d);
  392. }
  393. return bus;
  394. }
/*
 * Return the firmware path of the root bus above 'dev': the host
 * bridge's root_bus_path hook if it has one, otherwise the qbus name.
 */
const char *pci_root_bus_path(PCIDevice *dev)
{
    PCIBus *rootbus = pci_device_root_bus(dev);
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);

    assert(host_bridge->bus == rootbus);
    if (hc->root_bus_path) {
        return (*hc->root_bus_path)(host_bridge, rootbus);
    }
    return rootbus->qbus.name;
}
  406. bool pci_bus_bypass_iommu(PCIBus *bus)
  407. {
  408. PCIBus *rootbus = bus;
  409. PCIHostState *host_bridge;
  410. if (!pci_bus_is_root(bus)) {
  411. rootbus = pci_device_root_bus(bus->parent_dev);
  412. }
  413. host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
  414. assert(host_bridge->bus == rootbus);
  415. return host_bridge->bypass_iommu;
  416. }
/*
 * Common initialisation for a root PCI bus: record the address spaces
 * and first usable devfn, mark the bus as root, and register its host
 * bridge ('parent') in the global list.
 */
static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
                                       MemoryRegion *address_space_mem,
                                       MemoryRegion *address_space_io,
                                       uint8_t devfn_min)
{
    /* devfn_min must address function 0 of a slot. */
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->slot_reserved_mask = 0x0;
    bus->address_space_mem = address_space_mem;
    bus->address_space_io = address_space_io;
    bus->flags |= PCI_BUS_IS_ROOT;

    /* host bridge */
    QLIST_INIT(&bus->child);

    pci_host_bus_register(parent);
}
/* Reverse of pci_root_bus_internal_init(): drop the host bridge entry. */
static void pci_bus_uninit(PCIBus *bus)
{
    pci_host_bus_unregister(BUS(bus)->parent);
}
/* True if 'bus' is (a subtype of) TYPE_PCIE_BUS. */
bool pci_bus_is_express(const PCIBus *bus)
{
    return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
}
/* In-place (embedded struct) initialisation of a root PCI bus. */
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
                       const char *name,
                       MemoryRegion *address_space_mem,
                       MemoryRegion *address_space_io,
                       uint8_t devfn_min, const char *typename)
{
    qbus_init(bus, bus_size, typename, parent, name);
    pci_root_bus_internal_init(bus, parent, address_space_mem,
                               address_space_io, devfn_min);
}
/* Heap-allocating variant of pci_root_bus_init(). */
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
                         MemoryRegion *address_space_mem,
                         MemoryRegion *address_space_io,
                         uint8_t devfn_min, const char *typename)
{
    PCIBus *bus;

    bus = PCI_BUS(qbus_new(typename, parent, name));
    pci_root_bus_internal_init(bus, parent, address_space_mem,
                               address_space_io, devfn_min);
    return bus;
}
/* Tear down a root bus created by pci_root_bus_new()/_init(). */
void pci_root_bus_cleanup(PCIBus *bus)
{
    pci_bus_uninit(bus);
    /* the caller of the unplug hotplug handler will delete this device */
    qbus_unrealize(BUS(bus));
}
  467. void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
  468. void *irq_opaque, int nirq)
  469. {
  470. bus->set_irq = set_irq;
  471. bus->irq_opaque = irq_opaque;
  472. bus->nirq = nirq;
  473. bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
  474. }
/* Install the pin-remapping callback used by pci_change_irq_level(). */
void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
{
    bus->map_irq = map_irq;
}
  479. void pci_bus_irqs_cleanup(PCIBus *bus)
  480. {
  481. bus->set_irq = NULL;
  482. bus->map_irq = NULL;
  483. bus->irq_opaque = NULL;
  484. bus->nirq = 0;
  485. g_free(bus->irq_count);
  486. }
/* Convenience: create a root bus and wire up its IRQ hooks in one call. */
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
                              pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                              void *irq_opaque,
                              MemoryRegion *address_space_mem,
                              MemoryRegion *address_space_io,
                              uint8_t devfn_min, int nirq,
                              const char *typename)
{
    PCIBus *bus;

    bus = pci_root_bus_new(parent, name, address_space_mem,
                           address_space_io, devfn_min, typename);
    pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
    pci_bus_map_irqs(bus, map_irq);
    return bus;
}
/* Reverse of pci_register_root_bus(). */
void pci_unregister_root_bus(PCIBus *bus)
{
    pci_bus_irqs_cleanup(bus);
    pci_root_bus_cleanup(bus);
}
/* Bus number via the PCIBusClass hook (see pcibus_num for the default). */
int pci_bus_num(PCIBus *s)
{
    return PCI_BUS_GET_CLASS(s)->bus_num(s);
}
  511. /* Returns the min and max bus numbers of a PCI bus hierarchy */
  512. void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
  513. {
  514. int i;
  515. *min_bus = *max_bus = pci_bus_num(bus);
  516. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  517. PCIDevice *dev = bus->devices[i];
  518. if (dev && IS_PCI_BRIDGE(dev)) {
  519. *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
  520. *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
  521. }
  522. }
  523. }
/* NUMA node via the PCIBusClass hook (default: NUMA_NODE_UNASSIGNED). */
int pci_bus_numa_node(PCIBus *bus)
{
    return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
}
/*
 * VMState .get for the raw config space: read the incoming bytes,
 * validate them against this device's masks, then apply them and
 * refresh everything derived from config space (BAR mappings, bridge
 * windows, bus-master enable).
 */
static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, config);
    uint8_t *config;
    int i;

    assert(size == pci_config_size(s));
    config = g_malloc(size);

    qemu_get_buffer(f, config, size);
    for (i = 0; i < size; ++i) {
        /*
         * A byte may differ from our current config only where it is
         * guest-writable (wmask/w1cmask) or not checked (cmask clear);
         * anything else means source and destination devices disagree.
         */
        if ((config[i] ^ s->config[i]) &
            s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
            error_report("%s: Bad config data: i=0x%x read: %x device: %x "
                         "cmask: %x wmask: %x w1cmask:%x", __func__,
                         i, config[i], s->config[i],
                         s->cmask[i], s->wmask[i], s->w1cmask[i]);
            g_free(config);
            return -EINVAL;
        }
    }
    memcpy(s->config, config, size);

    pci_update_mappings(s);
    if (IS_PCI_BRIDGE(s)) {
        pci_bridge_update_mappings(PCI_BRIDGE(s));
    }

    /* Re-derive the bus-master DMA enable from PCI_COMMAND. */
    memory_region_set_enabled(&s->bus_master_enable_region,
                              pci_get_word(s->config + PCI_COMMAND)
                              & PCI_COMMAND_MASTER);

    g_free(config);
    return 0;
}
/* just put buffer */
/* VMState .put for the raw config space: write the bytes verbatim. */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field, JSONWriter *vmdesc)
{
    const uint8_t **v = pv;

    assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
    qemu_put_buffer(f, *v, size);
    return 0;
}
/* VMStateInfo pairing the config-space get/put handlers above. */
static VMStateInfo vmstate_info_pci_config = {
    .name = "pci config",
    .get = get_pci_config_device,
    .put = put_pci_config_device,
};
  573. static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
  574. const VMStateField *field)
  575. {
  576. PCIDevice *s = container_of(pv, PCIDevice, irq_state);
  577. uint32_t irq_state[PCI_NUM_PINS];
  578. int i;
  579. for (i = 0; i < PCI_NUM_PINS; ++i) {
  580. irq_state[i] = qemu_get_be32(f);
  581. if (irq_state[i] != 0x1 && irq_state[i] != 0) {
  582. fprintf(stderr, "irq state %d: must be 0 or 1.\n",
  583. irq_state[i]);
  584. return -EINVAL;
  585. }
  586. }
  587. for (i = 0; i < PCI_NUM_PINS; ++i) {
  588. pci_set_irq_state(s, i, irq_state[i]);
  589. }
  590. return 0;
  591. }
  592. static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
  593. const VMStateField *field, JSONWriter *vmdesc)
  594. {
  595. int i;
  596. PCIDevice *s = container_of(pv, PCIDevice, irq_state);
  597. for (i = 0; i < PCI_NUM_PINS; ++i) {
  598. qemu_put_be32(f, pci_irq_state(s, i));
  599. }
  600. return 0;
  601. }
/* VMState callbacks for the packed per-pin INTx level state. */
static VMStateInfo vmstate_info_pci_irq_state = {
    .name = "pci irq state",
    .get = get_pci_irq_state,
    .put = put_pci_irq_state,
};
  607. static bool migrate_is_pcie(void *opaque, int version_id)
  608. {
  609. return pci_is_express((PCIDevice *)opaque);
  610. }
  611. static bool migrate_is_not_pcie(void *opaque, int version_id)
  612. {
  613. return !pci_is_express((PCIDevice *)opaque);
  614. }
/*
 * Common migration state for every PCI device.  Exactly one of the two
 * config buffers is sent, selected by whether the device is express;
 * both use vmstate_info_pci_config, which validates on load.
 */
const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        /* Conventional PCI: 256-byte config space */
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_not_pcie,
                                        0, vmstate_info_pci_config,
                                        PCI_CONFIG_SPACE_SIZE),
        /* PCI Express: 4K config space */
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_pcie,
                                        0, vmstate_info_pci_config,
                                        PCIE_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};
/* Serialize a PCI device's common state (config space + INTx levels). */
void pci_device_save(PCIDevice *s, QEMUFile *f)
{
    /* Clear interrupt status bit: it is implicit
     * in irq_state which we are saving.
     * This makes us compatible with old devices
     * which never set or clear this bit. */
    s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    vmstate_save_state(f, &vmstate_pci_device, s, NULL);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
}
/*
 * Deserialize a PCI device's common state.
 * Returns 0 on success or a negative errno from vmstate_load_state().
 */
int pci_device_load(PCIDevice *s, QEMUFile *f)
{
    int ret;

    ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
    /* Restore the interrupt status bit (cleared on save, see
     * pci_device_save); recompute it from the loaded irq_state. */
    pci_update_irq_status(s);
    return ret;
}
  654. static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
  655. {
  656. pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
  657. pci_default_sub_vendor_id);
  658. pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
  659. pci_default_sub_device_id);
  660. }
  661. /*
  662. * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL
  663. * [[<domain>:]<bus>:]<slot>.<func>, return -1 on error
  664. */
  665. static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
  666. unsigned int *slotp, unsigned int *funcp)
  667. {
  668. const char *p;
  669. char *e;
  670. unsigned long val;
  671. unsigned long dom = 0, bus = 0;
  672. unsigned int slot = 0;
  673. unsigned int func = 0;
  674. p = addr;
  675. val = strtoul(p, &e, 16);
  676. if (e == p)
  677. return -1;
  678. if (*e == ':') {
  679. bus = val;
  680. p = e + 1;
  681. val = strtoul(p, &e, 16);
  682. if (e == p)
  683. return -1;
  684. if (*e == ':') {
  685. dom = bus;
  686. bus = val;
  687. p = e + 1;
  688. val = strtoul(p, &e, 16);
  689. if (e == p)
  690. return -1;
  691. }
  692. }
  693. slot = val;
  694. if (funcp != NULL) {
  695. if (*e != '.')
  696. return -1;
  697. p = e + 1;
  698. val = strtoul(p, &e, 16);
  699. if (e == p)
  700. return -1;
  701. func = val;
  702. }
  703. /* if funcp == NULL func is 0 */
  704. if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
  705. return -1;
  706. if (*e)
  707. return -1;
  708. *domp = dom;
  709. *busp = bus;
  710. *slotp = slot;
  711. if (funcp != NULL)
  712. *funcp = func;
  713. return 0;
  714. }
  715. static void pci_init_cmask(PCIDevice *dev)
  716. {
  717. pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
  718. pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
  719. dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
  720. dev->cmask[PCI_REVISION_ID] = 0xff;
  721. dev->cmask[PCI_CLASS_PROG] = 0xff;
  722. pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
  723. dev->cmask[PCI_HEADER_TYPE] = 0xff;
  724. dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
  725. }
  726. static void pci_init_wmask(PCIDevice *dev)
  727. {
  728. int config_size = pci_config_size(dev);
  729. dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
  730. dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
  731. pci_set_word(dev->wmask + PCI_COMMAND,
  732. PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
  733. PCI_COMMAND_INTX_DISABLE);
  734. pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
  735. memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
  736. config_size - PCI_CONFIG_HEADER_SIZE);
  737. }
  738. static void pci_init_w1cmask(PCIDevice *dev)
  739. {
  740. /*
  741. * Note: It's okay to set w1cmask even for readonly bits as
  742. * long as their value is hardwired to 0.
  743. */
  744. pci_set_word(dev->w1cmask + PCI_STATUS,
  745. PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
  746. PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
  747. PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
  748. }
/* Set up the writable/w1c/checked masks specific to a Type 1
 * (PCI-to-PCI bridge) header. */
static void pci_init_mask_bridge(PCIDevice *d)
{
    /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
       PCI_SEC_LATENCY_TIMER */
    memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);

    /* base and limit */
    d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
    d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
    pci_set_word(d->wmask + PCI_MEMORY_BASE,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
                 PCI_PREF_RANGE_MASK & 0xffff);

    /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
    memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);

    /* Supported memory and i/o types: advertise 16-bit I/O decoding
     * and 64-bit prefetchable range support in the read-only type bits. */
    d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
    d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_64);
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_64);

    /*
     * TODO: Bridges default to 10-bit VGA decoding but we currently only
     * implement 16-bit decoding (no alias support).
     */
    pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_PARITY |
                 PCI_BRIDGE_CTL_SERR |
                 PCI_BRIDGE_CTL_ISA |
                 PCI_BRIDGE_CTL_VGA |
                 PCI_BRIDGE_CTL_VGA_16BIT |
                 PCI_BRIDGE_CTL_MASTER_ABORT |
                 PCI_BRIDGE_CTL_BUS_RESET |
                 PCI_BRIDGE_CTL_FAST_BACK |
                 PCI_BRIDGE_CTL_DISCARD |
                 PCI_BRIDGE_CTL_SEC_DISCARD |
                 PCI_BRIDGE_CTL_DISCARD_SERR);
    /* Below does not do anything as we never set this bit, put here for
     * completeness. */
    pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_DISCARD_STATUS);

    /* The range-type bits are read-only; have migration verify them. */
    d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
    d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_MASK);
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_MASK);
}
/*
 * Set the multifunction bit if requested and enforce the slot-wide
 * multifunction population rules; sets errp on a violation.
 */
static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
{
    uint8_t slot = PCI_SLOT(dev->devfn);
    uint8_t func;

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * With SR/IOV and ARI, a device at function 0 need not be a multifunction
     * device, as it may just be a VF that ended up with function 0 in
     * the legacy PCI interpretation. Avoid failing in such cases:
     */
    if (pci_is_vf(dev) &&
        dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }

    /*
     * multifunction bit is interpreted in two ways as follows.
     *   - all functions must set the bit to 1.
     *     Example: Intel X53
     *   - function 0 must set the bit, but the rest function (> 0)
     *     is allowed to leave the bit to 0.
     *     Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10,
     *
     * So OS (at least Linux) checks the bit of only function 0,
     * and doesn't see the bit of function > 0.
     *
     * The below check allows both interpretation.
     */
    if (PCI_FUNC(dev->devfn)) {
        PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
        if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
            /* function 0 should set multifunction bit */
            error_setg(errp, "PCI: single function device can't be populated "
                       "in function %x.%x", slot, PCI_FUNC(dev->devfn));
            return;
        }
        return;
    }

    /* Function 0 of a multifunction device: nothing more to check. */
    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }
    /* function 0 indicates single function, so function > 0 must be NULL */
    for (func = 1; func < PCI_FUNC_MAX; ++func) {
        if (bus->devices[PCI_DEVFN(slot, func)]) {
            error_setg(errp, "PCI: %x.0 indicates single function, "
                       "but %x.%x is already populated.",
                       slot, slot, func);
            return;
        }
    }
}
  853. static void pci_config_alloc(PCIDevice *pci_dev)
  854. {
  855. int config_size = pci_config_size(pci_dev);
  856. pci_dev->config = g_malloc0(config_size);
  857. pci_dev->cmask = g_malloc0(config_size);
  858. pci_dev->wmask = g_malloc0(config_size);
  859. pci_dev->w1cmask = g_malloc0(config_size);
  860. pci_dev->used = g_malloc0(config_size);
  861. }
/* Release the buffers allocated by pci_config_alloc(). */
static void pci_config_free(PCIDevice *pci_dev)
{
    g_free(pci_dev->config);
    g_free(pci_dev->cmask);
    g_free(pci_dev->wmask);
    g_free(pci_dev->w1cmask);
    g_free(pci_dev->used);
}
/* Detach a device from its bus and tear down its per-device state.
 * Inverse of the registration done in do_pci_register_device(). */
static void do_pci_unregister_device(PCIDevice *pci_dev)
{
    pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
    pci_config_free(pci_dev);

    if (xen_mode == XEN_EMULATE) {
        xen_evtchn_remove_pci_device(pci_dev);
    }
    /* Unmap the bus-master region (if mapped) before destroying the
     * address space built on top of it. */
    if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
        memory_region_del_subregion(&pci_dev->bus_master_container_region,
                                    &pci_dev->bus_master_enable_region);
    }
    address_space_destroy(&pci_dev->bus_master_as);
}
  883. /* Extract PCIReqIDCache into BDF format */
  884. static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
  885. {
  886. uint8_t bus_n;
  887. uint16_t result;
  888. switch (cache->type) {
  889. case PCI_REQ_ID_BDF:
  890. result = pci_get_bdf(cache->dev);
  891. break;
  892. case PCI_REQ_ID_SECONDARY_BUS:
  893. bus_n = pci_dev_bus_num(cache->dev);
  894. result = PCI_BUILD_BDF(bus_n, 0);
  895. break;
  896. default:
  897. error_report("Invalid PCI requester ID cache type: %d",
  898. cache->type);
  899. exit(1);
  900. break;
  901. }
  902. return result;
  903. }
/* Parse bridges up to the root complex and return requester ID
 * cache for specific device.  For full PCIe topology, the cache
 * result would be exactly the same as getting BDF of the device.
 * However, several tricks are required when system mixed up with
 * legacy PCI devices and PCIe-to-PCI bridges.
 *
 * Here we cache the proxy device (and type) not requester ID since
 * bus number might change from time to time.
 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
{
    PCIDevice *parent;
    /* Default: the device itself is the requester (full PCIe case). */
    PCIReqIDCache cache = {
        .dev = dev,
        .type = PCI_REQ_ID_BDF,
    };

    /* Walk up through every bridge until the root bus, overriding the
     * cached proxy whenever a bridge changes the visible requester. */
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        /* We are under PCI/PCIe bridges */
        parent = pci_get_bus(dev)->parent_dev;
        if (pci_is_express(parent)) {
            if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                /* When we pass through PCIe-to-PCI/PCIX bridges, we
                 * override the requester ID using secondary bus
                 * number of parent bridge with zeroed devfn
                 * (pcie-to-pci bridge spec chap 2.3). */
                cache.type = PCI_REQ_ID_SECONDARY_BUS;
                cache.dev = dev;
            }
        } else {
            /* Legacy PCI, override requester ID with the bridge's
             * BDF upstream.  When the root complex connects to
             * legacy PCI devices (including buses), it can only
             * obtain requester ID info from directly attached
             * devices.  If devices are attached under bridges, only
             * the requester ID of the bridge that is directly
             * attached to the root complex can be recognized. */
            cache.type = PCI_REQ_ID_BDF;
            cache.dev = parent;
        }
        dev = parent;
    }

    return cache;
}
/* Return the requester ID (BDF) for this device, resolved from the
 * cache built at registration time by pci_req_id_cache_get(). */
uint16_t pci_requester_id(PCIDevice *dev)
{
    return pci_req_id_cache_extract(&dev->requester_id_cache);
}
  951. static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
  952. {
  953. return !(bus->devices[devfn]);
  954. }
  955. static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
  956. {
  957. return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
  958. }
/* -1 for devfn means auto assign */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                                         const char *name, int devfn,
                                         Error **errp)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    Error *local_err = NULL;
    DeviceState *dev = DEVICE(pci_dev);
    PCIBus *bus = pci_get_bus(pci_dev);
    bool is_bridge = IS_PCI_BRIDGE(pci_dev);

    /* Only pci bridges can be attached to extra PCI root buses */
    if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
        error_setg(errp,
                   "PCI: Only PCI/PCIe bridges can be plugged into %s",
                    bus->parent_dev->name);
        return NULL;
    }

    if (devfn < 0) {
        /* Auto-assign: scan slot by slot (function 0 of each slot) for a
         * devfn that is both free and not reserved. */
        for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
            devfn += PCI_FUNC_MAX) {
            if (pci_bus_devfn_available(bus, devfn) &&
                   !pci_bus_devfn_reserved(bus, devfn)) {
                goto found;
            }
        }
        error_setg(errp, "PCI: no slot/function available for %s, all in use "
                   "or reserved", name);
        return NULL;
    found: ;
    } else if (pci_bus_devfn_reserved(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " reserved",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name);
        return NULL;
    } else if (!pci_bus_devfn_available(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " in use by %s,id=%s",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name,
                   bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
        return NULL;
    } else if (dev->hotplugged &&
               !pci_is_vf(pci_dev) &&
               pci_get_function_0(pci_dev)) {
        /* Hot-plugging a non-zero function after function 0 exists would
         * never be seen by the guest, which enumerates via function 0. */
        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
                   " new func %s cannot be exposed to guest.",
                   PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
                   pci_get_function_0(pci_dev)->name,
                   name);
        return NULL;
    }

    pci_dev->devfn = devfn;
    pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);

    /* DMA view for this device: container + bus-master-enable region. */
    memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
                       "bus master container", UINT64_MAX);
    address_space_init(&pci_dev->bus_master_as,
                       &pci_dev->bus_master_container_region, pci_dev->name);
    if (phase_check(PHASE_MACHINE_READY)) {
        pci_init_bus_master(pci_dev);
    }

    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);

    /* Populate the identification fields from the device class. */
    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);

    if (!is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }

    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    pci_init_multifunction(bus, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        do_pci_unregister_device(pci_dev);
        return NULL;
    }

    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}
  1063. static void pci_unregister_io_regions(PCIDevice *pci_dev)
  1064. {
  1065. PCIIORegion *r;
  1066. int i;
  1067. for(i = 0; i < PCI_NUM_REGIONS; i++) {
  1068. r = &pci_dev->io_regions[i];
  1069. if (!r->size || r->addr == PCI_BAR_UNMAPPED)
  1070. continue;
  1071. memory_region_del_subregion(r->address_space, r->memory);
  1072. }
  1073. pci_unregister_vga(pci_dev);
  1074. }
/* qdev unrealize hook: undo everything realize/registration set up. */
static void pci_qdev_unrealize(DeviceState *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);

    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);

    /* Give the device class a chance to clean up before detaching. */
    if (pc->exit) {
        pc->exit(pci_dev);
    }

    pci_device_deassert_intx(pci_dev);
    do_pci_unregister_device(pci_dev);

    pci_dev->msi_trigger = NULL;

    /*
     * clean up acpi-index so it could reused by another device
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        g_sequence_remove(g_sequence_lookup(used_indexes,
                          GINT_TO_POINTER(pci_dev->acpi_index),
                          g_cmp_uint32, NULL));
    }
}
/*
 * Register a BAR for the device: record the region, write the type bits
 * into config space and set up the address/check masks so guest writes
 * are handled by pci_default_write_config()/pci_update_mappings().
 * @size must be a power of two; the BAR starts out unmapped.
 */
void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr; /* offset in pci config space */
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size));

    /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
    hdr_type =
        pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = memory;
    r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    /* Address bits above the size alignment are guest-writable. */
    wmask = ~(size - 1);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }

    addr = pci_bar(pci_dev, region_num);
    pci_set_long(pci_dev->config + addr, type);

    /* 64-bit memory BARs span two config dwords; set masks accordingly. */
    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
}
  1137. static void pci_update_vga(PCIDevice *pci_dev)
  1138. {
  1139. uint16_t cmd;
  1140. if (!pci_dev->has_vga) {
  1141. return;
  1142. }
  1143. cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
  1144. memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
  1145. cmd & PCI_COMMAND_MEMORY);
  1146. memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
  1147. cmd & PCI_COMMAND_IO);
  1148. memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
  1149. cmd & PCI_COMMAND_IO);
  1150. }
/*
 * Map the three legacy VGA windows (memory, low I/O, high I/O) for this
 * device at their fixed legacy addresses, then sync their enable state
 * with the COMMAND register.  Each region must have its exact legacy size.
 */
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    /* priority 1 so the windows sit above the plain RAM/MMIO layout */
    memory_region_add_subregion_overlap(bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
    pci_dev->has_vga = true;

    pci_update_vga(pci_dev);
}
  1171. void pci_unregister_vga(PCIDevice *pci_dev)
  1172. {
  1173. PCIBus *bus = pci_get_bus(pci_dev);
  1174. if (!pci_dev->has_vga) {
  1175. return;
  1176. }
  1177. memory_region_del_subregion(bus->address_space_mem,
  1178. pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
  1179. memory_region_del_subregion(bus->address_space_io,
  1180. pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
  1181. memory_region_del_subregion(bus->address_space_io,
  1182. pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
  1183. pci_dev->has_vga = false;
  1184. }
/* Return the current bus address of a BAR (PCI_BAR_UNMAPPED if not mapped). */
pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
{
    return pci_dev->io_regions[region_num].addr;
}
/*
 * Read the raw BAR address for region @reg from config space.
 * For a VF the address comes from the PF's SR-IOV BAR plus this VF's
 * index times @size; for a regular device it is the BAR itself.
 * The size-alignment bits are masked off except for the ROM slot,
 * whose low bit is the ROM enable flag.
 */
static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
                                        uint8_t type, pcibus_t size)
{
    pcibus_t new_addr;
    if (!pci_is_vf(d)) {
        int bar = pci_bar(d, reg);
        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(d->config + bar);
        } else {
            new_addr = pci_get_long(d->config + bar);
        }
    } else {
        PCIDevice *pf = d->exp.sriov_vf.pf;
        uint16_t sriov_cap = pf->exp.sriov_cap;
        int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
        uint16_t vf_offset =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
        uint16_t vf_stride =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
        /* This VF's index among the PF's VFs (SR-IOV routing formula). */
        uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;

        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(pf->config + bar);
        } else {
            new_addr = pci_get_long(pf->config + bar);
        }
        new_addr += vf_num * size;
    }
    /* The ROM slot has a specific enable bit, keep it intact */
    if (reg != PCI_ROM_SLOT) {
        new_addr &= ~(size - 1);
    }
    return new_addr;
}
/*
 * Compute the effective bus address of BAR @reg, or PCI_BAR_UNMAPPED
 * when the BAR should not be mapped (decoding disabled in COMMAND,
 * ROM enable bit clear, wrap-around, or an address QEMU treats as
 * invalid - see the workaround comments below).
 */
pcibus_t pci_bar_address(PCIDevice *d,
                         int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
    Object *machine = qdev_get_machine();
    ObjectClass *oc = object_get_class(machine);
    MachineClass *mc = MACHINE_CLASS(oc);
    bool allow_0_address = mc->pci_allow_0_address;

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        /* I/O BAR: only mapped while I/O decoding is enabled. */
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_config_get_bar_addr(d, reg, type, size);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this work around.
         */
        if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
            (!allow_0_address && new_addr == 0)) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    /* Memory BAR: only mapped while memory decoding is enabled. */
    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr = pci_config_get_bar_addr(d, reg, type, size);
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
        (!allow_0_address && new_addr == 0)) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this work around.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * OS is allowed to set BAR beyond its addressable
     * bits. For example, 32 bit OS can set 64bit bar
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}
/* Re-evaluate every BAR's effective address and remap the regions that
 * moved; called after config writes that can change decoding. */
static void pci_update_mappings(PCIDevice *d)
{
    PCIIORegion *r;
    int i;
    pcibus_t new_addr;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];

        /* this region isn't registered */
        if (!r->size)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);
        /* A powered-off device decodes nothing. */
        if (!d->has_power) {
            new_addr = PCI_BAR_UNMAPPED;
        }

        /* This bar isn't changed */
        if (new_addr == r->addr)
            continue;

        /* now do the real mapping */
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_del_subregion(r->address_space, r->memory);
        }
        r->addr = new_addr;
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    }

    pci_update_vga(d);
}
  1320. static inline int pci_irq_disabled(PCIDevice *d)
  1321. {
  1322. return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
  1323. }
  1324. /* Called after interrupt disabled field update in config space,
  1325. * assert/deassert interrupts if necessary.
  1326. * Gets original interrupt disable bit value (before update). */
  1327. static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
  1328. {
  1329. int i, disabled = pci_irq_disabled(d);
  1330. if (disabled == was_irq_disabled)
  1331. return;
  1332. for (i = 0; i < PCI_NUM_PINS; ++i) {
  1333. int state = pci_irq_state(d, i);
  1334. pci_change_irq_level(d, i, disabled ? -state : state);
  1335. }
  1336. }
/*
 * Default config-space read handler: return @len bytes at @address
 * as a little-endian value converted to host order.
 */
uint32_t pci_default_read_config(PCIDevice *d,
                                 uint32_t address, int len)
{
    uint32_t val = 0;

    assert(address + len <= pci_config_size(d));

    /* Refresh the link status word before the guest reads it. */
    if (pci_is_express_downstream_port(d) &&
        ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
        pcie_sync_bridge_lnk(d);
    }

    /* config space is stored little-endian; memcpy + le32_to_cpu handles
     * len of 1, 2 or 4 (val was zero-initialized). */
    memcpy(&val, d->config + address, len);
    return le32_to_cpu(val);
}
/*
 * Default config-space write handler.  Applies the per-byte write mask
 * (wmask) and write-1-to-clear mask (w1cmask), then propagates side
 * effects: BAR remapping, INTx disable, bus mastering, MSI/MSI-X and
 * SR-IOV config updates.
 */
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
{
    int i, was_irq_disabled = pci_irq_disabled(d);
    uint32_t val = val_in;

    assert(addr + l <= pci_config_size(d));

    for (i = 0; i < l; val >>= 8, ++i) {
        uint8_t wmask = d->wmask[addr + i];
        uint8_t w1cmask = d->w1cmask[addr + i];
        /* A bit must not be both plainly writable and write-1-to-clear. */
        assert(!(wmask & w1cmask));
        d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
        d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
    }

    /* Remap BARs when any BAR/ROM register or COMMAND byte was touched. */
    if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
        range_covers_byte(addr, l, PCI_COMMAND))
        pci_update_mappings(d);

    if (range_covers_byte(addr, l, PCI_COMMAND)) {
        pci_update_irq_disabled(d, was_irq_disabled);
        memory_region_set_enabled(&d->bus_master_enable_region,
                                  (pci_get_word(d->config + PCI_COMMAND)
                                   & PCI_COMMAND_MASTER) && d->has_power);
    }

    msi_write_config(d, addr, val_in, l);
    msix_write_config(d, addr, val_in, l);
    pcie_sriov_config_write(d, addr, val_in, l);
}
  1376. /***********************************************************/
  1377. /* generic PCI irq support */
  1378. /* 0 <= irq_num <= 3. level must be 0 or 1 */
  1379. static void pci_irq_handler(void *opaque, int irq_num, int level)
  1380. {
  1381. PCIDevice *pci_dev = opaque;
  1382. int change;
  1383. assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
  1384. assert(level == 0 || level == 1);
  1385. change = level - pci_irq_state(pci_dev, irq_num);
  1386. if (!change)
  1387. return;
  1388. pci_set_irq_state(pci_dev, irq_num, level);
  1389. pci_update_irq_status(pci_dev);
  1390. if (pci_irq_disabled(pci_dev))
  1391. return;
  1392. pci_change_irq_level(pci_dev, irq_num, change);
  1393. }
  1394. qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
  1395. {
  1396. int intx = pci_intx(pci_dev);
  1397. assert(0 <= intx && intx < PCI_NUM_PINS);
  1398. return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
  1399. }
  1400. void pci_set_irq(PCIDevice *pci_dev, int level)
  1401. {
  1402. int intx = pci_intx(pci_dev);
  1403. pci_irq_handler(pci_dev, intx, level);
  1404. }
/* Special hooks used by device assignment */
/* Install the INTx-pin-to-IRQ routing callback; root buses only. */
void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
{
    assert(pci_bus_is_root(bus));
    bus->route_intx_to_irq = route_intx_to_irq;
}
/*
 * Resolve which host-visible IRQ @pin of @dev maps to, by swizzling the
 * pin through every bridge up to the root bus and asking that bus's
 * route_intx_to_irq hook.
 */
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
{
    PCIBus *bus;

    /* Walk upward; after the loop, bus is the root bus and pin is the
     * fully swizzled pin number at that bus. */
    do {
        int dev_irq = pin;
        bus = pci_get_bus(dev);
        pin = bus->map_irq(dev, pin);
        trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
                            pci_bus_is_root(bus) ? "root-complex"
                                    : DEVICE(bus->parent_dev)->canonical_path);
        dev = bus->parent_dev;
    } while (dev);

    if (!bus->route_intx_to_irq) {
        error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
                     object_get_typename(OBJECT(bus->qbus.parent)));
        return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
    }

    return bus->route_intx_to_irq(bus->irq_opaque, pin);
}
  1430. bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
  1431. {
  1432. return old->mode != new->mode || old->irq != new->irq;
  1433. }
  1434. void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
  1435. {
  1436. PCIDevice *dev;
  1437. PCIBus *sec;
  1438. int i;
  1439. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  1440. dev = bus->devices[i];
  1441. if (dev && dev->intx_routing_notifier) {
  1442. dev->intx_routing_notifier(dev);
  1443. }
  1444. }
  1445. QLIST_FOREACH(sec, &bus->child, sibling) {
  1446. pci_bus_fire_intx_routing_notifier(sec);
  1447. }
  1448. }
/*
 * Register a callback fired (via pci_bus_fire_intx_routing_notifier)
 * whenever the INTx-to-IRQ routing of the device's bus may have changed.
 */
void pci_device_set_intx_routing_notifier(PCIDevice *dev,
                                          PCIINTxRoutingNotifier notifier)
{
    dev->intx_routing_notifier = notifier;
}
  1454. /*
  1455. * PCI-to-PCI bridge specification
  1456. * 9.1: Interrupt routing. Table 9-1
  1457. *
  1458. * the PCI Express Base Specification, Revision 2.1
  1459. * 2.2.8.1: INTx interrupt signaling - Rules
  1460. * the Implementation Note
  1461. * Table 2-20
  1462. */
  1463. /*
  1464. * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
  1465. * 0-origin unlike PCI interrupt pin register.
  1466. */
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
{
    /* Standard bridge swizzle keyed on the device's slot number. */
    return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
}
  1471. /***********************************************************/
  1472. /* monitor info on PCI */
/*
 * Class-code -> human-readable name / firmware device-name table,
 * terminated by a { 0, NULL } entry.  The optional fourth field
 * (fw_ign_bits) masks class-code bits that are ignored when matching
 * in pci_dev_fw_name().
 */
static const pci_class_desc pci_class_descriptions[] =
{
    { 0x0001, "VGA controller", "display"},
    { 0x0100, "SCSI controller", "scsi"},
    { 0x0101, "IDE controller", "ide"},
    { 0x0102, "Floppy controller", "fdc"},
    { 0x0103, "IPI controller", "ipi"},
    { 0x0104, "RAID controller", "raid"},
    { 0x0106, "SATA controller"},
    { 0x0107, "SAS controller"},
    { 0x0180, "Storage controller"},
    { 0x0200, "Ethernet controller", "ethernet"},
    { 0x0201, "Token Ring controller", "token-ring"},
    { 0x0202, "FDDI controller", "fddi"},
    { 0x0203, "ATM controller", "atm"},
    { 0x0280, "Network controller"},
    { 0x0300, "VGA controller", "display", 0x00ff},
    { 0x0301, "XGA controller"},
    { 0x0302, "3D controller"},
    { 0x0380, "Display controller"},
    { 0x0400, "Video controller", "video"},
    { 0x0401, "Audio controller", "sound"},
    { 0x0402, "Phone"},
    { 0x0403, "Audio controller", "sound"},
    { 0x0480, "Multimedia controller"},
    { 0x0500, "RAM controller", "memory"},
    { 0x0501, "Flash controller", "flash"},
    { 0x0580, "Memory controller"},
    { 0x0600, "Host bridge", "host"},
    { 0x0601, "ISA bridge", "isa"},
    { 0x0602, "EISA bridge", "eisa"},
    { 0x0603, "MC bridge", "mca"},
    { 0x0604, "PCI bridge", "pci-bridge"},
    { 0x0605, "PCMCIA bridge", "pcmcia"},
    { 0x0606, "NUBUS bridge", "nubus"},
    { 0x0607, "CARDBUS bridge", "cardbus"},
    { 0x0608, "RACEWAY bridge"},
    { 0x0680, "Bridge"},
    { 0x0700, "Serial port", "serial"},
    { 0x0701, "Parallel port", "parallel"},
    { 0x0800, "Interrupt controller", "interrupt-controller"},
    { 0x0801, "DMA controller", "dma-controller"},
    { 0x0802, "Timer", "timer"},
    { 0x0803, "RTC", "rtc"},
    { 0x0900, "Keyboard", "keyboard"},
    { 0x0901, "Pen", "pen"},
    { 0x0902, "Mouse", "mouse"},
    { 0x0A00, "Dock station", "dock", 0x00ff},
    { 0x0B00, "i386 cpu", "cpu", 0x00ff},
    { 0x0c00, "Firewire controller", "firewire"},
    { 0x0c01, "Access bus controller", "access-bus"},
    { 0x0c02, "SSA controller", "ssa"},
    { 0x0c03, "USB controller", "usb"},
    { 0x0c04, "Fibre channel controller", "fibre-channel"},
    { 0x0c05, "SMBus"},
    { 0, NULL}
};
  1530. void pci_for_each_device_under_bus_reverse(PCIBus *bus,
  1531. pci_bus_dev_fn fn,
  1532. void *opaque)
  1533. {
  1534. PCIDevice *d;
  1535. int devfn;
  1536. for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
  1537. d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
  1538. if (d) {
  1539. fn(bus, d, opaque);
  1540. }
  1541. }
  1542. }
  1543. void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
  1544. pci_bus_dev_fn fn, void *opaque)
  1545. {
  1546. bus = pci_find_bus_nr(bus, bus_num);
  1547. if (bus) {
  1548. pci_for_each_device_under_bus_reverse(bus, fn, opaque);
  1549. }
  1550. }
  1551. void pci_for_each_device_under_bus(PCIBus *bus,
  1552. pci_bus_dev_fn fn, void *opaque)
  1553. {
  1554. PCIDevice *d;
  1555. int devfn;
  1556. for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
  1557. d = bus->devices[devfn];
  1558. if (d) {
  1559. fn(bus, d, opaque);
  1560. }
  1561. }
  1562. }
  1563. void pci_for_each_device(PCIBus *bus, int bus_num,
  1564. pci_bus_dev_fn fn, void *opaque)
  1565. {
  1566. bus = pci_find_bus_nr(bus, bus_num);
  1567. if (bus) {
  1568. pci_for_each_device_under_bus(bus, fn, opaque);
  1569. }
  1570. }
  1571. const pci_class_desc *get_class_desc(int class)
  1572. {
  1573. const pci_class_desc *desc;
  1574. desc = pci_class_descriptions;
  1575. while (desc->desc && class != desc->class) {
  1576. desc++;
  1577. }
  1578. return desc;
  1579. }
/*
 * Initialize a PCI NIC from legacy -net configuration.  Never returns
 * failure: any configuration error reports a message and exit()s.
 */
PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
                               const char *default_model,
                               const char *default_devaddr)
{
    const char *devaddr = nd->devaddr ? nd->devaddr : default_devaddr;
    GPtrArray *pci_nic_models;
    PCIBus *bus;
    PCIDevice *pci_dev;
    DeviceState *dev;
    int devfn;
    int i;
    int dom, busnr;
    unsigned slot;

    /* "virtio" is a backward-compatible alias for virtio-net-pci. */
    if (nd->model && !strcmp(nd->model, "virtio")) {
        g_free(nd->model);
        nd->model = g_strdup("virtio-net-pci");
    }

    pci_nic_models = qemu_get_nic_models(TYPE_PCI_DEVICE);

    /* "model=?" support: print the model list and exit successfully. */
    if (qemu_show_nic_models(nd->model, (const char **)pci_nic_models->pdata)) {
        exit(0);
    }

    i = qemu_find_nic_model(nd, (const char **)pci_nic_models->pdata,
                            default_model);
    if (i < 0) {
        exit(1);
    }

    if (!rootbus) {
        error_report("No primary PCI bus");
        exit(1);
    }

    assert(!rootbus->parent_dev);

    /* No address given: bus 0, devfn -1 (automatic slot assignment). */
    if (!devaddr) {
        devfn = -1;
        busnr = 0;
    } else {
        if (pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
            error_report("Invalid PCI device address %s for device %s",
                         devaddr, nd->model);
            exit(1);
        }

        if (dom != 0) {
            error_report("No support for non-zero PCI domains");
            exit(1);
        }

        devfn = PCI_DEVFN(slot, 0);
    }

    bus = pci_find_bus_nr(rootbus, busnr);
    if (!bus) {
        error_report("Invalid PCI device address %s for device %s",
                     devaddr, nd->model);
        exit(1);
    }

    pci_dev = pci_new(devfn, nd->model);
    dev = &pci_dev->qdev;
    qdev_set_nic_properties(dev, nd);
    pci_realize_and_unref(pci_dev, bus, &error_fatal);
    g_ptr_array_free(pci_nic_models, true);
    return pci_dev;
}
/*
 * Instantiate the PCI display device selected by the global
 * vga_interface_type; returns NULL for non-PCI (or no) VGA.
 */
PCIDevice *pci_vga_init(PCIBus *bus)
{
    vga_interface_created = true;
    switch (vga_interface_type) {
    case VGA_CIRRUS:
        return pci_create_simple(bus, -1, "cirrus-vga");
    case VGA_QXL:
        return pci_create_simple(bus, -1, "qxl-vga");
    case VGA_STD:
        return pci_create_simple(bus, -1, "VGA");
    case VGA_VMWARE:
        return pci_create_simple(bus, -1, "vmware-svga");
    case VGA_VIRTIO:
        return pci_create_simple(bus, -1, "virtio-vga");
    case VGA_NONE:
    default: /* Other non-PCI types. Checking for unsupported types is already
                done in vl.c. */
        return NULL;
    }
}
/* Whether a given bus number is in range of the secondary
 * bus of the given bridge device.  Reads the bridge's config space
 * directly: [secondary, subordinate] bounds the buses behind it. */
static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
{
    return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
             PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
           dev->config[PCI_SECONDARY_BUS] <= bus_num &&
           bus_num <= dev->config[PCI_SUBORDINATE_BUS];
}
  1669. /* Whether a given bus number is in a range of a root bus */
  1670. static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
  1671. {
  1672. int i;
  1673. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  1674. PCIDevice *dev = bus->devices[i];
  1675. if (dev && IS_PCI_BRIDGE(dev)) {
  1676. if (pci_secondary_bus_in_range(dev, bus_num)) {
  1677. return true;
  1678. }
  1679. }
  1680. }
  1681. return false;
  1682. }
/*
 * Find the PCIBus with number @bus_num starting the search at @bus.
 * Returns NULL when no such bus exists (or is currently unreachable,
 * e.g. behind a bridge held in reset).
 */
PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
{
    PCIBus *sec;

    if (!bus) {
        return NULL;
    }

    if (pci_bus_num(bus) == bus_num) {
        return bus;
    }

    /* Consider all bus numbers in range for the host pci bridge. */
    if (!pci_bus_is_root(bus) &&
        !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
        return NULL;
    }

    /* try child bus */
    for (; bus; bus = sec) {
        /*
         * Scan the direct children; either return an exact match, or
         * break out with @sec set to the child whose number range
         * contains @bus_num, descending into it on the next outer
         * iteration.  If the QLIST_FOREACH runs to completion, @sec is
         * NULL and the outer loop terminates.
         */
        QLIST_FOREACH(sec, &bus->child, sibling) {
            if (pci_bus_num(sec) == bus_num) {
                return sec;
            }
            /* PXB buses assumed to be children of bus 0 */
            if (pci_bus_is_root(sec)) {
                if (pci_root_bus_in_range(sec, bus_num)) {
                    break;
                }
            } else {
                if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
                    break;
                }
            }
        }
    }

    return NULL;
}
  1717. void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
  1718. pci_bus_fn end, void *parent_state)
  1719. {
  1720. PCIBus *sec;
  1721. void *state;
  1722. if (!bus) {
  1723. return;
  1724. }
  1725. if (begin) {
  1726. state = begin(bus, parent_state);
  1727. } else {
  1728. state = parent_state;
  1729. }
  1730. QLIST_FOREACH(sec, &bus->child, sibling) {
  1731. pci_for_each_bus_depth_first(sec, begin, end, state);
  1732. }
  1733. if (end) {
  1734. end(bus, state);
  1735. }
  1736. }
  1737. PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
  1738. {
  1739. bus = pci_find_bus_nr(bus, bus_num);
  1740. if (!bus)
  1741. return NULL;
  1742. return bus->devices[devfn];
  1743. }
  1744. #define ONBOARD_INDEX_MAX (16 * 1024 - 1)
/*
 * qdev realize hook for all PCI devices: validates user properties,
 * registers the device in config space, runs the class-specific
 * realize, enforces failover constraints, and loads the option ROM.
 * On any failure the steps already taken are unwound before returning
 * with *errp set.
 */
static void pci_qdev_realize(DeviceState *qdev, Error **errp)
{
    PCIDevice *pci_dev = (PCIDevice *)qdev;
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    ObjectClass *klass = OBJECT_CLASS(pc);
    Error *local_err = NULL;
    bool is_default_rom;
    uint16_t class_id;

    /*
     * capped by systemd (see: udev-builtin-net_id.c)
     * as it's the only known user honor it to avoid users
     * misconfigure QEMU and then wonder why acpi-index doesn't work
     */
    if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
        error_setg(errp, "acpi-index should be less or equal to %u",
                   ONBOARD_INDEX_MAX);
        return;
    }

    /*
     * make sure that acpi-index is unique across all present PCI devices
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        if (g_sequence_lookup(used_indexes,
                              GINT_TO_POINTER(pci_dev->acpi_index),
                              g_cmp_uint32, NULL)) {
            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
                       " already exist", pci_dev->acpi_index);
            return;
        }
        g_sequence_insert_sorted(used_indexes,
                                 GINT_TO_POINTER(pci_dev->acpi_index),
                                 g_cmp_uint32, NULL);
    }

    /* A user-supplied ROM BAR size must be a power of two. */
    if (pci_dev->romsize != -1 && !is_power_of_2(pci_dev->romsize)) {
        error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
        return;
    }

    /* initialize cap_present for pci_is_express() and pci_config_size(),
     * Note that hybrid PCIs are not set automatically and need to manage
     * QEMU_PCI_CAP_EXPRESS manually */
    if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
    }

    /* Claim the devfn and wire up config-space handling; sets errp on
     * failure (e.g. slot already occupied). */
    pci_dev = do_pci_register_device(pci_dev,
                                     object_get_typename(OBJECT(qdev)),
                                     pci_dev->devfn, errp);
    if (pci_dev == NULL)
        return;

    /* Class-specific realize; undo the registration if it fails. */
    if (pc->realize) {
        pc->realize(pci_dev, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            do_pci_unregister_device(pci_dev);
            return;
        }
    }

    /*
     * A failover primary must be a PCIe Ethernet device occupying a
     * whole slot (single-function), so it can be unplugged cleanly
     * during migration.
     */
    if (pci_dev->failover_pair_id) {
        if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
            error_setg(errp, "failover primary device must be on "
                             "PCIExpress bus");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
        if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
            error_setg(errp, "failover primary device is not an "
                             "Ethernet device");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
            || (PCI_FUNC(pci_dev->devfn) != 0)) {
            error_setg(errp, "failover: primary device must be in its own "
                             "PCI slot");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        qdev->allow_unplug_during_migration = true;
    }

    /* rom loading */
    is_default_rom = false;
    if (pci_dev->romfile == NULL && pc->romfile != NULL) {
        /* Fall back to the class-provided default ROM image. */
        pci_dev->romfile = g_strdup(pc->romfile);
        is_default_rom = true;
    }

    pci_add_option_rom(pci_dev, is_default_rom, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        pci_qdev_unrealize(DEVICE(pci_dev));
        return;
    }

    pci_set_power(pci_dev, true);

    pci_dev->msi_trigger = pci_msi_trigger;
}
  1844. PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
  1845. const char *name)
  1846. {
  1847. DeviceState *dev;
  1848. dev = qdev_new(name);
  1849. qdev_prop_set_int32(dev, "addr", devfn);
  1850. qdev_prop_set_bit(dev, "multifunction", multifunction);
  1851. return PCI_DEVICE(dev);
  1852. }
/* Create (but do not realize) a single-function PCI device. */
PCIDevice *pci_new(int devfn, const char *name)
{
    return pci_new_multifunction(devfn, false, name);
}
/*
 * Realize @dev on @bus and drop the creator's reference; the bus keeps
 * the device alive from here on.  Returns false (with *errp set) on
 * failure.
 */
bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
{
    return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
}
  1861. PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
  1862. bool multifunction,
  1863. const char *name)
  1864. {
  1865. PCIDevice *dev = pci_new_multifunction(devfn, multifunction, name);
  1866. pci_realize_and_unref(dev, bus, &error_fatal);
  1867. return dev;
  1868. }
/* Convenience: create and realize a single-function PCI device,
 * aborting on failure. */
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
{
    return pci_create_simple_multifunction(bus, devfn, false, name);
}
/*
 * Find @size consecutive unused bytes in config space past the
 * standard header.  @offset tracks the start of the current free run
 * and is bumped past every used byte; returns the run's start once it
 * reaches @size bytes, or 0 when no gap is found (0 is never a valid
 * capability offset).
 */
static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
{
    int offset = PCI_CONFIG_HEADER_SIZE;
    int i;
    for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
        if (pdev->used[i])
            offset = i + 1;
        else if (i - offset + 1 == size)
            return offset;
    }
    return 0;
}
/*
 * Walk the capability linked list in config space looking for @cap_id.
 * Returns the capability's offset (0 if absent).  If @prev_p is
 * non-NULL it receives the offset of the "next" pointer that links to
 * the found capability, which pci_del_capability() uses to unlink it.
 */
static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
                                        uint8_t *prev_p)
{
    uint8_t next, prev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
        return 0;

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT)
        if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
            break;

    if (prev_p)
        *prev_p = prev;
    return next;
}
/*
 * Return the offset of the capability that covers config-space byte
 * @offset, i.e. the highest capability start that is <= @offset, or 0
 * when the byte is unused.  Used to detect overlap when adding a new
 * capability at a fixed offset.
 */
static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
{
    uint8_t next, prev, found = 0;

    if (!(pdev->used[offset])) {
        return 0;
    }

    /* A used byte past the header implies the capability list exists. */
    assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT) {
        if (next <= offset && next > found) {
            found = next;
        }
    }

    return found;
}
/* Patch the PCI vendor and device ids in a PCI rom image if necessary.
   This is needed for an option rom which is used for more than one device. */
static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
{
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t rom_vendor_id;
    uint16_t rom_device_id;
    uint16_t rom_magic;
    uint16_t pcir_offset;
    uint8_t checksum;

    /* Words in rom data are little endian (like in PCI configuration),
       so they can be read / written with pci_get_word / pci_set_word. */

    /* Only a valid rom will be patched. */
    rom_magic = pci_get_word(ptr);
    if (rom_magic != 0xaa55) {
        PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic);
        return;
    }
    /* Offset 0x18 holds the pointer to the PCI Data Structure ("PCIR"). */
    pcir_offset = pci_get_word(ptr + 0x18);
    if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
        PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset);
        return;
    }

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
    rom_device_id = pci_get_word(ptr + pcir_offset + 6);

    PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile,
                vendor_id, device_id, rom_vendor_id, rom_device_id);

    checksum = ptr[6];

    if (vendor_id != rom_vendor_id) {
        /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
        /* Compensate the checksum byte for the id bytes being replaced:
         * add back the old bytes and subtract the new ones. */
        checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
        checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 4, vendor_id);
    }

    if (device_id != rom_device_id) {
        /* Patch device id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
        checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 6, device_id);
    }
}
/* Add an option rom for the device: either expose it through a ROM BAR,
 * or (when rom_bar is disabled) load it via fw_cfg for compatibility. */
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
                               Error **errp)
{
    int64_t size;
    char *path;
    void *ptr;
    char name[32];
    const VMStateDescription *vmsd;

    /* No ROM file configured (or explicitly set to "") -> nothing to do. */
    if (!pdev->romfile)
        return;
    if (strlen(pdev->romfile) == 0)
        return;

    if (!pdev->rom_bar) {
        /*
         * Load rom via fw_cfg instead of creating a rom bar,
         * for 0.11 compatibility.
         */
        int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

        /*
         * Hot-plugged devices can't use the option ROM
         * if the rom bar is disabled.
         */
        if (DEVICE(pdev)->hotplugged) {
            error_setg(errp, "Hot-plugged device without ROM bar"
                       " can't have an option ROM");
            return;
        }

        if (class == 0x0300) {
            rom_add_vga(pdev->romfile);
        } else {
            rom_add_option(pdev->romfile, -1);
        }
        return;
    }

    /* Search the BIOS dirs first, then treat romfile as a plain path. */
    path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
    if (path == NULL) {
        path = g_strdup(pdev->romfile);
    }

    size = get_image_size(path);
    if (size < 0) {
        error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
        g_free(path);
        return;
    } else if (size == 0) {
        error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
        g_free(path);
        return;
    } else if (size > 2 * GiB) {
        error_setg(errp, "romfile \"%s\" too large (size cannot exceed 2 GiB)",
                   pdev->romfile);
        g_free(path);
        return;
    }
    /* Honor a user-set romsize; otherwise round up to a power of two,
     * as required for a BAR size. */
    if (pdev->romsize != -1) {
        if (size > pdev->romsize) {
            error_setg(errp, "romfile \"%s\" (%u bytes) is too large for ROM size %u",
                       pdev->romfile, (uint32_t)size, pdev->romsize);
            g_free(path);
            return;
        }
    } else {
        pdev->romsize = pow2ceil(size);
    }

    /* Name the RAM block after the migration section when available. */
    vmsd = qdev_get_vmsd(DEVICE(pdev));
    if (vmsd) {
        snprintf(name, sizeof(name), "%s.rom", vmsd->name);
    } else {
        snprintf(name, sizeof(name), "%s.rom", object_get_typename(OBJECT(pdev)));
    }
    pdev->has_rom = true;
    memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, &error_fatal);
    ptr = memory_region_get_ram_ptr(&pdev->rom);
    if (load_image_size(path, ptr, size) < 0) {
        error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
        g_free(path);
        return;
    }
    g_free(path);

    if (is_default_rom) {
        /* Only the default rom images will be patched (if needed). */
        pci_patch_ids(pdev, ptr, size);
    }

    pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
}
/* Tear down the option-ROM migration state added by pci_add_option_rom(). */
static void pci_del_option_rom(PCIDevice *pdev)
{
    if (!pdev->has_rom)
        return;
    vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
    pdev->has_rom = false;
}
/*
 * On success, pci_add_capability() returns a positive value
 * that the offset of the pci capability.
 * On failure, it sets an error and returns a negative error
 * code.
 *
 * @offset == 0 requests automatic placement; otherwise the fixed
 * offset is validated against existing capabilities.  The new
 * capability is linked at the head of the capability list.
 */
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                       uint8_t offset, uint8_t size,
                       Error **errp)
{
    uint8_t *config;
    int i, overlapping_cap;

    if (!offset) {
        offset = pci_find_space(pdev, size);
        /* out of PCI config space is programming error */
        assert(offset);
    } else {
        /* Verify that capabilities don't overlap.  Note: device assignment
         * depends on this check to verify that the device is not broken.
         * Should never trigger for emulated devices, but it's helpful
         * for debugging these. */
        for (i = offset; i < offset + size; i++) {
            overlapping_cap = pci_find_capability_at_offset(pdev, i);
            if (overlapping_cap) {
                error_setg(errp, "%s:%02x:%02x.%x "
                           "Attempt to add PCI capability %x at offset "
                           "%x overlaps existing capability %x at offset %x",
                           pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
                           cap_id, offset, overlapping_cap, i);
                return -EINVAL;
            }
        }
    }

    /* Link the capability at the head of the list. */
    config = pdev->config + offset;
    config[PCI_CAP_LIST_ID] = cap_id;
    config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
    pdev->config[PCI_CAPABILITY_LIST] = offset;
    pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    /* Mark the bytes as allocated ('used' is tracked with dword granularity). */
    memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
    /* Make capability read-only by default */
    memset(pdev->wmask + offset, 0, size);
    /* Check capability by default */
    memset(pdev->cmask + offset, 0xFF, size);
    return offset;
}
/* Unlink capability from the pci config space. */
void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
{
    uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);

    if (!offset)
        return;
    /* Bypass the capability by pointing its predecessor at its successor. */
    pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
    /* Make capability writable again */
    memset(pdev->wmask + offset, 0xff, size);
    memset(pdev->w1cmask + offset, 0, size);
    /* Clear cmask as device-specific registers can't be checked */
    memset(pdev->cmask + offset, 0, size);
    memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));

    /* Drop the status bit once the capability list becomes empty. */
    if (!pdev->config[PCI_CAPABILITY_LIST])
        pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
}
/* Return the config-space offset of capability @cap_id, or 0 if absent. */
uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
{
    return pci_find_capability_list(pdev, cap_id, NULL);
}
/*
 * Derive the firmware device name for @dev from its class code via
 * pci_class_descriptions (fw_ign_bits masks class bits ignored for
 * matching).  Falls back to "pciVVVV,DDDD" when the class is unknown
 * or has no fw_name.  Writes into @buf and returns it.
 */
static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
{
    PCIDevice *d = (PCIDevice *)dev;
    const char *name = NULL;
    const pci_class_desc *desc =  pci_class_descriptions;
    int class = pci_get_word(d->config + PCI_CLASS_DEVICE);

    while (desc->desc &&
          (class & ~desc->fw_ign_bits) !=
          (desc->class & ~desc->fw_ign_bits)) {
        desc++;
    }

    if (desc->desc) {
        name = desc->fw_name;
    }

    if (name) {
        pstrcpy(buf, len, name);
    } else {
        snprintf(buf, len, "pci%04x,%04x",
                 pci_get_word(d->config + PCI_VENDOR_ID),
                 pci_get_word(d->config + PCI_DEVICE_ID));
    }

    return buf;
}
/*
 * Build the OpenFirmware-style device path component "name@slot[,func]".
 * has_func is 0 or 1 and doubles as the "%.*x" precision, so the
 * function number is printed only when it is non-zero (precision 0
 * with value 0 prints nothing).
 */
static char *pcibus_get_fw_dev_path(DeviceState *dev)
{
    PCIDevice *d = (PCIDevice *)dev;
    char name[33];
    int has_func = !!PCI_FUNC(d->devfn);

    return g_strdup_printf("%s@%x%s%.*x",
                           pci_dev_fw_name(dev, name, sizeof(name)),
                           PCI_SLOT(d->devfn),
                           has_func ? "," : "",
                           has_func,
                           PCI_FUNC(d->devfn));
}
static char *pcibus_get_dev_path(DeviceState *dev)
{
    PCIDevice *d = container_of(dev, PCIDevice, qdev);
    PCIDevice *t;
    int slot_depth;
    /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
     * 00 is added here to make this format compatible with
     * domain:Bus:Slot.Func for systems without nested PCI bridges.
     * Slot.Function list specifies the slot and function numbers for all
     * devices on the path from root to the specific device. */
    const char *root_bus_path;
    int root_bus_len;
    char slot[] = ":SS.F";
    int slot_len = sizeof slot - 1 /* For '\0' */;
    int path_len;
    char *path, *p;
    int s;

    root_bus_path = pci_root_bus_path(d);
    root_bus_len = strlen(root_bus_path);

    /* Calculate # of slots on path between device and root. */;
    slot_depth = 0;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        ++slot_depth;
    }

    /* Total length is the root prefix plus one ":SS.F" per hop. */
    path_len = root_bus_len + slot_len * slot_depth;

    /* Allocate memory, fill in the terminating null byte. */
    path = g_malloc(path_len + 1 /* For '\0' */);
    path[path_len] = '\0';

    memcpy(path, root_bus_path, root_bus_len);

    /* Fill in slot numbers. We walk up from device to root, so need to print
     * them in the reverse order, last to first. */
    p = path + path_len;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        p -= slot_len;
        /* snprintf writes into the scratch buffer first since it would
         * NUL-terminate and clobber the byte after the slot field. */
        s = snprintf(slot, sizeof slot, ":%02x.%x",
                     PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
        assert(s == slot_len);
        memcpy(p, slot, slot_len);
    }

    return path;
}
  2196. static int pci_qdev_find_recursive(PCIBus *bus,
  2197. const char *id, PCIDevice **pdev)
  2198. {
  2199. DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
  2200. if (!qdev) {
  2201. return -ENODEV;
  2202. }
  2203. /* roughly check if given qdev is pci device */
  2204. if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
  2205. *pdev = PCI_DEVICE(qdev);
  2206. return 0;
  2207. }
  2208. return -EINVAL;
  2209. }
/*
 * Search every host bridge's bus tree for a PCI device with qdev @id.
 * Returns 0 and stores the device in *pdev on success; otherwise the
 * most specific error seen (-EINVAL beats -ENODEV).
 */
int pci_qdev_find_device(const char *id, PCIDevice **pdev)
{
    PCIHostState *host_bridge;
    int rc = -ENODEV;

    QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
        int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
        if (!tmp) {
            rc = 0;
            break;
        }
        /* Remember a non-ENODEV error but keep scanning other bridges. */
        if (tmp != -ENODEV) {
            rc = tmp;
        }
    }

    return rc;
}
/* Return the memory-space MemoryRegion of the device's bus. */
MemoryRegion *pci_address_space(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_mem;
}
/* Return the I/O-space MemoryRegion of the device's bus. */
MemoryRegion *pci_address_space_io(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_io;
}
/* Class init for the abstract TYPE_PCI_DEVICE: wire the common
 * realize/unrealize hooks, bus type, and shared properties. */
static void pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);

    k->realize = pci_qdev_realize;
    k->unrealize = pci_qdev_unrealize;
    k->bus_type = TYPE_PCI_BUS;
    device_class_set_props(k, pci_props);
}
/*
 * Base-class init run for every concrete PCI device type: enforce that
 * each non-abstract type declares at least one of the Conventional
 * PCI, PCIe, or CXL interfaces.
 */
static void pci_device_class_base_init(ObjectClass *klass, void *data)
{
    if (!object_class_is_abstract(klass)) {
        ObjectClass *conventional =
            object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
        ObjectClass *pcie =
            object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
        ObjectClass *cxl =
            object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
        assert(conventional || pcie || cxl);
    }
}
/*
 * Return the DMA AddressSpace for @dev as seen by the IOMMU, walking
 * up the bus hierarchy to the first bus with an iommu_fn and applying
 * requester-ID aliasing rules for conventional PCI segments on the
 * way.  Falls back to the global system memory address space when no
 * IOMMU applies.
 */
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
{
    PCIBus *bus = pci_get_bus(dev);
    PCIBus *iommu_bus = bus;
    uint8_t devfn = dev->devfn;

    while (iommu_bus && !iommu_bus->iommu_fn && iommu_bus->parent_dev) {
        PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);

        /*
         * The requester ID of the provided device may be aliased, as seen from
         * the IOMMU, due to topology limitations.  The IOMMU relies on a
         * requester ID to provide a unique AddressSpace for devices, but
         * conventional PCI buses pre-date such concepts.  Instead, the PCIe-
         * to-PCI bridge creates and accepts transactions on behalf of down-
         * stream devices.  When doing so, all downstream devices are masked
         * (aliased) behind a single requester ID.  The requester ID used
         * depends on the format of the bridge devices.  Proper PCIe-to-PCI
         * bridges, with a PCIe capability indicating such, follow the
         * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
         * where the bridge uses the seconary bus as the bridge portion of the
         * requester ID and devfn of 00.0.  For other bridges, typically those
         * found on the root complex such as the dmi-to-pci-bridge, we follow
         * the convention of typical bare-metal hardware, which uses the
         * requester ID of the bridge itself.  There are device specific
         * exceptions to these rules, but these are the defaults that the
         * Linux kernel uses when determining DMA aliases itself and believed
         * to be true for the bare metal equivalents of the devices emulated
         * in QEMU.
         */
        if (!pci_bus_is_express(iommu_bus)) {
            PCIDevice *parent = iommu_bus->parent_dev;

            if (pci_is_express(parent) &&
                pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                devfn = PCI_DEVFN(0, 0);
                bus = iommu_bus;
            } else {
                devfn = parent->devfn;
                bus = parent_bus;
            }
        }

        iommu_bus = parent_bus;
    }
    /* Buses configured to bypass the IOMMU use plain system memory. */
    if (!pci_bus_bypass_iommu(bus) && iommu_bus && iommu_bus->iommu_fn) {
        return iommu_bus->iommu_fn(bus, iommu_bus->iommu_opaque, devfn);
    }
    return &address_space_memory;
}
/* Install the per-bus IOMMU address-space lookup hook used by
 * pci_device_iommu_address_space(). */
void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque)
{
    bus->iommu_fn = fn;
    bus->iommu_opaque = opaque;
}
  2305. static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
  2306. {
  2307. Range *range = opaque;
  2308. uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
  2309. int i;
  2310. if (!(cmd & PCI_COMMAND_MEMORY)) {
  2311. return;
  2312. }
  2313. if (IS_PCI_BRIDGE(dev)) {
  2314. pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
  2315. pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
  2316. base = MAX(base, 0x1ULL << 32);
  2317. if (limit >= base) {
  2318. Range pref_range;
  2319. range_set_bounds(&pref_range, base, limit);
  2320. range_extend(range, &pref_range);
  2321. }
  2322. }
  2323. for (i = 0; i < PCI_NUM_REGIONS; ++i) {
  2324. PCIIORegion *r = &dev->io_regions[i];
  2325. pcibus_t lob, upb;
  2326. Range region_range;
  2327. if (!r->size ||
  2328. (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
  2329. !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
  2330. continue;
  2331. }
  2332. lob = pci_bar_address(dev, i, r->type, r->size);
  2333. upb = lob + r->size - 1;
  2334. if (lob == PCI_BAR_UNMAPPED) {
  2335. continue;
  2336. }
  2337. lob = MAX(lob, 0x1ULL << 32);
  2338. if (upb >= lob) {
  2339. range_set_bounds(&region_range, lob, upb);
  2340. range_extend(range, &region_range);
  2341. }
  2342. }
  2343. }
  2344. void pci_bus_get_w64_range(PCIBus *bus, Range *range)
  2345. {
  2346. range_make_empty(range);
  2347. pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
  2348. }
  2349. static bool pcie_has_upstream_port(PCIDevice *dev)
  2350. {
  2351. PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));
  2352. /* Device associated with an upstream port.
  2353. * As there are several types of these, it's easier to check the
  2354. * parent device: upstream ports are always connected to
  2355. * root or downstream ports.
  2356. */
  2357. return parent_dev &&
  2358. pci_is_express(parent_dev) &&
  2359. parent_dev->exp.exp_cap &&
  2360. (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
  2361. pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
  2362. }
  2363. PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
  2364. {
  2365. PCIBus *bus = pci_get_bus(pci_dev);
  2366. if(pcie_has_upstream_port(pci_dev)) {
  2367. /* With an upstream PCIe port, we only support 1 device at slot 0 */
  2368. return bus->devices[0];
  2369. } else {
  2370. /* Other bus types might support multiple devices at slots 0-31 */
  2371. return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
  2372. }
  2373. }
  2374. MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
  2375. {
  2376. MSIMessage msg;
  2377. if (msix_enabled(dev)) {
  2378. msg = msix_get_message(dev, vector);
  2379. } else if (msi_enabled(dev)) {
  2380. msg = msi_get_message(dev, vector);
  2381. } else {
  2382. /* Should never happen */
  2383. error_report("%s: unknown interrupt type", __func__);
  2384. abort();
  2385. }
  2386. return msg;
  2387. }
  2388. void pci_set_power(PCIDevice *d, bool state)
  2389. {
  2390. if (d->has_power == state) {
  2391. return;
  2392. }
  2393. d->has_power = state;
  2394. pci_update_mappings(d);
  2395. memory_region_set_enabled(&d->bus_master_enable_region,
  2396. (pci_get_word(d->config + PCI_COMMAND)
  2397. & PCI_COMMAND_MASTER) && d->has_power);
  2398. if (!d->has_power) {
  2399. pci_device_reset(d);
  2400. }
  2401. }
  2402. static const TypeInfo pci_device_type_info = {
  2403. .name = TYPE_PCI_DEVICE,
  2404. .parent = TYPE_DEVICE,
  2405. .instance_size = sizeof(PCIDevice),
  2406. .abstract = true,
  2407. .class_size = sizeof(PCIDeviceClass),
  2408. .class_init = pci_device_class_init,
  2409. .class_base_init = pci_device_class_base_init,
  2410. };
  2411. static void pci_register_types(void)
  2412. {
  2413. type_register_static(&pci_bus_info);
  2414. type_register_static(&pcie_bus_info);
  2415. type_register_static(&cxl_bus_info);
  2416. type_register_static(&conventional_pci_interface_info);
  2417. type_register_static(&cxl_interface_info);
  2418. type_register_static(&pcie_interface_info);
  2419. type_register_static(&pci_device_type_info);
  2420. }
  2421. type_init(pci_register_types)